fix: [2.5] Update logging context and upgrade dependencies (#41319)

- issue: #41291
- pr: #41318

Signed-off-by: SimFG <bang.fu@zilliz.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

Parent commit: 87b9cbffaf
This commit: 18eb627533

.env (4 changed lines)
@@ -5,8 +5,8 @@ IMAGE_ARCH=amd64
 OS_NAME=ubuntu22.04
 
 # for services.builder.image in docker-compose.yml
-DATE_VERSION=20250304-8253166
-LATEST_DATE_VERSION=20250304-8253166
+DATE_VERSION=20250421-ac1e043
+LATEST_DATE_VERSION=20250421-ac1e043
 # for services.gpubuilder.image in docker-compose.yml
 GPU_DATE_VERSION=20250304-8253166
 LATEST_GPU_DATE_VERSION=20250304-8253166
@@ -1,18 +1,5 @@
 run:
-  go: "1.21"
-  skip-dirs:
-    - build
-    - configs
-    - deployments
-    - docs
-    - scripts
-    - internal/core
-    - cmake_build
-    - mmap
-    - data
-    - ci
-  skip-files:
-    - partial_search_test.go
+  go: "1.22"
   build-tags:
     - dynamic
     - test
@@ -51,7 +38,6 @@ linters-settings:
     enable: # add extra linters
       - nilness
   gofumpt:
-    lang-version: "1.18"
     module-path: github.com/milvus-io
   goimports:
     local-prefixes: github.com/milvus-io
@@ -63,7 +49,7 @@ linters-settings:
        severity: warning
        disabled: false
        arguments:
-          - ["ID"] # Allow list
+          - ["ID", "IDS"] # Allow list
      - name: context-as-argument
        severity: warning
        disabled: false
@@ -142,6 +128,19 @@ linters-settings:
      #- 'fmt\.Print.*' WIP
 
 issues:
+  exclude-dirs:
+    - build
+    - configs
+    - deployments
+    - docs
+    - scripts
+    - internal/core
+    - cmake_build
+    - mmap
+    - data
+    - ci
+  exclude-files:
+    - partial_search_test.go
   exclude-use-default: false
   exclude-rules:
     - path: .+_test\.go
@@ -176,6 +175,28 @@ issues:
     - SA1019
     # defer return errors
     - SA5001
+    # TODO: cleanup following exclusions, added on golangci-lint upgrade
+    - sloppyLen
+    - dupSubExpr
+    - assignOp
+    - ifElseChain
+    - elseif
+    - commentFormatting
+    - exitAfterDefer
+    - captLocal
+    - singleCaseSwitch
+    - typeSwitchVar
+    - indent-error-flow
+    - appendAssign
+    - deprecatedComment
+    - SA9009
+    - SA1006
+    - S1009
+    - offBy1
+    - unslice
+    # Integer overflow conversion
+    - G115
+    - has no field or method
 
   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
   max-issues-per-linter: 0
Makefile (2 changed lines)
@@ -50,7 +50,7 @@ ifdef USE_OPENDAL
 use_opendal = ${USE_OPENDAL}
 endif
 # golangci-lint
-GOLANGCI_LINT_VERSION := 1.55.2
+GOLANGCI_LINT_VERSION := 1.64.7
 GOLANGCI_LINT_OUTPUT := $(shell $(INSTALL_PATH)/golangci-lint --version 2>/dev/null)
 INSTALL_GOLANGCI_LINT := $(findstring $(GOLANGCI_LINT_VERSION), $(GOLANGCI_LINT_OUTPUT))
 # mockery
@@ -1,15 +1,8 @@
 run:
-  go: "1.21"
-  skip-dirs:
-    - build
-    - configs
-    - deployments
-    - docs
-    - scripts
-    - internal/core
-    - cmake_build
-  skip-files:
-    - partial_search_test.go
+  go: "1.22"
+  build-tags:
+    - dynamic
+    - test
 
 linters:
   disable-all: true
@@ -54,7 +47,7 @@ linters-settings:
        severity: warning
        disabled: false
        arguments:
-          - ["ID"] # Allow list
+          - ["ID", "IDS"] # Allow list
      - name: context-as-argument
        severity: warning
        disabled: false
@@ -129,6 +122,19 @@ linters-settings:
      #- 'fmt\.Print.*' WIP
 
 issues:
+  exclude-dirs:
+    - build
+    - configs
+    - deployments
+    - docs
+    - scripts
+    - internal/core
+    - cmake_build
+    - mmap
+    - data
+    - ci
+  exclude-files:
+    - partial_search_test.go
   exclude-use-default: false
   exclude-rules:
     - path: .+_test\.go
@@ -161,6 +167,28 @@ issues:
     - SA1019
     # defer return errors
     - SA5001
+    # TODO: cleanup following exclusions, added on golangci-lint upgrade
+    - sloppyLen
+    - dupSubExpr
+    - assignOp
+    - ifElseChain
+    - elseif
+    - commentFormatting
+    - exitAfterDefer
+    - captLocal
+    - singleCaseSwitch
+    - typeSwitchVar
+    - indent-error-flow
+    - appendAssign
+    - deprecatedComment
+    - SA9009
+    - SA1006
+    - S1009
+    - offBy1
+    - unslice
+    # Integer overflow conversion
+    - G115
+    - has no field or method
 
   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
   max-issues-per-linter: 0
@@ -1,6 +1,6 @@
 module github.com/milvus-io/milvus/client/v2
 
-go 1.21
+go 1.24.1
 
 require (
 	github.com/blang/semver/v4 v4.0.0
@@ -97,12 +97,12 @@ require (
 	go.uber.org/automaxprocs v1.5.3 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/crypto v0.31.0 // indirect
+	golang.org/x/crypto v0.35.0 // indirect
 	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
-	golang.org/x/net v0.33.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.28.0 // indirect
-	golang.org/x/text v0.21.0 // indirect
+	golang.org/x/net v0.36.0 // indirect
+	golang.org/x/sync v0.11.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
+	golang.org/x/text v0.22.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
@@ -579,8 +579,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
+golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -634,8 +634,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
+golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -650,8 +650,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -698,8 +698,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -708,8 +708,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
+golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -26,6 +26,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/cockroachdb/errors"
 	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -162,7 +163,7 @@ func (c *Client) setIdentifier(identifier string) {
 
 func (c *Client) connect(ctx context.Context, addr string, options ...grpc.DialOption) error {
 	if addr == "" {
-		return fmt.Errorf("address is empty")
+		return errors.New("address is empty")
 	}
 	conn, err := grpc.DialContext(ctx, addr, options...)
 	if err != nil {
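The connect change above is one instance of a pattern repeated throughout this commit: error messages that contain no format verbs are built with errors.New (here the github.com/cockroachdb/errors drop-in, which also records a stack trace) instead of fmt.Errorf. A minimal sketch of the distinction, using a hypothetical helper that is not part of the client API:

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

// validateAddr mirrors the shape of the changed connect() check; the name is illustrative only.
func validateAddr(addr string) error {
	if addr == "" {
		// Fixed message, no format verbs: errors.New is enough
		// (previously fmt.Errorf("address is empty")).
		return errors.New("address is empty")
	}
	// fmt.Errorf remains the right choice when values are interpolated
	// or another error is wrapped with %w.
	return fmt.Errorf("unexpected address %q", addr)
}

func main() {
	fmt.Println(validateAddr(""))
	fmt.Println(validateAddr("localhost:19530"))
}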
@@ -39,7 +39,7 @@ func ParseSchema(r interface{}) (*entity.Schema, error) {
 	// MapRow is not supported for schema definition
 	// TODO add PrimaryKey() interface later
 	if t.Kind() == reflect.Map {
-		return nil, fmt.Errorf("map row is not supported for schema definition")
+		return nil, errors.New("map row is not supported for schema definition")
 	}
 
 	if t.Kind() != reflect.Struct {
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"io"
 
+	"github.com/cockroachdb/errors"
 	"google.golang.org/protobuf/proto"
 
 	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
@@ -67,11 +68,11 @@ func (f *BackupFile) WriteEntry(k, v string) error {
 
 func (f *BackupFile) ReadHeader() (header *BackupHeader, headerLength uint64, err error) {
 	if len(*f) < 8 {
-		return nil, 0, fmt.Errorf("invalid backup file, cannot read header length")
+		return nil, 0, errors.New("invalid backup file, cannot read header length")
 	}
 	headerLength = binary.LittleEndian.Uint64((*f)[:8])
 	if uint64(len(*f)) < 8+headerLength {
-		return nil, 0, fmt.Errorf("invalid backup file, cannot read header")
+		return nil, 0, errors.New("invalid backup file, cannot read header")
 	}
 	header = &BackupHeader{}
 	if err := proto.Unmarshal((*f)[8:headerLength+8], header); err != nil {
@@ -85,11 +86,11 @@ func (f *BackupFile) ReadEntryFromPos(pos uint64) (entryLength uint64, entry *co
 		return 0, nil, io.EOF
 	}
 	if uint64(len(*f)) < pos+8 {
-		return 0, nil, fmt.Errorf("invalid backup file, cannot read entry length")
+		return 0, nil, errors.New("invalid backup file, cannot read entry length")
 	}
 	entryLength = binary.LittleEndian.Uint64((*f)[pos : pos+8])
 	if uint64(len(*f)) < pos+8+entryLength {
-		return 0, nil, fmt.Errorf("invalid backup file, cannot read entry")
+		return 0, nil, errors.New("invalid backup file, cannot read entry")
 	}
 	entry = &commonpb.KeyDataPair{}
 	if err := proto.Unmarshal((*f)[pos+8:pos+8+entryLength], entry); err != nil {
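The ReadHeader and ReadEntryFromPos hunks above expose the backup file framing: an 8-byte little-endian length prefix followed by a payload of that many bytes (protobuf-encoded in the real code), first for the header and then for each key/data entry. A self-contained sketch of the same framing, using a raw byte-slice payload instead of a protobuf message and hypothetical names:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

// readRecord reads one length-prefixed record starting at pos and returns the
// payload and the offset of the next record. It is an illustration of the
// layout used by BackupFile, not part of the Milvus migration API.
func readRecord(buf []byte, pos uint64) (payload []byte, next uint64, err error) {
	if pos == uint64(len(buf)) {
		return nil, 0, io.EOF
	}
	if uint64(len(buf)) < pos+8 {
		return nil, 0, errors.New("invalid backup file, cannot read entry length")
	}
	n := binary.LittleEndian.Uint64(buf[pos : pos+8])
	if uint64(len(buf)) < pos+8+n {
		return nil, 0, errors.New("invalid backup file, cannot read entry")
	}
	return buf[pos+8 : pos+8+n], pos + 8 + n, nil
}

func main() {
	// One record whose payload is the five bytes "hello".
	buf := make([]byte, 8+5)
	binary.LittleEndian.PutUint64(buf, 5)
	copy(buf[8:], "hello")

	payload, next, err := readRecord(buf, 0)
	fmt.Println(string(payload), next, err)
}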
@@ -7,6 +7,7 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/cockroachdb/errors"
 	clientv3 "go.etcd.io/etcd/client/v3"
 	"google.golang.org/protobuf/proto"
 
@@ -47,7 +48,7 @@ func (b etcd210) loadTtAliases() (meta.TtAliasesMeta210, error) {
 		return nil, err
 	}
 	if len(keys) != len(values) {
-		return nil, fmt.Errorf("length mismatch")
+		return nil, errors.New("length mismatch")
 	}
 	l := len(keys)
 	for i := 0; i < l; i++ {
@@ -79,7 +80,7 @@ func (b etcd210) loadAliases() (meta.AliasesMeta210, error) {
 		return nil, err
 	}
 	if len(keys) != len(values) {
-		return nil, fmt.Errorf("length mismatch")
+		return nil, errors.New("length mismatch")
 	}
 	l := len(keys)
 	for i := 0; i < l; i++ {
@@ -107,7 +108,7 @@ func (b etcd210) loadTtCollections() (meta.TtCollectionsMeta210, error) {
 		return nil, err
 	}
 	if len(keys) != len(values) {
-		return nil, fmt.Errorf("length mismatch")
+		return nil, errors.New("length mismatch")
 	}
 	l := len(keys)
 	for i := 0; i < l; i++ {
@@ -149,7 +150,7 @@ func (b etcd210) loadCollections() (meta.CollectionsMeta210, error) {
 		return nil, err
 	}
 	if len(keys) != len(values) {
-		return nil, fmt.Errorf("length mismatch")
+		return nil, errors.New("length mismatch")
 	}
 	l := len(keys)
 	for i := 0; i < l; i++ {
@@ -204,7 +205,7 @@ func (b etcd210) loadCollectionIndexes() (meta.CollectionIndexesMeta210, error)
 		return nil, err
 	}
 	if len(keys) != len(values) {
-		return nil, fmt.Errorf("length mismatch")
+		return nil, errors.New("length mismatch")
 	}
 	l := len(keys)
 	for i := 0; i < l; i++ {
@@ -232,7 +233,7 @@ func (b etcd210) loadSegmentIndexes() (meta.SegmentIndexesMeta210, error) {
 		return nil, err
 	}
 	if len(keys) != len(values) {
-		return nil, fmt.Errorf("length mismatch")
+		return nil, errors.New("length mismatch")
 	}
 	l := len(keys)
 	for i := 0; i < l; i++ {
@@ -255,7 +256,7 @@ func (b etcd210) loadIndexBuildMeta() (meta.IndexBuildMeta210, error) {
 		return nil, err
 	}
 	if len(keys) != len(values) {
-		return nil, fmt.Errorf("length mismatch")
+		return nil, errors.New("length mismatch")
 	}
 	l := len(keys)
 	for i := 0; i < l; i++ {
@@ -284,7 +285,7 @@ func (b etcd210) loadLastDDLRecords() (meta.LastDDLRecords, error) {
 		return nil, err
 	}
 	if len(keys) != len(values) {
-		return nil, fmt.Errorf("length mismatch")
+		return nil, errors.New("length mismatch")
 	}
 	for i, k := range keys {
 		records.AddRecord(k, values[i])
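Every loader above repeats the same guard: keys and values come back from an etcd prefix read as parallel slices and must be the same length before they are combined. A condensed sketch of that pattern, using the standard library errors package for brevity and a helper name that is illustrative, not from the migration code:

package main

import (
	"errors"
	"fmt"
)

// zipKV pairs parallel key/value slices, failing the same way the loaders do
// when the two reads disagree in length.
func zipKV(keys, values []string) (map[string]string, error) {
	if len(keys) != len(values) {
		return nil, errors.New("length mismatch")
	}
	out := make(map[string]string, len(keys))
	for i := range keys {
		out[keys[i]] = values[i]
	}
	return out, nil
}

func main() {
	m, err := zipKV([]string{"a", "b"}, []string{"1", "2"})
	fmt.Println(m, err)
}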
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"github.com/blang/semver/v4"
+	"github.com/cockroachdb/errors"
 	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.uber.org/atomic"
 
@@ -143,7 +144,7 @@ func (r *Runner) checkMySelf() error {
 	}
 	for _, session := range sessions {
 		if session.Address != r.address {
-			return fmt.Errorf("other migration is running")
+			return errors.New("other migration is running")
 		}
 	}
 	return nil
go.mod (33 changed lines)
@@ -1,8 +1,6 @@
 module github.com/milvus-io/milvus
 
-go 1.22
+go 1.24.1
 
-toolchain go1.22.7
-
 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
@@ -10,7 +8,7 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
 	github.com/aliyun/credentials-go v1.2.7
 	github.com/apache/arrow/go/v12 v12.0.1
-	github.com/bits-and-blooms/bloom/v3 v3.0.1
+	github.com/bits-and-blooms/bloom/v3 v3.3.1
 	github.com/blang/semver/v4 v4.0.0
 	github.com/casbin/casbin/v2 v2.44.2
 	github.com/casbin/json-adapter/v2 v2.0.0
@@ -46,12 +44,12 @@ require (
 	go.uber.org/atomic v1.11.0
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.0
-	golang.org/x/crypto v0.31.0
+	golang.org/x/crypto v0.35.0
 	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
-	golang.org/x/net v0.33.0
-	golang.org/x/oauth2 v0.21.0
-	golang.org/x/sync v0.10.0
-	golang.org/x/text v0.21.0
+	golang.org/x/net v0.36.0
+	golang.org/x/oauth2 v0.27.0
+	golang.org/x/sync v0.11.0
+	golang.org/x/text v0.22.0
 	google.golang.org/grpc v1.65.0
 	google.golang.org/grpc/examples v0.0.0-20220617181431-3e7b97febc7f
 )
@@ -60,7 +58,7 @@ require (
 	cloud.google.com/go/storage v1.43.0
 	github.com/antlr4-go/antlr/v4 v4.13.1
 	github.com/bits-and-blooms/bitset v1.10.0
-	github.com/bytedance/sonic v1.12.2
+	github.com/bytedance/sonic v1.13.2
 	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/cockroachdb/redact v1.1.3
 	github.com/goccy/go-json v0.10.3
@@ -103,12 +101,11 @@ require (
 	github.com/ardielle/ardielle-go v1.5.2 // indirect
 	github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/bytedance/sonic/loader v0.2.0 // indirect
+	github.com/bytedance/sonic/loader v0.2.4 // indirect
 	github.com/campoy/embedmd v1.0.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/cilium/ebpf v0.11.0 // indirect
-	github.com/cloudwego/base64x v0.1.4 // indirect
-	github.com/cloudwego/iasm v0.2.0 // indirect
+	github.com/cloudwego/base64x v0.1.5 // indirect
 	github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f // indirect
 	github.com/confluentinc/confluent-kafka-go v1.9.1 // indirect
 	github.com/containerd/cgroups/v3 v3.0.3 // indirect
@@ -139,7 +136,7 @@ require (
 	github.com/godbus/dbus/v5 v5.0.4 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/mock v1.6.0 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
@@ -220,7 +217,7 @@ require (
 	github.com/tklauser/numcpus v0.6.1 // indirect
 	github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
-	github.com/twmb/murmur3 v1.1.3 // indirect
+	github.com/twmb/murmur3 v1.1.6 // indirect
 	github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
 	github.com/ugorji/go/codec v1.2.11 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
@@ -244,8 +241,8 @@ require (
 	go.uber.org/automaxprocs v1.5.3 // indirect
 	golang.org/x/arch v0.3.0 // indirect
 	golang.org/x/mod v0.17.0 // indirect
-	golang.org/x/sys v0.28.0 // indirect
-	golang.org/x/term v0.27.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
+	golang.org/x/term v0.29.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
@@ -265,7 +262,7 @@ replace (
 	github.com/apache/arrow/go/v12 => github.com/milvus-io/arrow/go/v12 v12.0.1
 	github.com/apache/pulsar-client-go => github.com/milvus-io/pulsar-client-go v0.12.1
 	github.com/bketelsen/crypt => github.com/bketelsen/crypt v0.0.4 // Fix security alert for core-os/etcd
-	github.com/expr-lang/expr => github.com/SimFG/expr v0.0.0-20241226082220-a9a764953bf8
+	github.com/expr-lang/expr => github.com/SimFG/expr v0.0.0-20250415035630-0728e795e4e9
 	github.com/go-kit/kit => github.com/go-kit/kit v0.1.0
 	github.com/greatroar/blobloom => github.com/milvus-io/blobloom v0.0.0-20240603110411-471ae49f3b93
 	github.com/ianlancetaylor/cgosymbolizer => github.com/milvus-io/cgosymbolizer v0.0.0-20240722103217-b7dee0e50119
go.sum (59 changed lines)
@@ -86,8 +86,8 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
-github.com/SimFG/expr v0.0.0-20241226082220-a9a764953bf8 h1:boN3QhAWQU9O8EYQWxN7AEYav39PuD29QzZwTiI8Ca0=
-github.com/SimFG/expr v0.0.0-20241226082220-a9a764953bf8/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
+github.com/SimFG/expr v0.0.0-20250415035630-0728e795e4e9 h1:p/1Prokv2YkGbcyLV/gOD28Gr3VgMXIa0c9ulg5KjOY=
+github.com/SimFG/expr v0.0.0-20250415035630-0728e795e4e9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
 github.com/actgardner/gogen-avro/v10 v10.1.0/go.mod h1:o+ybmVjEa27AAr35FRqU98DJu1fXES56uXniYFv4yDA=
 github.com/actgardner/gogen-avro/v10 v10.2.1/go.mod h1:QUhjeHPchheYmMDni/Nx7VB0RsT/ee8YIgGY/xpEQgQ=
 github.com/actgardner/gogen-avro/v9 v9.1.0/go.mod h1:nyTj6wPqDJoxM3qdnjcLv+EnMDSDFqE0qDpva2QRmKc=
@@ -129,20 +129,20 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bitset v1.3.1/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88=
 github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
-github.com/bits-and-blooms/bloom/v3 v3.0.1 h1:Inlf0YXbgehxVjMPmCGv86iMCKMGPPrPSHtBF5yRHwA=
-github.com/bits-and-blooms/bloom/v3 v3.0.1/go.mod h1:MC8muvBzzPOFsrcdND/A7kU7kMhkqb9KI70JlZCP+C8=
+github.com/bits-and-blooms/bloom/v3 v3.3.1 h1:K2+A19bXT8gJR5mU7y+1yW6hsKfNCjcP2uNfLFKncjQ=
+github.com/bits-and-blooms/bloom/v3 v3.3.1/go.mod h1:bhUUknWd5khVbTe4UgMCSiOOVJzr3tMoijSK3WwvW90=
 github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
-github.com/bytedance/sonic v1.12.2 h1:oaMFuRTpMHYLpCntGca65YWt5ny+wAceDERTkT2L9lg=
-github.com/bytedance/sonic v1.12.2/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk=
+github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ=
+github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
 github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
-github.com/bytedance/sonic/loader v0.2.0 h1:zNprn+lsIP06C/IqCHs3gPQIvnvpKbbxyXQP1iU4kWM=
-github.com/bytedance/sonic/loader v0.2.0/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
+github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
+github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
 github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY=
 github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
 github.com/casbin/casbin/v2 v2.0.0/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
@@ -166,9 +166,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
 github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y=
 github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
-github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
-github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
+github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
+github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
 github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -346,8 +345,8 @@ github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY
 github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
 github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
 github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
@@ -894,8 +893,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4
 github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
 github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
-github.com/twmb/murmur3 v1.1.3 h1:D83U0XYKcHRYwYIpBKf3Pks91Z0Byda/9SJ8B6EMRcA=
-github.com/twmb/murmur3 v1.1.3/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
+github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg=
+github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
 github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
 github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
@@ -1038,8 +1037,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
+golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1138,8 +1137,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
 golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
+golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1152,8 +1151,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
-golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1166,8 +1165,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1246,11 +1245,11 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
-golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
+golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1260,8 +1259,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
+golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -18,10 +18,10 @@ package datacoord
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"testing"
 
+	"github.com/cockroachdb/errors"
 	"github.com/samber/lo"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/suite"
@@ -673,7 +673,7 @@ func (s *ChannelManagerSuite) TestAdvanceChannelState() {
 	}
 	s.prepareMeta(chNodes, datapb.ChannelWatchState_ToWatch)
 	s.mockCluster.EXPECT().NotifyChannelOperation(mock.Anything, mock.Anything, mock.Anything).
-		Return(fmt.Errorf("mock error")).Twice()
+		Return(errors.New("mock error")).Twice()
 	m, err := NewChannelManager(s.mockKv, s.mockHandler, s.mockCluster, s.mockAlloc)
 	s.Require().NoError(err)
 	s.checkAssignment(m, 1, "ch1", ToWatch)
@@ -706,7 +706,7 @@ func (s *ChannelManagerSuite) TestAdvanceChannelState() {
 	}
 	s.prepareMeta(chNodes, datapb.ChannelWatchState_ToRelease)
 	s.mockCluster.EXPECT().NotifyChannelOperation(mock.Anything, mock.Anything, mock.Anything).
-		Return(fmt.Errorf("mock error")).Twice()
+		Return(errors.New("mock error")).Twice()
 	m, err := NewChannelManager(s.mockKv, s.mockHandler, s.mockCluster, s.mockAlloc)
 	s.Require().NoError(err)
 	s.checkAssignment(m, 1, "ch1", ToRelease)
@@ -24,6 +24,7 @@ import (
 "sync"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/hashicorp/golang-lru/v2/expirable"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/samber/lo"
@@ -398,7 +399,7 @@ func (m *indexMeta) canCreateIndex(req *indexpb.CreateIndexRequest, isJson bool)
 // creating multiple indexes on same field is not supported
 errMsg := "CreateIndex failed: creating multiple indexes on same field is not supported"
 log.Warn(errMsg)
-return 0, fmt.Errorf(errMsg)
+return 0, errors.New(errMsg)
 }
 }
 return 0, nil
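Most hunks in this commit make the same mechanical change: fmt.Errorf calls whose message contains no formatting verbs become errors.New. Passing a pre-built, verb-free message to fmt.Errorf is flagged by the printf-style checks in newer go vet / golangci-lint, and a stray '%' in such a message would be misinterpreted as a formatting verb, while errors.New carries no such risk. A minimal hedged sketch of the convention, using hypothetical function names that are not part of this diff:

package main

import (
	"errors"
	"fmt"
)

// newStyleFixed returns a fixed message: errors.New, no format string involved.
func newStyleFixed() error {
	return errors.New("CreateIndex failed: creating multiple indexes on same field is not supported")
}

// newStyleFormatted keeps fmt.Errorf only where formatting verbs are actually used.
func newStyleFormatted(field string) error {
	return fmt.Errorf("CreateIndex failed on field %q", field)
}

func main() {
	fmt.Println(newStyleFixed())
	fmt.Println(newStyleFormatted("embedding"))
}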
@@ -22,6 +22,7 @@ import (
 "strings"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/samber/lo"
 "go.uber.org/zap"

@@ -228,7 +229,7 @@ func (s *Server) parseAndVerifyNestedPath(identifier string, schema *schemapb.Co
 return "", err
 }
 if identifierExpr.GetColumnExpr().GetInfo().GetFieldId() != fieldID {
-return "", fmt.Errorf("fieldID not match with field name")
+return "", errors.New("fieldID not match with field name")
 }

 nestedPath := identifierExpr.GetColumnExpr().GetInfo().GetNestedPath()
@@ -18,7 +18,6 @@ package datacoord

 import (
 "context"
-"fmt"
 "testing"
 "time"

@@ -114,7 +113,7 @@ func TestServer_CreateIndex(t *testing.T) {
 b := mocks.NewMockRootCoordClient(t)

 t.Run("get field name failed", func(t *testing.T) {
-b.EXPECT().DescribeCollectionInternal(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error"))
+b.EXPECT().DescribeCollectionInternal(mock.Anything, mock.Anything).Return(nil, errors.New("mock error"))

 s.broker = broker.NewCoordinatorBroker(b)
 resp, err := s.CreateIndex(ctx, req)
@@ -337,7 +337,7 @@ func (c *DataNodeManagerImpl) GetCompactionPlanResult(nodeID int64, planID int64

 if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
 log.Info("GetCompactionState state is not", zap.Error(err))
-return nil, fmt.Errorf("GetCopmactionState failed")
+return nil, errors.New("GetCopmactionState failed")
 }
 var result *datapb.CompactionPlanResult
 for _, rst := range resp.GetResults() {
@@ -19,9 +19,10 @@ package datacoord
 import (
 "context"
 "fmt"
+"strconv"

 "go.uber.org/zap"
 "google.golang.org/protobuf/proto"
-"strconv"

 "github.com/milvus-io/milvus/internal/metastore"
 "github.com/milvus-io/milvus/pkg/v2/log"
@@ -18,9 +18,9 @@ package datacoord

 import (
 "context"
-"fmt"
 "testing"

+"github.com/cockroachdb/errors"
 "github.com/stretchr/testify/mock"
 "github.com/stretchr/testify/suite"

@@ -76,7 +76,7 @@ func (s *statsTaskMetaSuite) Test_Method() {

 s.Run("failed case", func() {
 catalog := mocks.NewDataCoordCatalog(s.T())
-catalog.EXPECT().ListStatsTasks(mock.Anything).Return(nil, fmt.Errorf("mock error"))
+catalog.EXPECT().ListStatsTasks(mock.Anything).Return(nil, errors.New("mock error"))

 m, err := newStatsTaskMeta(context.Background(), catalog)
 s.Error(err)
@@ -105,7 +105,7 @@ func (s *statsTaskMetaSuite) Test_Method() {

 s.Run("AddStatsTask", func() {
 s.Run("failed case", func() {
-catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
+catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()

 s.Error(m.AddStatsTask(t))
 _, ok := m.tasks.Get(1)
@@ -145,7 +145,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
 })

 s.Run("failed case", func() {
-catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
+catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()

 s.Error(m.UpdateVersion(1, 1180))
 task, ok := m.tasks.Get(1)
@@ -157,7 +157,7 @@ func (s *statsTaskMetaSuite) Test_Method() {

 s.Run("UpdateBuildingTask", func() {
 s.Run("failed case", func() {
-catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
+catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()

 s.Error(m.UpdateBuildingTask(1))
 task, ok := m.tasks.Get(1)
@@ -214,7 +214,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
 NumRows: 2048,
 }
 s.Run("failed case", func() {
-catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
+catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()

 s.Error(m.FinishTask(1, result))
 task, ok := m.tasks.Get(1)
@@ -265,7 +265,7 @@ func (s *statsTaskMetaSuite) Test_Method() {

 s.Run("DropStatsTask", func() {
 s.Run("failed case", func() {
-catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
+catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()

 s.Error(m.DropStatsTask(1))
 _, ok := m.tasks.Get(1)
@@ -21,6 +21,7 @@ import (
 "fmt"
 "time"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"

 "github.com/milvus-io/milvus/internal/types"
@@ -151,7 +152,7 @@ func (st *statsTask) UpdateVersion(ctx context.Context, nodeID int64, meta *meta
 // reset compacting
 meta.SetSegmentsCompacting(ctx, []UniqueID{st.segmentID}, false)
 st.SetStartTime(time.Now())
-return fmt.Errorf("segment is contains by l0 compaction")
+return errors.New("segment is contains by l0 compaction")
 }

 if err := meta.statsTaskMeta.UpdateVersion(st.taskID, nodeID); err != nil {
@@ -18,14 +18,13 @@ package datacoord

 import (
 "context"
-"fmt"
 "testing"
 "time"

-"go.uber.org/atomic"
+"github.com/cockroachdb/errors"

 "github.com/stretchr/testify/mock"
 "github.com/stretchr/testify/suite"
+"go.uber.org/atomic"

 "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
 "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
@@ -202,7 +201,7 @@ func (s *statsTaskSuite) TestTaskStats_PreCheck() {
 compactionHandler := NewMockCompactionPlanContext(s.T())
 compactionHandler.EXPECT().checkAndSetSegmentStating(mock.Anything, mock.Anything).Return(true)

-catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("error")).Once()
+catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(errors.New("error")).Once()
 s.Error(st.UpdateVersion(context.Background(), 1, s.mt, compactionHandler))
 })
 })
@@ -217,7 +216,7 @@ func (s *statsTaskSuite) TestTaskStats_PreCheck() {
 })

 s.Run("update error", func() {
-catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("error")).Once()
+catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(errors.New("error")).Once()
 s.Error(st.UpdateMetaBuildingState(s.mt))
 })
 })
@@ -251,7 +250,7 @@ func (s *statsTaskSuite) TestTaskStats_PreCheck() {
 s.mt.segments.segments[s.segID].IsSorted = false

 handler := NewNMockHandler(s.T())
-handler.EXPECT().GetCollection(context.Background(), collID).Return(nil, fmt.Errorf("mock error")).Once()
+handler.EXPECT().GetCollection(context.Background(), collID).Return(nil, errors.New("mock error")).Once()
 checkPass := st.PreCheck(context.Background(), &taskScheduler{
 meta: s.mt,
 handler: handler,
@@ -299,7 +298,7 @@ func (s *statsTaskSuite) TestTaskStats_PreCheck() {

 s.Run("alloc failed", func() {
 alloc := allocator.NewMockAllocator(s.T())
-alloc.EXPECT().AllocN(mock.Anything).Return(0, 0, fmt.Errorf("mock error"))
+alloc.EXPECT().AllocN(mock.Anything).Return(0, 0, errors.New("mock error"))

 handler := NewNMockHandler(s.T())
 handler.EXPECT().GetCollection(context.Background(), collID).Return(&collectionInfo{
@@ -579,7 +578,7 @@ func (s *statsTaskSuite) TestTaskStats_PreCheck() {
 s.Run("set target segment failed", func() {
 catalog := catalogmocks.NewDataCoordCatalog(s.T())
 s.mt.catalog = catalog
-catalog.EXPECT().AlterSegments(mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("mock error"))
+catalog.EXPECT().AlterSegments(mock.Anything, mock.Anything, mock.Anything).Return(errors.New("mock error"))
 s.Error(st.SetJobInfo(s.mt))
 })

@@ -588,7 +587,7 @@ func (s *statsTaskSuite) TestTaskStats_PreCheck() {
 s.mt.catalog = catalog
 s.mt.statsTaskMeta.catalog = catalog
 catalog.EXPECT().AlterSegments(mock.Anything, mock.Anything, mock.Anything).Return(nil)
-catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error"))
+catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(errors.New("mock error"))

 s.Error(st.SetJobInfo(s.mt))
 })
@@ -1364,7 +1364,7 @@ func (t *clusteringCompactionTask) checkBuffersAfterCompaction() error {
 log.Warn("there are some binlogs have leaked, please check", zap.Int("buffer id", buffer.id),
 zap.Int64s("leak segments", lo.Keys(buffer.flushedBinlogs)))
 log.Debug("leak binlogs", zap.Any("buffer flushedBinlogs", buffer.flushedBinlogs))
-return fmt.Errorf("there are some binlogs have leaked")
+return errors.New("there are some binlogs have leaked")
 }
 }
 return nil
@@ -18,7 +18,6 @@ package compaction

 import (
 "context"
-"fmt"
 "sync"
 "testing"
 "time"
@@ -487,7 +486,7 @@ func (s *ClusteringCompactionTaskSuite) TestGenerateBM25Stats() {
 s.Run("alloc ID failed", func() {
 segmentID := int64(1)
 mockAlloc := allocator.NewMockAllocator(s.T())
-mockAlloc.EXPECT().Alloc(mock.Anything).Return(0, 0, fmt.Errorf("mock error")).Once()
+mockAlloc.EXPECT().Alloc(mock.Anything).Return(0, 0, errors.New("mock error")).Once()

 task := &clusteringCompactionTask{
 collectionID: 111,
@@ -507,7 +506,7 @@ func (s *ClusteringCompactionTaskSuite) TestGenerateBM25Stats() {
 s.Run("upload failed", func() {
 segmentID := int64(1)
 mockBinlogIO := io.NewMockBinlogIO(s.T())
-mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
+mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()

 task := &clusteringCompactionTask{
 collectionID: 111,
@@ -544,7 +543,7 @@ func (s *ClusteringCompactionTaskSuite) TestGeneratePkStats() {
 })

 s.Run("download binlogs failed", func() {
-s.mockBinlogIO.EXPECT().Download(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error"))
+s.mockBinlogIO.EXPECT().Download(mock.Anything, mock.Anything).Return(nil, errors.New("mock error"))
 task := &clusteringCompactionTask{
 binlogIO: s.mockBinlogIO,
 primaryKeyField: pkField,
@@ -584,7 +583,7 @@ func (s *ClusteringCompactionTaskSuite) TestGeneratePkStats() {
 s.NoError(err)
 mockBinlogIO := io.NewMockBinlogIO(s.T())
 mockBinlogIO.EXPECT().Download(mock.Anything, mock.Anything).Return(lo.Values(kvs), nil)
-mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error"))
+mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(errors.New("mock error"))
 task := &clusteringCompactionTask{
 collectionID: CollectionID,
 partitionID: PartitionID,
@@ -22,6 +22,7 @@ import (
 "math"

 "github.com/apache/arrow/go/v12/arrow/array"
+"github.com/cockroachdb/errors"
 "github.com/samber/lo"
 "go.uber.org/atomic"
 "go.uber.org/zap"
@@ -423,7 +424,7 @@ func (w *SegmentWriter) WriteRecord(r storage.Record) error {
 for fieldID, stats := range w.bm25Stats {
 field, ok := r.Column(fieldID).(*array.Binary)
 if !ok {
-return fmt.Errorf("bm25 field value not found")
+return errors.New("bm25 field value not found")
 }
 stats.AppendBytes(field.Value(i))
 }
@@ -446,12 +447,12 @@ func (w *SegmentWriter) Write(v *storage.Value) error {
 for fieldID, stats := range w.bm25Stats {
 data, ok := v.Value.(map[storage.FieldID]interface{})[fieldID]
 if !ok {
-return fmt.Errorf("bm25 field value not found")
+return errors.New("bm25 field value not found")
 }

 bytes, ok := data.([]byte)
 if !ok {
-return fmt.Errorf("bm25 field value not sparse bytes")
+return errors.New("bm25 field value not sparse bytes")
 }
 stats.AppendBytes(bytes)
 }
@@ -18,7 +18,6 @@ package grpcdatacoordclient

 import (
 "context"
-"fmt"

 "github.com/cockroachdb/errors"
 "go.uber.org/zap"
@@ -58,7 +57,7 @@ type Client struct {
 func NewClient(ctx context.Context) (types.DataCoordClient, error) {
 sess := sessionutil.NewSession(ctx)
 if sess == nil {
-err := fmt.Errorf("new session error, maybe can not connect to etcd")
+err := errors.New("new session error, maybe can not connect to etcd")
 log.Ctx(ctx).Debug("DataCoordClient NewClient failed", zap.Error(err))
 return nil, err
 }
@@ -102,7 +101,7 @@ func (c *Client) getDataCoordAddr() (string, error) {
 ms, ok := msess[key]
 if !ok {
 log.Debug("DataCoordClient, not existed in msess ", zap.Any("key", key), zap.Any("len of msess", len(msess)))
-return "", fmt.Errorf("find no available datacoord, check datacoord state")
+return "", errors.New("find no available datacoord, check datacoord state")
 }
 log.Debug("DataCoordClient GetSessions success",
 zap.String("address", ms.Address),
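The errors.New calls introduced above resolve to github.com/cockroachdb/errors, which is the package imported throughout this diff rather than the standard library. To my understanding it is a drop-in replacement for stdlib errors.New/Is/As that additionally records a stack trace at the construction site, visible with the %+v verb. A small hedged sketch (assumes the module is present in go.mod; the connect helper is hypothetical, not part of this diff):

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

// connect mirrors the client constructors above: reject an empty address early.
func connect(addr string) error {
	if addr == "" {
		// With cockroachdb/errors this records the message plus a stack trace here.
		return errors.New("address is empty")
	}
	return nil
}

func main() {
	err := connect("")
	fmt.Printf("%v\n", err)  // message only
	fmt.Printf("%+v\n", err) // message plus captured stack trace
}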
@@ -20,6 +20,7 @@ import (
 "context"
 "fmt"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"
 "google.golang.org/grpc"

@@ -52,11 +53,11 @@ type Client struct {
 // NewClient creates a client for DataNode.
 func NewClient(ctx context.Context, addr string, serverID int64) (types.DataNodeClient, error) {
 if addr == "" {
-return nil, fmt.Errorf("address is empty")
+return nil, errors.New("address is empty")
 }
 sess := sessionutil.NewSession(ctx)
 if sess == nil {
-err := fmt.Errorf("new session error, maybe can not connect to etcd")
+err := errors.New("new session error, maybe can not connect to etcd")
 log.Ctx(ctx).Debug("DataNodeClient New Etcd Session failed", zap.Error(err))
 return nil, err
 }
@@ -20,6 +20,7 @@ import (
 "context"
 "fmt"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"
 "google.golang.org/grpc"

@@ -50,11 +51,11 @@ type Client struct {
 // NewClient creates a new IndexNode client.
 func NewClient(ctx context.Context, addr string, nodeID int64, encryption bool) (types.IndexNodeClient, error) {
 if addr == "" {
-return nil, fmt.Errorf("address is empty")
+return nil, errors.New("address is empty")
 }
 sess := sessionutil.NewSession(ctx)
 if sess == nil {
-err := fmt.Errorf("new session error, maybe can not connect to etcd")
+err := errors.New("new session error, maybe can not connect to etcd")
 log.Ctx(ctx).Debug("IndexNodeClient New Etcd Session failed", zap.Error(err))
 return nil, err
 }
@@ -20,6 +20,7 @@ import (
 "context"
 "fmt"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"
 "google.golang.org/grpc"

@@ -50,11 +51,11 @@ type Client struct {
 // NewClient creates a new client instance
 func NewClient(ctx context.Context, addr string, nodeID int64) (types.ProxyClient, error) {
 if addr == "" {
-return nil, fmt.Errorf("address is empty")
+return nil, errors.New("address is empty")
 }
 sess := sessionutil.NewSession(ctx)
 if sess == nil {
-err := fmt.Errorf("new session error, maybe can not connect to etcd")
+err := errors.New("new session error, maybe can not connect to etcd")
 log.Ctx(ctx).Debug("Proxy client new session failed", zap.Error(err))
 return nil, err
 }
@@ -25,6 +25,7 @@ import (
 "testing"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/gin-gonic/gin"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"
@@ -2043,9 +2044,9 @@ func validateTestCases(t *testing.T, testEngine *gin.Engine, queryTestCases []re
 returnBody := &ReturnErrMsg{}
 err := json.Unmarshal(w.Body.Bytes(), returnBody)
 assert.Nil(t, err, "case %d: ", i)
-assert.Equal(t, testcase.errCode, returnBody.Code, "case %d: ", i, string(testcase.requestBody))
+assert.Equal(t, testcase.errCode, returnBody.Code, "case: %d, request body: %s ", i, string(testcase.requestBody))
 if testcase.errCode != 0 {
-assert.Equal(t, testcase.errMsg, returnBody.Message, "case %d: ", i, string(testcase.requestBody))
+assert.Contains(t, returnBody.Message, testcase.errMsg, "case: %d, request body: %s", i, string(testcase.requestBody))
 }
 fmt.Println(w.Body.String())
 })
@@ -2069,7 +2070,7 @@ func TestDML(t *testing.T) {
 if matchCountRule(req.OutputFields) {
 for _, pair := range req.QueryParams {
 if pair.GetKey() == ParamLimit {
-return nil, fmt.Errorf("mock error")
+return nil, errors.New("mock error")
 }
 }
 }
@@ -2448,7 +2449,7 @@ func TestSearchV2(t *testing.T) {
 queryTestCases = append(queryTestCases, requestBodyTestCase{
 path: SearchAction,
 requestBody: []byte(`{"collectionName": "book", "data": [["0.1", "0.2"]], "filter": "book_id in [2, 4, 6, 8]", "limit": 4, "outputFields": ["word_count"], "params": {"radius":0.9, "range_filter": 0.1}, "groupingField": "test"}`),
-errMsg: "can only accept json format request, error: Mismatch type float32 with value string \"at index 9: mismatched type with value\\n\\n\\t[[\\\"0.1\\\", \\\"0.2\\\"]]\\n\\t.........^......\\n\": invalid parameter[expected=FloatVector][actual=[[\"0.1\", \"0.2\"]]]",
+errMsg: "can only accept json format request, error: Mismatch type float32 with value",
 errCode: 1801,
 })
 queryTestCases = append(queryTestCases, requestBodyTestCase{
@@ -2490,7 +2491,7 @@ func TestSearchV2(t *testing.T) {
 queryTestCases = append(queryTestCases, requestBodyTestCase{
 path: SearchAction,
 requestBody: []byte(`{"collectionName": "book", "data": [[0.1, 0.2]], "annsField": "binaryVector", "filter": "book_id in [2, 4, 6, 8]", "limit": 4, "outputFields": ["word_count"]}`),
-errMsg: "can only accept json format request, error: Mismatch type uint8 with value number \"at index 7: mismatched type with value\\n\\n\\t[[0.1, 0.2]]\\n\\t.......^....\\n\": invalid parameter[expected=BinaryVector][actual=[[0.1, 0.2]]]",
+errMsg: "can only accept json format request, error: Mismatch type uint8",
 errCode: 1801,
 })
 queryTestCases = append(queryTestCases, requestBodyTestCase{
@@ -26,6 +26,7 @@ import (
 "strings"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/gin-gonic/gin"
 "github.com/spf13/cast"
 "github.com/tidwall/gjson"
@@ -710,7 +711,7 @@ func convertToIntArray(dataType schemapb.DataType, arr interface{}) []int32 {
 func anyToColumns(rows []map[string]interface{}, validDataMap map[string][]bool, sch *schemapb.CollectionSchema, inInsert bool) ([]*schemapb.FieldData, error) {
 rowsLen := len(rows)
 if rowsLen == 0 {
-return []*schemapb.FieldData{}, fmt.Errorf("no row need to be convert to columns")
+return []*schemapb.FieldData{}, errors.New("no row need to be convert to columns")
 }

 isDynamic := sch.EnableDynamicField
@@ -1292,7 +1293,7 @@ func buildQueryResp(rowsNum int64, needFields []string, fieldDataList []*schemap
 stringPks := ids.GetStrId().GetData()
 rowsNum = int64(len(stringPks))
 default:
-return nil, fmt.Errorf("the type of primary key(id) is not supported, use other sdk please")
+return nil, errors.New("the type of primary key(id) is not supported, use other sdk please")
 }
 }
 }
@@ -1427,7 +1428,7 @@ func buildQueryResp(rowsNum int64, needFields []string, fieldDataList []*schemap
 stringPks := ids.GetStrId().GetData()
 row[DefaultPrimaryFieldName] = stringPks[i]
 default:
-return nil, fmt.Errorf("the type of primary key(id) is not supported, use other sdk please")
+return nil, errors.New("the type of primary key(id) is not supported, use other sdk please")
 }
 }
 if scores != nil && int64(len(scores)) > i {
@@ -1804,7 +1805,8 @@ func getTemplateType(value interface{}) schemapb.DataType {
 case []interface{}:
 return schemapb.DataType_Array
 default:
-panic(fmt.Sprintf("Unexpected data(%v) when getTemplateType, please check it!", value))
+log.Panic(fmt.Sprintf("Unexpected data(%v) when getTemplateType, please check it!", value))
+return schemapb.DataType_None
 }
 }

@@ -17,6 +17,7 @@
 package httpserver

 import (
+"fmt"
 "math"
 "strconv"
 "strings"
@@ -1041,14 +1042,14 @@ func TestConvertQueries2Placeholder(t *testing.T) {
 dataType,
 0,
 func() [][]byte {
-return [][]byte{nil, nil}
+return [][]byte{{}, {}}
 },
 }, {
 `"{"data": [""]}"`,
 dataType,
 0,
 func() [][]byte {
-return [][]byte{nil}
+return [][]byte{{}}
 },
 },
 }...)
@@ -1113,7 +1114,8 @@ func TestConvertQueries2Placeholder(t *testing.T) {
 for _, testcase := range testCases {
 phv, err := convertQueries2Placeholder(testcase.requestBody, testcase.dataType, testcase.dim)
 assert.Nil(t, err)
-assert.Equal(t, testcase.placehoderValue(), phv.GetValues())
+assert.Equal(t, testcase.placehoderValue(), phv.GetValues(),
+fmt.Sprintf("check equal fail, data: %s, type: %s, dim: %d", testcase.requestBody, testcase.dataType, testcase.dim))
 }

 for _, testcase := range []testCase{
@@ -1140,7 +1142,8 @@ func TestConvertQueries2Placeholder(t *testing.T) {
 } {
 phv, err := convertQueries2Placeholder(testcase.requestBody, testcase.dataType, testcase.dim)
 assert.Nil(t, err)
-assert.NotEqual(t, testcase.placehoderValue(), phv.GetValues())
+assert.NotEqual(t, testcase.placehoderValue(), phv.GetValues(),
+fmt.Sprintf("check not equal fail, data: %s, type: %s, dim: %d", testcase.requestBody, testcase.dataType, testcase.dim))
 }

 for _, testcase := range []testCase{
@@ -20,7 +20,6 @@ import (
 "context"
 "crypto/tls"
 "crypto/x509"
-"fmt"
 "net"

 "github.com/cockroachdb/errors"
@@ -136,7 +135,7 @@ func newHTTPListner(ctx context.Context, l *listenerManager) error {
 }
 if !certPool.AppendCertsFromPEM(rootBuf) {
 log.Warn("fail to append ca to cert")
-return fmt.Errorf("fail to append ca to cert")
+return errors.New("fail to append ca to cert")
 }
 tlsConf = &tls.Config{
 ClientAuth: tls.RequireAndVerifyClientCert,
@@ -29,6 +29,7 @@ import (
 "sync"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/gin-gonic/gin"
 grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 "github.com/soheilhy/cmux"
@@ -301,7 +302,7 @@ func (s *Server) startExternalGrpc(errChan chan error) {
 }
 if !certPool.AppendCertsFromPEM(rootBuf) {
 log.Warn("fail to append ca to cert")
-errChan <- fmt.Errorf("fail to append ca to cert")
+errChan <- errors.New("fail to append ca to cert")
 return
 }

@@ -719,7 +719,7 @@ func TestServer_Check(t *testing.T) {
 assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, ret.Status)

 mockProxy.ExpectedCalls = nil
-mockProxy.EXPECT().GetComponentStates(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock grpc unexpected error"))
+mockProxy.EXPECT().GetComponentStates(mock.Anything, mock.Anything).Return(nil, errors.New("mock grpc unexpected error"))

 ret, err = server.Check(ctx, req)
 assert.Error(t, err)
@@ -773,7 +773,7 @@ func TestServer_Watch(t *testing.T) {
 assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, ret.Status)

 mockProxy.ExpectedCalls = nil
-mockProxy.EXPECT().GetComponentStates(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock grpc unexpected error"))
+mockProxy.EXPECT().GetComponentStates(mock.Anything, mock.Anything).Return(nil, errors.New("mock grpc unexpected error"))

 err = server.Watch(req, watchServer)
 ret = <-resultChan
@@ -18,8 +18,8 @@ package grpcquerycoordclient

 import (
 "context"
-"fmt"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"
 "google.golang.org/grpc"

@@ -51,7 +51,7 @@ type Client struct {
 func NewClient(ctx context.Context) (types.QueryCoordClient, error) {
 sess := sessionutil.NewSession(ctx)
 if sess == nil {
-err := fmt.Errorf("new session error, maybe can not connect to etcd")
+err := errors.New("new session error, maybe can not connect to etcd")
 log.Ctx(ctx).Debug("QueryCoordClient NewClient failed", zap.Error(err))
 return nil, err
 }
@@ -90,7 +90,7 @@ func (c *Client) getQueryCoordAddr() (string, error) {
 ms, ok := msess[key]
 if !ok {
 log.Ctx(c.ctx).Debug("QueryCoordClient msess key not existed", zap.Any("key", key))
-return "", fmt.Errorf("find no available querycoord, check querycoord state")
+return "", errors.New("find no available querycoord, check querycoord state")
 }

 log.Ctx(c.ctx).Debug("QueryCoordClient GetSessions success",
@@ -20,6 +20,7 @@ import (
 "context"
 "fmt"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"
 "google.golang.org/grpc"

@@ -52,11 +53,11 @@ type Client struct {
 // NewClient creates a new QueryNode client.
 func NewClient(ctx context.Context, addr string, nodeID int64) (types.QueryNodeClient, error) {
 if addr == "" {
-return nil, fmt.Errorf("addr is empty")
+return nil, errors.New("addr is empty")
 }
 sess := sessionutil.NewSession(ctx)
 if sess == nil {
-err := fmt.Errorf("new session error, maybe can not connect to etcd")
+err := errors.New("new session error, maybe can not connect to etcd")
 log.Ctx(ctx).Debug("QueryNodeClient NewClient failed", zap.Error(err))
 return nil, err
 }
@@ -18,8 +18,8 @@ package grpcrootcoordclient

 import (
 "context"
-"fmt"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"
 "google.golang.org/grpc"
 grpcCodes "google.golang.org/grpc/codes"
@@ -58,7 +58,7 @@ type Client struct {
 func NewClient(ctx context.Context) (types.RootCoordClient, error) {
 sess := sessionutil.NewSession(ctx)
 if sess == nil {
-err := fmt.Errorf("new session error, maybe can not connect to etcd")
+err := errors.New("new session error, maybe can not connect to etcd")
 log.Ctx(ctx).Debug("New RootCoord Client failed", zap.Error(err))
 return nil, err
 }
@@ -102,7 +102,7 @@ func (c *Client) getRootCoordAddr() (string, error) {
 ms, ok := msess[key]
 if !ok {
 log.Warn("RootCoordClient mess key not exist", zap.Any("key", key))
-return "", fmt.Errorf("find no available rootcoord, check rootcoord state")
+return "", errors.New("find no available rootcoord, check rootcoord state")
 }
 log.Debug("RootCoordClient GetSessions success",
 zap.String("address", ms.Address),
@@ -24,6 +24,7 @@ import (
 "testing"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/stretchr/testify/assert"
 "github.com/tikv/client-go/v2/txnkv"
 clientv3 "go.etcd.io/etcd/client/v3"
@@ -115,7 +116,7 @@ func (m *mockCore) Start() error {
 }

 func (m *mockCore) Stop() error {
-return fmt.Errorf("stop error")
+return errors.New("stop error")
 }

 func (m *mockCore) GracefulStop() {
@@ -19,6 +19,7 @@ package pipeline
 import (
 "fmt"

+"github.com/cockroachdb/errors"
 "github.com/samber/lo"
 "go.uber.org/zap"

@@ -103,7 +104,7 @@ func (eNode *embeddingNode) bm25Embedding(runner function.FunctionRunner, data *

 sparseArray, ok := output[0].(*schemapb.SparseFloatArray)
 if !ok {
-return fmt.Errorf("BM25 embedding failed: BM25 runner output not sparse map")
+return errors.New("BM25 embedding failed: BM25 runner output not sparse map")
 }

 meta[outputFieldId].AppendBytes(sparseArray.GetContents()...)
@@ -20,7 +20,6 @@ import (
 "bytes"
 "context"
 "encoding/binary"
-"fmt"
 "math"

 "github.com/cockroachdb/errors"
@@ -833,7 +832,7 @@ func (m *RootCoordFactory) AllocTimestamp(ctx context.Context, in *rootcoordpb.A
 v := ctx.Value(ctxKey{})
 if v != nil && v.(string) == returnError {
 resp.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
-return resp, fmt.Errorf("injected error")
+return resp, errors.New("injected error")
 }

 return resp, nil
@@ -876,7 +875,7 @@ func (m *RootCoordFactory) ShowPartitions(ctx context.Context, req *milvuspb.Sho
 if m.ShowPartitionsErr {
 return &milvuspb.ShowPartitionsResponse{
 Status: merr.Success(),
-}, fmt.Errorf("mock show partitions error")
+}, errors.New("mock show partitions error")
 }

 if m.ShowPartitionsNotSuccess {
@@ -6,6 +6,7 @@ import (
 "strconv"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/hashicorp/golang-lru/v2/expirable"
 "go.uber.org/zap"

@@ -101,7 +102,7 @@ func (mgr *syncManager) resizeHandler(evt *config.Event) {

 func (mgr *syncManager) SyncData(ctx context.Context, task Task, callbacks ...func(error) error) (*conc.Future[struct{}], error) {
 if mgr.workerPool.IsClosed() {
-return nil, fmt.Errorf("sync manager is closed")
+return nil, errors.New("sync manager is closed")
 }

 switch t := task.(type) {
@@ -7,6 +7,7 @@ import (
 "sync"
 "time"

+"github.com/cockroachdb/errors"
 "golang.org/x/exp/mmap"

 "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
@@ -28,7 +29,7 @@ const (
 )

 var (
-errNotImplErr = fmt.Errorf("not implemented error")
+errNotImplErr = errors.New("not implemented error")

 collschema = &schemapb.CollectionSchema{
 Name: "mock_collection",
@@ -125,7 +126,7 @@ func (c *mockChunkmgr) Exist(ctx context.Context, filePath string) (bool, error)
 func (c *mockChunkmgr) Read(ctx context.Context, filePath string) ([]byte, error) {
 value, ok := c.segmentData.Load(filePath)
 if !ok {
-return nil, fmt.Errorf("data not exists")
+return nil, errors.New("data not exists")
 }
 return value.(*storage.Blob).Value, nil
 }
@@ -224,7 +225,7 @@ func (f *mockFactory) NewPersistentStorageChunkManager(context.Context) (storage
 if f.chunkMgr != nil {
 return f.chunkMgr, nil
 }
-return nil, fmt.Errorf("factory not inited")
+return nil, errors.New("factory not inited")
 }

 func (f *mockFactory) Init(*paramtable.ComponentParam) {
@@ -21,6 +21,7 @@ import (
 "fmt"
 "strconv"

+"github.com/cockroachdb/errors"
 "github.com/tidwall/gjson"
 "go.opentelemetry.io/otel"
 "go.opentelemetry.io/otel/attribute"
@@ -544,7 +545,7 @@ func (i *IndexNode) QueryJobsV2(ctx context.Context, req *workerpb.QueryJobsV2Re
 default:
 log.Warn("IndexNode receive querying unknown type jobs")
 return &workerpb.QueryJobsV2Response{
-Status: merr.Status(fmt.Errorf("IndexNode receive querying unknown type jobs")),
+Status: merr.Status(errors.New("IndexNode receive querying unknown type jobs")),
 }, nil
 }
 }
@@ -605,6 +606,6 @@ func (i *IndexNode) DropJobsV2(ctx context.Context, req *workerpb.DropJobsV2Requ
 return merr.Success(), nil
 default:
 log.Warn("IndexNode receive dropping unknown type jobs")
-return merr.Status(fmt.Errorf("IndexNode receive dropping unknown type jobs")), nil
+return merr.Status(errors.New("IndexNode receive dropping unknown type jobs")), nil
 }
 }
@@ -18,14 +18,15 @@ package indexnode

 import (
 "context"
-"fmt"
+"github.com/cockroachdb/errors"

 "github.com/milvus-io/milvus/internal/storage"
 "github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
 )

 var (
-errCancel = fmt.Errorf("canceled")
+errCancel = errors.New("canceled")
 diskUsageRatio = 4.0
 )

@@ -7,6 +7,7 @@ import (
 "testing"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/stretchr/testify/assert"

 "github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
@@ -49,7 +50,7 @@ func (s *stagectx) Done() <-chan struct{} {
 func (s *stagectx) Err() error {
 select {
 case <-s.ch:
-return fmt.Errorf("canceled")
+return errors.New("canceled")
 default:
 return nil
 }
@@ -173,7 +174,7 @@ func TestIndexTaskScheduler(t *testing.T) {
 newTask(fakeTaskPrepared, nil, indexpb.JobState_JobStateRetry),
 newTask(fakeTaskBuiltIndex, nil, indexpb.JobState_JobStateRetry),
 newTask(fakeTaskSavedIndexes, nil, indexpb.JobState_JobStateFinished),
-newTask(fakeTaskSavedIndexes, map[fakeTaskState]error{fakeTaskSavedIndexes: fmt.Errorf("auth failed")}, indexpb.JobState_JobStateRetry))
+newTask(fakeTaskSavedIndexes, map[fakeTaskState]error{fakeTaskSavedIndexes: errors.New("auth failed")}, indexpb.JobState_JobStateRetry))

 for _, task := range tasks {
 assert.Nil(t, scheduler.TaskQueue.Enqueue(task))
@@ -25,6 +25,7 @@ import (
 "strconv"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/samber/lo"
 "go.opentelemetry.io/otel"
 "go.uber.org/zap"
@@ -179,8 +180,8 @@ func (st *statsTask) PreExecute(ctx context.Context) error {
 func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, error) {
 numRows := st.req.GetNumRows()

-bm25FieldIds := compaction.GetBM25FieldIDs(st.req.GetSchema())
+bm25FieldIDs := compaction.GetBM25FieldIDs(st.req.GetSchema())
-writer, err := compaction.NewSegmentWriter(st.req.GetSchema(), numRows, statsBatchSize, st.req.GetTargetSegmentID(), st.req.GetPartitionID(), st.req.GetCollectionID(), bm25FieldIds)
+writer, err := compaction.NewSegmentWriter(st.req.GetSchema(), numRows, statsBatchSize, st.req.GetTargetSegmentID(), st.req.GetPartitionID(), st.req.GetCollectionID(), bm25FieldIDs)
 if err != nil {
 log.Ctx(ctx).Warn("sort segment wrong, unable to init segment writer",
 zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err))
@@ -199,7 +200,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er
 )

 downloadStart := time.Now()
-values, err := st.downloadData(ctx, numRows, writer.GetPkID(), bm25FieldIds)
+values, err := st.downloadData(ctx, numRows, writer.GetPkID(), bm25FieldIDs)
 if err != nil {
 log.Ctx(ctx).Warn("download data failed", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err))
 return nil, err
@@ -237,7 +238,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er
 log.Ctx(ctx).Warn("binlog files too much, log is not enough", zap.Int64("taskID", st.req.GetTaskID()),
 zap.Int64("binlog num", binlogNum), zap.Int64("startLogID", st.req.GetStartLogID()),
 zap.Int64("endLogID", st.req.GetEndLogID()), zap.Int64("logIDOffset", st.logIDOffset))
-return nil, fmt.Errorf("binlog files too much, log is not enough")
+return nil, errors.New("binlog files too much, log is not enough")
 }
 }
 }
@@ -275,7 +276,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er
 st.logIDOffset += binlogNums

 var bm25StatsLogs []*datapb.FieldBinlog
-if len(bm25FieldIds) > 0 {
+if len(bm25FieldIDs) > 0 {
 binlogNums, bm25StatsLogs, err = bm25SerializeWrite(ctx, st.req.GetStorageConfig().GetRootPath(), st.binlogIO, st.req.GetStartLogID()+st.logIDOffset, writer, numRows)
 if err != nil {
 log.Ctx(ctx).Warn("compact wrong, failed to serialize write segment bm25 stats", zap.Error(err))
@@ -400,14 +401,14 @@ func (st *statsTask) Reset() {
 st.node = nil
 }

-func (st *statsTask) downloadData(ctx context.Context, numRows int64, PKFieldID int64, bm25FieldIds []int64) ([]*storage.Value, error) {
+func (st *statsTask) downloadData(ctx context.Context, numRows int64, PKFieldID int64, bm25FieldIDs []int64) ([]*storage.Value, error) {
 log := log.Ctx(ctx).With(
 zap.String("clusterID", st.req.GetClusterID()),
 zap.Int64("taskID", st.req.GetTaskID()),
 zap.Int64("collectionID", st.req.GetCollectionID()),
 zap.Int64("partitionID", st.req.GetPartitionID()),
 zap.Int64("segmentID", st.req.GetSegmentID()),
-zap.Int64s("bm25Fields", bm25FieldIds),
+zap.Int64s("bm25Fields", bm25FieldIDs),
 )

 deletePKs, err := st.loadDeltalogs(ctx, st.deltaLogs)
@@ -18,10 +18,10 @@ package indexnode

 import (
 "context"
-"fmt"
 "testing"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/samber/lo"
 "github.com/stretchr/testify/mock"
 "github.com/stretchr/testify/suite"
@@ -94,7 +94,7 @@ func (s *TaskStatsSuite) Testbm25SerializeWriteError() {

 s.Run("upload failed", func() {
 s.schema = genCollectionSchemaWithBM25()
-s.mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
+s.mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()
 s.GenSegmentWriterWithBM25(0)
 _, _, err := bm25SerializeWrite(context.Background(), "root_path", s.mockBinlogIO, 0, s.segWriter, 1)
 s.Error(err)
@@ -158,7 +158,7 @@ func (s *TaskStatsSuite) TestSortSegmentWithBM25() {
 }
 return result, nil
 })
-s.mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
+s.mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(errors.New("mock error")).Once()
 s.mockBinlogIO.EXPECT().AsyncUpload(mock.Anything, mock.Anything).Return(nil)

 ctx, cancel := context.WithCancel(context.Background())
@@ -747,7 +747,7 @@ func (kv *txnTiKV) removeTiKVMeta(ctx context.Context, key string) error {
 }

 func (kv *txnTiKV) CompareVersionAndSwap(ctx context.Context, key string, version int64, target string) (bool, error) {
-err := fmt.Errorf("Unimplemented! CompareVersionAndSwap is under deprecation")
+err := errors.New("Unimplemented! CompareVersionAndSwap is under deprecation")
 logWarnOnFailure(&err, "Unimplemented")
 return false, err
 }
@@ -301,7 +301,7 @@ func TestTiKVLoad(te *testing.T) {
 defer kv.Close()

 beginTxn = func(txn *txnkv.Client) (*transaction.KVTxn, error) {
-return nil, fmt.Errorf("bad txn!")
+return nil, errors.New("bad txn!")
 }
 defer func() {
 beginTxn = tiTxnBegin
@@ -326,7 +326,7 @@ func TestTiKVLoad(te *testing.T) {
 defer kv.Close()

 commitTxn = func(ctx context.Context, txn *transaction.KVTxn) error {
-return fmt.Errorf("bad txn commit!")
+return errors.New("bad txn commit!")
 }
 defer func() {
 commitTxn = tiTxnCommit
@@ -631,7 +631,7 @@ func (kc *Catalog) DropCollection(ctx context.Context, collectionInfo *model.Col

 func (kc *Catalog) alterModifyCollection(ctx context.Context, oldColl *model.Collection, newColl *model.Collection, ts typeutil.Timestamp) error {
 if oldColl.TenantID != newColl.TenantID || oldColl.CollectionID != newColl.CollectionID {
-return fmt.Errorf("altering tenant id or collection id is forbidden")
+return errors.New("altering tenant id or collection id is forbidden")
 }
 oldCollClone := oldColl.Clone()
 oldCollClone.DBID = newColl.DBID
@@ -684,7 +684,7 @@ func (kc *Catalog) AlterCollection(ctx context.Context, oldColl *model.Collectio

 func (kc *Catalog) alterModifyPartition(ctx context.Context, oldPart *model.Partition, newPart *model.Partition, ts typeutil.Timestamp) error {
 if oldPart.CollectionID != newPart.CollectionID || oldPart.PartitionID != newPart.PartitionID {
-return fmt.Errorf("altering collection id or partition id is forbidden")
+return errors.New("altering collection id or partition id is forbidden")
 }
 oldPartClone := oldPart.Clone()
 newPartClone := newPart.Clone()
@@ -1103,7 +1103,7 @@ func (kc *Catalog) ListRole(ctx context.Context, tenant string, entity *milvuspb
 }
 } else {
 if funcutil.IsEmptyString(entity.Name) {
-return results, fmt.Errorf("role name in the role entity is empty")
+return results, errors.New("role name in the role entity is empty")
 }
 roleKey := funcutil.HandleTenantForEtcdKey(RolePrefix, tenant, entity.Name)
 _, err := kc.Txn.Load(ctx, roleKey)
@@ -1178,7 +1178,7 @@ func (kc *Catalog) ListUser(ctx context.Context, tenant string, entity *milvuspb
 }
 } else {
 if funcutil.IsEmptyString(entity.Name) {
-return results, fmt.Errorf("username in the user entity is empty")
+return results, errors.New("username in the user entity is empty")
 }
 _, err = kc.GetCredential(ctx, entity.Name)
 if err != nil {
@@ -1646,7 +1646,7 @@ func TestRBAC_Role(t *testing.T) {

 notExistKey = "not-exist"
 errorKey = "error"
-otherError = fmt.Errorf("mock load error")
+otherError = errors.New("mock load error")
 )

 kvmock.EXPECT().Load(mock.Anything, notExistKey).Return("", merr.WrapErrIoKeyNotFound(notExistKey)).Once()
@@ -1690,7 +1690,7 @@ func TestRBAC_Role(t *testing.T) {

 notExistKey = "not-exist"
 errorKey = "error"
-otherError = fmt.Errorf("mock load error")
+otherError = errors.New("mock load error")
 )

 kvmock.EXPECT().Load(mock.Anything, notExistKey).Return("", merr.WrapErrIoKeyNotFound(notExistKey)).Once()
@@ -1737,7 +1737,7 @@ func TestRBAC_Role(t *testing.T) {
 notExistPath = funcutil.HandleTenantForEtcdKey(RolePrefix, tenant, notExistName)
 errorName = "error"
 errorPath = funcutil.HandleTenantForEtcdKey(RolePrefix, tenant, errorName)
-otherError = fmt.Errorf("mock load error")
+otherError = errors.New("mock load error")
 )

 kvmock.EXPECT().Load(mock.Anything, notExistPath).Return("", merr.WrapErrIoKeyNotFound(notExistName)).Once()
@@ -3077,7 +3077,7 @@ func TestCatalog_AlterDatabase(t *testing.T) {
 func TestCatalog_listFunctionError(t *testing.T) {
 mockSnapshot := newMockSnapshot(t)
 kc := NewCatalog(nil, mockSnapshot).(*Catalog)
-mockSnapshot.EXPECT().LoadWithPrefix(mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("mock error"))
+mockSnapshot.EXPECT().LoadWithPrefix(mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, errors.New("mock error"))
 _, err := kc.listFunctions(context.TODO(), 1, 1)
 assert.Error(t, err)

@@ -1002,7 +1002,7 @@ func genDSLByIndexType(schema *schemapb.CollectionSchema, indexType string) (str
 } else if indexType == IndexHNSW {
 return genHNSWDSL(schema, ef, defaultTopK, defaultRoundDecimal)
 }
-return "", fmt.Errorf("Invalid indexType")
+return "", errors.New("Invalid indexType")
 }

 func genBruteForceDSL(schema *schemapb.CollectionSchema, topK int64, roundDecimal int64) (string, error) {
@@ -1093,7 +1093,7 @@ func CheckSearchResult(ctx context.Context, nq int64, plan *segcore.SearchPlan,
 return err
 }
 if len(blob) == 0 {
-return fmt.Errorf("wrong search result data blobs when checkSearchResult")
+return errors.New("wrong search result data blobs when checkSearchResult")
 }

 result := &schemapb.SearchResultData{}
@@ -1103,17 +1103,17 @@ func CheckSearchResult(ctx context.Context, nq int64, plan *segcore.SearchPlan,
 }

 if result.TopK != sliceTopKs[i] {
-return fmt.Errorf("unexpected topK when checkSearchResult")
+return errors.New("unexpected topK when checkSearchResult")
 }
 if result.NumQueries != sInfo.SliceNQs[i] {
-return fmt.Errorf("unexpected nq when checkSearchResult")
+return errors.New("unexpected nq when checkSearchResult")
 }
 // search empty segment, return empty result.IDs
 if len(result.Ids.IdField.(*schemapb.IDs_IntId).IntId.Data) <= 0 {
-return fmt.Errorf("unexpected Ids when checkSearchResult")
+return errors.New("unexpected Ids when checkSearchResult")
 }
 if len(result.Scores) <= 0 {
-return fmt.Errorf("unexpected Scores when checkSearchResult")
+return errors.New("unexpected Scores when checkSearchResult")
 }
 }

@@ -4,6 +4,8 @@ import (
 "bytes"
 "fmt"

+"github.com/cockroachdb/errors"
+
 "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
 "github.com/milvus-io/milvus/internal/json"
 "github.com/milvus-io/milvus/pkg/v2/proto/planpb"
@@ -136,7 +138,7 @@ func ConvertToGenericValue(templateName string, templateValue *schemapb.Template
 case *schemapb.TemplateValue_ArrayVal:
 return convertArrayValue(templateName, templateValue.GetArrayVal())
 default:
-return nil, fmt.Errorf("expression elements can only be scalars")
+return nil, errors.New("expression elements can only be scalars")
 }
 }

@@ -2,7 +2,6 @@ package planparserv2

 import (
 "fmt"
-"strconv"

 "github.com/antlr4-go/antlr/v4"
 )
@@ -18,7 +17,7 @@ type errorListenerImpl struct {
 }

 func (l *errorListenerImpl) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
-l.err = fmt.Errorf("line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) + " " + msg)
+l.err = fmt.Errorf("line %d:%d %s", line, column, msg)
 }

 func (l *errorListenerImpl) Error() error {
@@ -3,6 +3,8 @@ package planparserv2
 import (
 "fmt"

+"github.com/cockroachdb/errors"
+
 "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
 "github.com/milvus-io/milvus/pkg/v2/proto/planpb"
 "github.com/milvus-io/milvus/pkg/v2/util/typeutil"
@@ -127,11 +129,11 @@ func FillBinaryRangeExpressionValue(expr *planpb.BinaryRangeExpr, templateValues

 if !(expr.GetLowerInclusive() && expr.GetUpperInclusive()) {
 if getGenericValue(GreaterEqual(lowerValue, upperValue)).GetBoolVal() {
-return fmt.Errorf("invalid range: lowerbound is greater than upperbound")
+return errors.New("invalid range: lowerbound is greater than upperbound")
 }
 } else {
 if getGenericValue(Greater(lowerValue, upperValue)).GetBoolVal() {
-return fmt.Errorf("invalid range: lowerbound is greater than upperbound")
+return errors.New("invalid range: lowerbound is greater than upperbound")
 }
 }

@@ -166,7 +168,7 @@ func FillBinaryArithOpEvalRangeExpressionValue(expr *planpb.BinaryArithOpEvalRan
 }

 if operand.GetArrayVal() != nil {
-return fmt.Errorf("can not comparisons array directly")
+return errors.New("can not comparisons array directly")
 }

 dataType, err = getTargetType(lDataType, rDataType)
@@ -1,9 +1,10 @@
 package planparserv2

 import (
-"fmt"
 "math"

+"github.com/cockroachdb/errors"
+
 "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
 parser "github.com/milvus-io/milvus/internal/parser/planparserv2/generated"
 "github.com/milvus-io/milvus/pkg/v2/proto/planpb"
@@ -182,21 +183,21 @@ func Divide(a, b *planpb.GenericValue) (*ExprWithType, error) {
 }

 if IsBool(a) || IsBool(b) {
-return nil, fmt.Errorf("divide cannot apply on bool field")
+return nil, errors.New("divide cannot apply on bool field")
 }

 if IsString(a) || IsString(b) {
-return nil, fmt.Errorf("divide cannot apply on string field")
+return nil, errors.New("divide cannot apply on string field")
 }

 aFloat, bFloat, aInt, bInt := IsFloating(a), IsFloating(b), IsInteger(a), IsInteger(b)

 if bFloat && b.GetFloatVal() == 0 {
-return nil, fmt.Errorf("cannot divide by zero")
+return nil, errors.New("cannot divide by zero")
 }

 if bInt && b.GetInt64Val() == 0 {
-return nil, fmt.Errorf("cannot divide by zero")
+return nil, errors.New("cannot divide by zero")
 }

 if aFloat && bFloat {
@@ -228,12 +229,12 @@ func Modulo(a, b *planpb.GenericValue) (*ExprWithType, error) {

 aInt, bInt := IsInteger(a), IsInteger(b)
 if !aInt || !bInt {
-return nil, fmt.Errorf("modulo can only apply on integer")
+return nil, errors.New("modulo can only apply on integer")
 }

 // aInt && bInt
 if b.GetInt64Val() == 0 {
-return nil, fmt.Errorf("cannot modulo by zero")
+return nil, errors.New("cannot modulo by zero")
 }

 ret.dataType = schemapb.DataType_Int64
@@ -281,29 +282,29 @@ func Power(a, b *planpb.GenericValue) *ExprWithType {
 }

 func BitAnd(a, b *planpb.GenericValue) (*ExprWithType, error) {
-return nil, fmt.Errorf("todo: unsupported")
+return nil, errors.New("todo: unsupported")
 }

 func BitOr(a, b *planpb.GenericValue) (*ExprWithType, error) {
-return nil, fmt.Errorf("todo: unsupported")
+return nil, errors.New("todo: unsupported")
 }

 func BitXor(a, b *planpb.GenericValue) (*ExprWithType, error) {
-return nil, fmt.Errorf("todo: unsupported")
+return nil, errors.New("todo: unsupported")
 }

 func ShiftLeft(a, b *planpb.GenericValue) (*ExprWithType, error) {
-return nil, fmt.Errorf("todo: unsupported")
+return nil, errors.New("todo: unsupported")
 }

 func ShiftRight(a, b *planpb.GenericValue) (*ExprWithType, error) {
-return nil, fmt.Errorf("todo: unsupported")
+return nil, errors.New("todo: unsupported")
 }

 func And(a, b *planpb.GenericValue) (*ExprWithType, error) {
 aBool, bBool := IsBool(a), IsBool(b)
 if !aBool || !bBool {
-return nil, fmt.Errorf("and can only apply on boolean")
+return nil, errors.New("and can only apply on boolean")
 }
 return &ExprWithType{
 dataType: schemapb.DataType_Bool,
@@ -320,7 +321,7 @@ func And(a, b *planpb.GenericValue) (*ExprWithType, error) {
 func Or(a, b *planpb.GenericValue) (*ExprWithType, error) {
 aBool, bBool := IsBool(a), IsBool(b)
 if !aBool || !bBool {
-return nil, fmt.Errorf("or can only apply on boolean")
+return nil, errors.New("or can only apply on boolean")
 }
 return &ExprWithType{
 dataType: schemapb.DataType_Bool,
@@ -335,7 +336,7 @@ func Or(a, b *planpb.GenericValue) (*ExprWithType, error) {
 }

 func BitNot(a *planpb.GenericValue) (*ExprWithType, error) {
-return nil, fmt.Errorf("todo: unsupported")
+return nil, errors.New("todo: unsupported")
 }

 func Negative(a *planpb.GenericValue) *ExprWithType {
@@ -419,7 +420,7 @@ func less() relationalFn {
 return a.GetInt64Val() < b.GetInt64Val(), nil
 }

-return false, fmt.Errorf("incompatible data type")
+return false, errors.New("incompatible data type")
 }
 }

@@ -6,6 +6,7 @@ import (
 "strings"

 "github.com/antlr4-go/antlr/v4"
+"github.com/cockroachdb/errors"

 "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
 parser "github.com/milvus-io/milvus/internal/parser/planparserv2/generated"
@@ -152,7 +153,7 @@ func (v *ParserVisitor) VisitString(ctx *parser.StringContext) interface{} {

 func checkDirectComparisonBinaryField(columnInfo *planpb.ColumnInfo) error {
 if typeutil.IsArrayType(columnInfo.GetDataType()) && len(columnInfo.GetNestedPath()) == 0 {
-return fmt.Errorf("can not comparisons array fields directly")
+return errors.New("can not comparisons array fields directly")
 }
 return nil
 }
@@ -441,12 +442,12 @@ func (v *ParserVisitor) VisitLike(ctx *parser.LikeContext) interface{} {

 leftExpr := getExpr(left)
 if leftExpr == nil {
-return fmt.Errorf("the left operand of like is invalid")
+return errors.New("the left operand of like is invalid")
 }

 column := toColumnInfo(leftExpr)
 if column == nil {
-return fmt.Errorf("like operation on complicated expr is unsupported")
+return errors.New("like operation on complicated expr is unsupported")
 }
 if err := checkDirectComparisonBinaryField(column); err != nil {
 return err
@@ -454,7 +455,7 @@ func (v *ParserVisitor) VisitLike(ctx *parser.LikeContext) interface{} {

 if !typeutil.IsStringType(leftExpr.dataType) && !typeutil.IsJSONType(leftExpr.dataType) &&
 !(typeutil.IsArrayType(leftExpr.dataType) && typeutil.IsStringType(column.GetElementType())) {
-return fmt.Errorf("like operation on non-string or no-json field is unsupported")
+return errors.New("like operation on non-string or no-json field is unsupported")
 }

 pattern, err := convertEscapeSingle(ctx.StringLiteral().GetText())
@@ -487,7 +488,7 @@ func (v *ParserVisitor) VisitTextMatch(ctx *parser.TextMatchContext) interface{}
 return err
 }
 if !typeutil.IsStringType(column.dataType) {
-return fmt.Errorf("text match operation on non-string is unsupported")
+return errors.New("text match operation on non-string is unsupported")
 }

 queryText, err := convertEscapeSingle(ctx.StringLiteral().GetText())
@@ -677,11 +678,11 @@ func (v *ParserVisitor) VisitRange(ctx *parser.RangeContext) interface{} {
 if !isTemplateExpr(lowerValueExpr) && !isTemplateExpr(upperValueExpr) {
 if !(lowerInclusive && upperInclusive) {
 if getGenericValue(GreaterEqual(lowerValue, upperValue)).GetBoolVal() {
-return fmt.Errorf("invalid range: lowerbound is greater than upperbound")
+return errors.New("invalid range: lowerbound is greater than upperbound")
 }
 } else {
 if getGenericValue(Greater(lowerValue, upperValue)).GetBoolVal() {
-return fmt.Errorf("invalid range: lowerbound is greater than upperbound")
+return errors.New("invalid range: lowerbound is greater than upperbound")
 }
 }
 }
@@ -759,11 +760,11 @@ func (v *ParserVisitor) VisitReverseRange(ctx *parser.ReverseRangeContext) inter
 if !isTemplateExpr(lowerValueExpr) && !isTemplateExpr(upperValueExpr) {
 if !(lowerInclusive && upperInclusive) {
 if getGenericValue(GreaterEqual(lowerValue, upperValue)).GetBoolVal() {
-return fmt.Errorf("invalid range: lowerbound is greater than upperbound")
+return errors.New("invalid range: lowerbound is greater than upperbound")
 }
 } else {
 if getGenericValue(Greater(lowerValue, upperValue)).GetBoolVal() {
-return fmt.Errorf("invalid range: lowerbound is greater than upperbound")
+return errors.New("invalid range: lowerbound is greater than upperbound")
 }
 }
 }
@@ -811,7 +812,7 @@ func (v *ParserVisitor) VisitUnary(ctx *parser.UnaryContext) interface{} {

 childExpr := getExpr(child)
 if childExpr == nil {
-return fmt.Errorf("failed to parse unary expressions")
+return errors.New("failed to parse unary expressions")
 }
 if err := checkDirectComparisonBinaryField(toColumnInfo(childExpr)); err != nil {
 return err
@@ -860,7 +861,7 @@ func (v *ParserVisitor) VisitLogicalOr(ctx *parser.LogicalOrContext) interface{}
 }

 if leftValue != nil || rightValue != nil {
-return fmt.Errorf("'or' can only be used between boolean expressions")
+return errors.New("'or' can only be used between boolean expressions")
 }

 var leftExpr *ExprWithType
@@ -869,7 +870,7 @@ func (v *ParserVisitor) VisitLogicalOr(ctx *parser.LogicalOrContext) interface{}
 rightExpr = getExpr(right)

 if !canBeExecuted(leftExpr) || !canBeExecuted(rightExpr) {
-return fmt.Errorf("'or' can only be used between boolean expressions")
+return errors.New("'or' can only be used between boolean expressions")
 }
 expr := &planpb.Expr{
 Expr: &planpb.Expr_BinaryExpr{
@@ -909,7 +910,7 @@ func (v *ParserVisitor) VisitLogicalAnd(ctx *parser.LogicalAndContext) interface
 }

 if leftValue != nil || rightValue != nil {
-return fmt.Errorf("'and' can only be used between boolean expressions")
+return errors.New("'and' can only be used between boolean expressions")
 }

 var leftExpr *ExprWithType
@@ -918,7 +919,7 @@ func (v *ParserVisitor) VisitLogicalAnd(ctx *parser.LogicalAndContext) interface
 rightExpr = getExpr(right)

 if !canBeExecuted(leftExpr) || !canBeExecuted(rightExpr) {
-return fmt.Errorf("'and' can only be used between boolean expressions")
+return errors.New("'and' can only be used between boolean expressions")
 }
 expr := &planpb.Expr{
 Expr: &planpb.Expr_BinaryExpr{
@@ -1017,7 +1018,7 @@ func (v *ParserVisitor) getColumnInfoFromJSONIdentifier(identifier string) (*pla
 if field.GetDataType() != schemapb.DataType_JSON &&
 field.GetDataType() != schemapb.DataType_Array {
 errMsg := fmt.Sprintf("%s data type not supported accessed with []", field.GetDataType())
-return nil, fmt.Errorf(errMsg)
+return nil, fmt.Errorf("%s", errMsg)
 }
 if fieldName != field.Name {
 nestedPath = append(nestedPath, fieldName)
@@ -1036,7 +1037,7 @@ func (v *ParserVisitor) getColumnInfoFromJSONIdentifier(identifier string) (*pla
 return nil, fmt.Errorf("invalid identifier: %s", identifier)
 }
 if typeutil.IsArrayType(field.DataType) {
-return nil, fmt.Errorf("can only access array field with integer index")
+return nil, errors.New("can only access array field with integer index")
 }
 } else if _, err := strconv.ParseInt(path, 10, 64); err != nil {
 return nil, fmt.Errorf("json key must be enclosed in double quotes or single quotes: \"%s\"", path)
@@ -7,6 +7,8 @@ import (
 "strings"
 "unicode"

+"github.com/cockroachdb/errors"
+
 "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
 "github.com/milvus-io/milvus/internal/json"
 "github.com/milvus-io/milvus/pkg/v2/proto/planpb"
@@ -294,12 +296,12 @@ func handleBinaryArithExpr(op planpb.OpType, arithExpr *planpb.BinaryArithExpr,

 if leftExpr != nil && rightExpr != nil {
 // a + b == 3
-return nil, fmt.Errorf("not supported to do arithmetic operations between multiple fields")
+return nil, errors.New("not supported to do arithmetic operations between multiple fields")
 }

 if leftValue != nil && rightValue != nil {
 // 2 + 1 == 3
-return nil, fmt.Errorf("unexpected, should be optimized already")
+return nil, errors.New("unexpected, should be optimized already")
 }

 if leftExpr != nil && rightValue != nil {
@@ -320,11 +322,11 @@ func handleBinaryArithExpr(op planpb.OpType, arithExpr *planpb.BinaryArithExpr,
 case planpb.ArithOpType_Add, planpb.ArithOpType_Mul:
 return combineBinaryArithExpr(op, arithOp, arithExprDataType, rightExpr.GetInfo(), leftValue, valueExpr)
 default:
-return nil, fmt.Errorf("module field is not yet supported")
+return nil, errors.New("module field is not yet supported")
 }
 } else {
 // (a + b) / 2 == 3
-return nil, fmt.Errorf("complicated arithmetic operations are not supported")
+return nil, errors.New("complicated arithmetic operations are not supported")
 }
 }

@@ -348,7 +350,7 @@ func handleCompareRightValue(op planpb.OpType, left *ExprWithType, right *planpb

 columnInfo := toColumnInfo(left)
 if columnInfo == nil {
-return nil, fmt.Errorf("not supported to combine multiple fields")
+return nil, errors.New("not supported to combine multiple fields")
 }
 expr := &planpb.Expr{
 Expr: &planpb.Expr_UnaryRangeExpr{
@@ -388,7 +390,7 @@ func handleCompare(op planpb.OpType, left *ExprWithType, right *ExprWithType) (*
 }

 if leftColumnInfo == nil || rightColumnInfo == nil {
-return nil, fmt.Errorf("only comparison between two fields is supported")
+return nil, errors.New("only comparison between two fields is supported")
 }

 expr := &planpb.Expr{
@@ -629,7 +631,7 @@ func checkValidModArith(tokenType planpb.ArithOpType, leftType, leftElementType,
 switch tokenType {
 case planpb.ArithOpType_Mod:
 if !canConvertToIntegerType(leftType, leftElementType) || !canConvertToIntegerType(rightType, rightElementType) {
-return fmt.Errorf("modulo can only apply on integer types")
+return errors.New("modulo can only apply on integer types")
 }
 default:
 }
@@ -640,17 +642,17 @@ func castRangeValue(dataType schemapb.DataType, value *planpb.GenericValue) (*pl
 switch dataType {
 case schemapb.DataType_String, schemapb.DataType_VarChar:
 if !IsString(value) {
-return nil, fmt.Errorf("invalid range operations")
+return nil, errors.New("invalid range operations")
 }
 case schemapb.DataType_Bool:
-return nil, fmt.Errorf("invalid range operations on boolean expr")
+return nil, errors.New("invalid range operations on boolean expr")
 case schemapb.DataType_Int8, schemapb.DataType_Int16, schemapb.DataType_Int32, schemapb.DataType_Int64:
 if !IsInteger(value) {
-return nil, fmt.Errorf("invalid range operations")
+return nil, errors.New("invalid range operations")
 }
 case schemapb.DataType_Float, schemapb.DataType_Double:
 if !IsNumber(value) {
-return nil, fmt.Errorf("invalid range operations")
+return nil, errors.New("invalid range operations")
 }
 if IsInteger(value) {
 return NewFloat(float64(value.GetInt64Val())), nil
@@ -22,6 +22,7 @@ import (
 "net"
 "testing"

+"github.com/cockroachdb/errors"
 "github.com/stretchr/testify/suite"
 "google.golang.org/grpc"
 "google.golang.org/grpc/codes"
@@ -99,7 +100,7 @@ func (s *GrpcAccessInfoSuite) TestErrorMsg() {
 s.Equal(merr.ErrChannelLack.Error(), result[0])

 // replace line breaks
-s.info.resp = merr.Status(fmt.Errorf("test error. stack: 1:\n 2:\n 3:\n"))
+s.info.resp = merr.Status(errors.New("test error. stack: 1:\n 2:\n 3:\n"))
 result = Get(s.info, "$error_msg")
 s.Equal("test error. stack: 1:\\n 2:\\n 3:\\n", result[0])

@@ -21,6 +21,7 @@ import (
 "fmt"
 "strings"

+"github.com/cockroachdb/errors"
 "go.uber.org/atomic"
 "google.golang.org/grpc/metadata"

@@ -33,7 +34,7 @@ var ClusterPrefix atomic.String
 func getCurUserFromContext(ctx context.Context) (string, error) {
 md, ok := metadata.FromIncomingContext(ctx)
 if !ok {
-return "", fmt.Errorf("fail to get md from the context")
+return "", errors.New("fail to get md from the context")
 }
 authorization, ok := md[strings.ToLower(util.HeaderAuthorize)]
 if !ok || len(authorization) < 1 {
@@ -26,6 +26,7 @@ import (
 "sync"
 "time"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"

 "github.com/milvus-io/milvus/pkg/v2/log"
@@ -78,7 +79,7 @@ func (l *CacheWriter) Write(p []byte) (n int, err error) {
 l.mu.Lock()
 defer l.mu.Unlock()
 if l.closed {
-return 0, fmt.Errorf("write to closed writer")
+return 0, errors.New("write to closed writer")
 }

 return l.writer.Write(p)
@@ -197,7 +198,7 @@ func (l *RotateWriter) Write(p []byte) (n int, err error) {
 defer l.mu.Unlock()

 if l.closed {
-return 0, fmt.Errorf("write to closed writer")
+return 0, errors.New("write to closed writer")
 }

 writeLen := int64(len(p))
@@ -5,6 +5,7 @@ import (
 "fmt"
 "strconv"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"
 "google.golang.org/grpc"
 "google.golang.org/grpc/metadata"
@@ -33,11 +34,11 @@ func ZapClientInfo(info *commonpb.ClientInfo) []zap.Field {
 func GetIdentifierFromContext(ctx context.Context) (int64, error) {
 md, ok := metadata.FromIncomingContext(ctx)
 if !ok {
-return 0, fmt.Errorf("fail to get metadata from the context")
+return 0, errors.New("fail to get metadata from the context")
 }
 identifierContent, ok := md[util.IdentifierKey]
 if !ok || len(identifierContent) < 1 {
-return 0, fmt.Errorf("no identifier found in metadata")
+return 0, errors.New("no identifier found in metadata")
 }
 identifier, err := strconv.ParseInt(identifierContent[0], 10, 64)
 if err != nil {
@@ -5515,19 +5515,19 @@ func (node *Proxy) SelectUser(ctx context.Context, req *milvuspb.SelectUserReque

 func (node *Proxy) validPrivilegeParams(req *milvuspb.OperatePrivilegeRequest) error {
 if req.Entity == nil {
-return fmt.Errorf("the entity in the request is nil")
+return errors.New("the entity in the request is nil")
 }
 if req.Entity.Grantor == nil {
-return fmt.Errorf("the grantor entity in the grant entity is nil")
+return errors.New("the grantor entity in the grant entity is nil")
 }
 if req.Entity.Grantor.Privilege == nil {
-return fmt.Errorf("the privilege entity in the grantor entity is nil")
+return errors.New("the privilege entity in the grantor entity is nil")
 }
 if err := ValidatePrivilege(req.Entity.Grantor.Privilege.Name); err != nil {
 return err
 }
 if req.Entity.Object == nil {
-return fmt.Errorf("the resource entity in the grant entity is nil")
+return errors.New("the resource entity in the grant entity is nil")
 }
 if err := ValidateObjectType(req.Entity.Object.Name); err != nil {
 return err
@@ -5536,7 +5536,7 @@ func (node *Proxy) validPrivilegeParams(req *milvuspb.OperatePrivilegeRequest) e
 return err
 }
 if req.Entity.Role == nil {
-return fmt.Errorf("the object entity in the grant entity is nil")
+return errors.New("the object entity in the grant entity is nil")
 }
 if err := ValidateRoleName(req.Entity.Role.Name); err != nil {
 return err
@@ -19,7 +19,6 @@ package proxy
 import (
 "context"
 "encoding/base64"
-"fmt"
 "net/http"
 "net/http/httptest"
 "testing"
@@ -1317,7 +1316,7 @@ func TestProxy_Delete(t *testing.T) {
 ).Return(partitionID, nil)
 cache.On("GetCollectionInfo", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(basicInfo, nil)
 chMgr.On("getVChannels", mock.Anything).Return(channels, nil)
-chMgr.On("getChannels", mock.Anything).Return(nil, fmt.Errorf("mock error"))
+chMgr.On("getChannels", mock.Anything).Return(nil, errors.New("mock error"))
 globalMetaCache = cache
 rc := mocks.NewMockRootCoordClient(t)
 tsoAllocator := &mockTsoAllocator{}
@@ -189,7 +189,7 @@ func (m *MockRootCoordClientInterface) GetCredential(ctx context.Context, req *r
 }, nil
 }

-err := fmt.Errorf("can't find credential: " + req.Username)
+err := fmt.Errorf("can't find credential: %s", req.Username)
 return nil, err
 }

@@ -768,7 +768,7 @@ func TestMetaCache_PolicyInfo(t *testing.T) {

 t.Run("InitMetaCache", func(t *testing.T) {
 client.listPolicy = func(ctx context.Context, in *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) {
-return nil, fmt.Errorf("mock error")
+return nil, errors.New("mock error")
 }
 err := InitMetaCache(context.Background(), client, qc, mgr)
 assert.Error(t, err)
@@ -1055,7 +1055,7 @@ func TestMetaCache_AllocID(t *testing.T) {
 rootCoord := mocks.NewMockRootCoordClient(t)
 rootCoord.EXPECT().AllocID(mock.Anything, mock.Anything).Return(&rootcoordpb.AllocIDResponse{
 Status: merr.Status(nil),
-}, fmt.Errorf("mock error"))
+}, errors.New("mock error"))
 rootCoord.EXPECT().ListPolicy(mock.Anything, mock.Anything).Return(&internalpb.ListPolicyResponse{
 Status: merr.Success(),
 PolicyInfos: []string{"policy1", "policy2", "policy3"},
@@ -1073,7 +1073,7 @@ func TestMetaCache_AllocID(t *testing.T) {
 t.Run("failed", func(t *testing.T) {
 rootCoord := mocks.NewMockRootCoordClient(t)
 rootCoord.EXPECT().AllocID(mock.Anything, mock.Anything).Return(&rootcoordpb.AllocIDResponse{
-Status: merr.Status(fmt.Errorf("mock failed")),
+Status: merr.Status(errors.New("mock failed")),
 }, nil)
 rootCoord.EXPECT().ListPolicy(mock.Anything, mock.Anything).Return(&internalpb.ListPolicyResponse{
 Status: merr.Success(),
@@ -562,7 +562,7 @@ func (node *Proxy) SetQueryNodeCreator(f func(ctx context.Context, addr string,
 // GetRateLimiter returns the rateLimiter in Proxy.
 func (node *Proxy) GetRateLimiter() (types.Limiter, error) {
 if node.simpleLimiter == nil {
-return nil, fmt.Errorf("nil rate limiter in Proxy")
+return nil, errors.New("nil rate limiter in Proxy")
 }
 return node.simpleLimiter, nil
 }
@@ -475,6 +475,7 @@ func TestProxy(t *testing.T) {
 assert.NoError(t, err)
 log.Info("Register proxy done")
 defer func() {
+time.Sleep(5 * time.Second)
 a := []any{rc, dc, qc, qn, in, dn, proxy}
 fmt.Println(len(a))
 // HINT: the order of stopping service refers to the `roles.go` file
@@ -120,14 +120,14 @@ func parseSearchIteratorV2Info(searchParamsPair []*commonpb.KeyValuePair, groupB
 } else {
 // Validate existing token is a valid UUID
 if _, err := uuid.Parse(token); err != nil {
-return nil, fmt.Errorf("invalid token format")
+return nil, errors.New("invalid token format")
 }
 }

 // parse batch size, required non-zero value
 batchSizeStr, _ := funcutil.GetAttrByKeyFromRepeatedKV(SearchIterBatchSizeKey, searchParamsPair)
 if batchSizeStr == "" {
-return nil, fmt.Errorf("batch size is required")
+return nil, errors.New("batch size is required")
 }
 batchSize, err := strconv.ParseInt(batchSizeStr, 0, 64)
 if err != nil {
@@ -367,7 +367,7 @@ func (sa *segIDAssigner) syncSegments() (bool, error) {
 assign.lastInsertTime = now
 }
 if !success {
-return false, fmt.Errorf(errMsg)
+return false, errors.New(errMsg)
 }
 return success, nil
 }
@@ -18,12 +18,12 @@ package proxy

 import (
 "context"
-"fmt"
 "math/rand"
 "sync"
 "testing"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/stretchr/testify/assert"
 "google.golang.org/grpc"

@@ -237,7 +237,7 @@ func (mockD *mockDataCoord5) AssignSegmentID(ctx context.Context, req *datapb.As
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
 Reason: "Just For Test",
 },
-}, fmt.Errorf("just for test")
+}, errors.New("just for test")
 }

 func TestSegmentAllocator5(t *testing.T) {
@@ -273,7 +273,7 @@ func (t *createCollectionTask) validatePartitionKey(ctx context.Context) error {

 if idx == -1 {
 if t.GetNumPartitions() != 0 {
-return fmt.Errorf("num_partitions should only be specified with partition key field enabled")
+return errors.New("num_partitions should only be specified with partition key field enabled")
 }
 } else {
 log.Ctx(ctx).Info("create collection with partition key mode",
@@ -691,7 +691,7 @@ func getPrimaryKeysFromUnaryRangeExpr(schema *schemapb.CollectionSchema, unaryRa
 },
 }
 default:
-return pks, fmt.Errorf("invalid field data type specifyed in simple delete expr")
+return pks, errors.New("invalid field data type specifyed in simple delete expr")
 }

 return pks, nil
@@ -722,7 +722,7 @@ func getPrimaryKeysFromTermExpr(schema *schemapb.CollectionSchema, termExpr *pla
 },
 }
 default:
-return pks, 0, fmt.Errorf("invalid field data type specifyed in simple delete expr")
+return pks, 0, errors.New("invalid field data type specifyed in simple delete expr")
 }

 return pks, pkCount, nil
@ -2,7 +2,6 @@ package proxy
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/cockroachdb/errors"
|
"github.com/cockroachdb/errors"
|
||||||
@ -447,7 +446,7 @@ func (s *DeleteRunnerSuite) TestInitFailure() {
|
|||||||
CollectionName: s.collectionName,
|
CollectionName: s.collectionName,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
s.mockCache.EXPECT().GetDatabaseInfo(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error"))
|
s.mockCache.EXPECT().GetDatabaseInfo(mock.Anything, mock.Anything).Return(nil, errors.New("mock error"))
|
||||||
globalMetaCache = s.mockCache
|
globalMetaCache = s.mockCache
|
||||||
|
|
||||||
s.Error(dr.Init(context.Background()))
|
s.Error(dr.Init(context.Background()))
|
||||||
@ -460,7 +459,7 @@ func (s *DeleteRunnerSuite) TestInitFailure() {
|
|||||||
}
|
}
|
||||||
s.mockCache.EXPECT().GetDatabaseInfo(mock.Anything, mock.Anything).Return(&databaseInfo{dbID: 0}, nil)
|
s.mockCache.EXPECT().GetDatabaseInfo(mock.Anything, mock.Anything).Return(&databaseInfo{dbID: 0}, nil)
|
||||||
s.mockCache.EXPECT().GetCollectionID(mock.Anything, mock.Anything, mock.Anything).
|
s.mockCache.EXPECT().GetCollectionID(mock.Anything, mock.Anything, mock.Anything).
|
||||||
Return(int64(0), fmt.Errorf("mock get collectionID error"))
|
Return(int64(0), errors.New("mock get collectionID error"))
|
||||||
|
|
||||||
globalMetaCache = s.mockCache
|
globalMetaCache = s.mockCache
|
||||||
s.Error(dr.Init(context.Background()))
|
s.Error(dr.Init(context.Background()))
|
||||||
@ -647,7 +646,7 @@ func (s *DeleteRunnerSuite) TestInitFailure() {
|
|||||||
s.mockCache.EXPECT().GetCollectionInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&collectionInfo{}, nil)
|
s.mockCache.EXPECT().GetCollectionInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&collectionInfo{}, nil)
|
||||||
s.mockCache.EXPECT().GetPartitionsIndex(mock.Anything, mock.Anything, mock.Anything).Return([]string{"part1", "part2"}, nil)
|
s.mockCache.EXPECT().GetPartitionsIndex(mock.Anything, mock.Anything, mock.Anything).Return([]string{"part1", "part2"}, nil)
|
||||||
s.mockCache.EXPECT().GetPartitions(mock.Anything, mock.Anything, mock.Anything).Return(map[string]int64{"part1": 100, "part2": 101}, nil)
|
s.mockCache.EXPECT().GetPartitions(mock.Anything, mock.Anything, mock.Anything).Return(map[string]int64{"part1": 100, "part2": 101}, nil)
|
||||||
mockChMgr.EXPECT().getVChannels(mock.Anything).Return(nil, fmt.Errorf("mock error"))
|
mockChMgr.EXPECT().getVChannels(mock.Anything).Return(nil, errors.New("mock error"))
|
||||||
|
|
||||||
globalMetaCache = s.mockCache
|
globalMetaCache = s.mockCache
|
||||||
s.Error(dr.Init(context.Background()))
|
s.Error(dr.Init(context.Background()))
|
||||||
@ -735,7 +734,7 @@ func TestDeleteRunner_Run(t *testing.T) {
|
|||||||
stream := msgstream.NewMockMsgStream(t)
|
stream := msgstream.NewMockMsgStream(t)
|
||||||
mockMgr.EXPECT().getOrCreateDmlStream(mock.Anything, mock.Anything).Return(stream, nil)
|
mockMgr.EXPECT().getOrCreateDmlStream(mock.Anything, mock.Anything).Return(stream, nil)
|
||||||
mockMgr.EXPECT().getChannels(collectionID).Return(channels, nil)
|
mockMgr.EXPECT().getChannels(collectionID).Return(channels, nil)
|
||||||
stream.EXPECT().Produce(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error"))
|
stream.EXPECT().Produce(mock.Anything, mock.Anything).Return(errors.New("mock error"))
|
||||||
|
|
||||||
assert.Error(t, dr.Run(context.Background()))
|
assert.Error(t, dr.Run(context.Background()))
|
||||||
assert.Equal(t, int64(0), dr.result.DeleteCnt)
|
assert.Equal(t, int64(0), dr.result.DeleteCnt)
|
||||||
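The test hunks apply the same substitution inside mock expectations: the injected error carries a constant message, so errors.New is enough. A hedged sketch of the pattern with a hand-written testify mock (the Milvus mocks are generated by mockery and use the EXPECT() style; the MockStream type and Produce signature below are invented for illustration):

```go
package example

import (
	"testing"

	"github.com/cockroachdb/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// MockStream is a hand-written stand-in for a mockery-generated mock.
type MockStream struct {
	mock.Mock
}

func (m *MockStream) Produce(topic string, payload []byte) error {
	args := m.Called(topic, payload)
	return args.Error(0)
}

// TestProduceFailure injects a constant-message error, mirroring the
// errors.New form used throughout the test hunks in this commit.
func TestProduceFailure(t *testing.T) {
	stream := new(MockStream)
	stream.On("Produce", mock.Anything, mock.Anything).Return(errors.New("mock error"))

	err := stream.Produce("ch-0", []byte("payload"))
	assert.Error(t, err)
	stream.AssertExpectations(t)
}
```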
@@ -139,7 +139,7 @@ func (cit *createIndexTask) parseFunctionParamsToIndex(indexParamsMap map[string

 switch cit.functionSchema.GetType() {
 case schemapb.FunctionType_Unknown:
-return fmt.Errorf("unknown function type encountered")
+return errors.New("unknown function type encountered")

 case schemapb.FunctionType_BM25:
 // set default BM25 params if not provided in index params

@@ -301,12 +301,12 @@ func (cit *createIndexTask) parseIndexParams(ctx context.Context) error {
 metricType, metricTypeExist := indexParamsMap[common.MetricTypeKey]

 if len(indexParamsMap) > numberParams+1 {
-return fmt.Errorf("only metric type can be passed when use AutoIndex")
+return errors.New("only metric type can be passed when use AutoIndex")
 }

 if len(indexParamsMap) == numberParams+1 {
 if !metricTypeExist {
-return fmt.Errorf("only metric type can be passed when use AutoIndex")
+return errors.New("only metric type can be passed when use AutoIndex")
 }

 // only metric type is passed.

@@ -349,7 +349,7 @@ func (cit *createIndexTask) parseIndexParams(ctx context.Context) error {

 indexType, exist := indexParamsMap[common.IndexTypeKey]
 if !exist {
-return fmt.Errorf("IndexType not specified")
+return errors.New("IndexType not specified")
 }
 // index parameters defined in the YAML file are merged with the user-provided parameters during create stage
 if Params.KnowhereConfig.Enable.GetAsBool() {

@@ -459,7 +459,7 @@ func fillDimension(field *schemapb.FieldSchema, indexParams map[string]string) e
 params = append(params, field.GetIndexParams()...)
 dimensionInSchema, err := funcutil.GetAttrByKeyFromRepeatedKV(DimKey, params)
 if err != nil {
-return fmt.Errorf("dimension not found in schema")
+return errors.New("dimension not found in schema")
 }
 dimension, exist := indexParams[DimKey]
 if exist {

@@ -2,12 +2,12 @@ package proxy

 import (
 "context"
-"fmt"
 "sort"
 "strings"
 "sync"
 "testing"

+"github.com/cockroachdb/errors"
 "github.com/stretchr/testify/assert"

 "github.com/milvus-io/milvus/internal/types"

@@ -34,7 +34,7 @@ func TestRoundRobinPolicy(t *testing.T) {
 assert.NoError(t, err)
 assert.Equal(t, querier.records(), map[UniqueID][]string{0: {"c0", "c2"}, 1: {"c1", "c3"}})

-mockerr := fmt.Errorf("mock query node error")
+mockerr := errors.New("mock query node error")
 querier.init()
 querier.failset[0] = mockerr

@@ -1272,7 +1272,7 @@ func TestQueryTask_CanSkipAllocTimestamp(t *testing.T) {
 mockMetaCache.ExpectedCalls = nil
 mockMetaCache.EXPECT().GetCollectionID(mock.Anything, mock.Anything, mock.Anything).Return(collID, nil)
 mockMetaCache.EXPECT().GetCollectionInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(
-nil, fmt.Errorf("mock error")).Once()
+nil, errors.New("mock error")).Once()

 qt := &queryTask{
 request: &milvuspb.QueryRequest{

@@ -1288,7 +1288,7 @@ func TestQueryTask_CanSkipAllocTimestamp(t *testing.T) {
 assert.False(t, skip)

 mockMetaCache.ExpectedCalls = nil
-mockMetaCache.EXPECT().GetCollectionID(mock.Anything, mock.Anything, mock.Anything).Return(collID, fmt.Errorf("mock error"))
+mockMetaCache.EXPECT().GetCollectionID(mock.Anything, mock.Anything, mock.Anything).Return(collID, errors.New("mock error"))
 mockMetaCache.EXPECT().GetCollectionInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(
 &collectionInfo{
 collID: collID,

@@ -24,6 +24,7 @@ import (
 "testing"
 "time"

+"github.com/cockroachdb/errors"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"

@@ -593,7 +594,7 @@ func TestTaskScheduler_concurrentPushAndPop(t *testing.T) {
 assert.NoError(t, err)
 task := scheduler.scheduleDmTask()
 scheduler.dmQueue.AddActiveTask(task)
-chMgr.EXPECT().getChannels(mock.Anything).Return(nil, fmt.Errorf("mock err"))
+chMgr.EXPECT().getChannels(mock.Anything).Return(nil, errors.New("mock err"))
 scheduler.dmQueue.PopActiveTask(task.ID()) // assert no panic
 }

@@ -659,7 +660,7 @@ func TestTaskScheduler_SkipAllocTimestamp(t *testing.T) {
 assert.NoError(t, err)
 })

-mockMetaCache.EXPECT().AllocID(mock.Anything).Return(0, fmt.Errorf("mock error")).Once()
+mockMetaCache.EXPECT().AllocID(mock.Anything).Return(0, errors.New("mock error")).Once()
 t.Run("failed", func(t *testing.T) {
 st := &searchTask{
 SearchRequest: &internalpb.SearchRequest{

@@ -3077,12 +3077,12 @@ func TestSearchTask_Requery(t *testing.T) {
 schema := newSchemaInfo(collSchema)
 qn := mocks.NewMockQueryNodeClient(t)
 qn.EXPECT().Query(mock.Anything, mock.Anything).
-Return(nil, fmt.Errorf("mock err 1"))
+Return(nil, errors.New("mock err 1"))

 lb := NewMockLBPolicy(t)
 lb.EXPECT().Execute(mock.Anything, mock.Anything).Run(func(ctx context.Context, workload CollectionWorkLoad) {
 _ = workload.exec(ctx, 0, qn, "")
-}).Return(fmt.Errorf("mock err 1"))
+}).Return(errors.New("mock err 1"))
 node.lbPolicy = lb

 qt := &searchTask{

@@ -3111,12 +3111,12 @@ func TestSearchTask_Requery(t *testing.T) {
 schema := newSchemaInfo(collSchema)
 qn := mocks.NewMockQueryNodeClient(t)
 qn.EXPECT().Query(mock.Anything, mock.Anything).
-Return(nil, fmt.Errorf("mock err 1"))
+Return(nil, errors.New("mock err 1"))

 lb := NewMockLBPolicy(t)
 lb.EXPECT().Execute(mock.Anything, mock.Anything).Run(func(ctx context.Context, workload CollectionWorkLoad) {
 _ = workload.exec(ctx, 0, qn, "")
-}).Return(fmt.Errorf("mock err 1"))
+}).Return(errors.New("mock err 1"))
 node.lbPolicy = lb

 resultIDs := &schemapb.IDs{

@@ -3350,7 +3350,7 @@ func TestSearchTask_CanSkipAllocTimestamp(t *testing.T) {
 mockMetaCache.ExpectedCalls = nil
 mockMetaCache.EXPECT().GetCollectionID(mock.Anything, mock.Anything, mock.Anything).Return(collID, nil)
 mockMetaCache.EXPECT().GetCollectionInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(
-nil, fmt.Errorf("mock error")).Once()
+nil, errors.New("mock error")).Once()

 st := &searchTask{
 request: &milvuspb.SearchRequest{

@@ -3366,7 +3366,7 @@ func TestSearchTask_CanSkipAllocTimestamp(t *testing.T) {
 assert.False(t, skip)

 mockMetaCache.ExpectedCalls = nil
-mockMetaCache.EXPECT().GetCollectionID(mock.Anything, mock.Anything, mock.Anything).Return(collID, fmt.Errorf("mock error"))
+mockMetaCache.EXPECT().GetCollectionID(mock.Anything, mock.Anything, mock.Anything).Return(collID, errors.New("mock error"))
 mockMetaCache.EXPECT().GetCollectionInfo(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(
 &collectionInfo{
 collID: collID,

@@ -64,8 +64,8 @@ type upsertTask struct {
 partitionKeyMode bool
 partitionKeys *schemapb.FieldData
 // automatic generate pk as new pk wehen autoID == true
-// delete task need use the oldIds
-oldIds *schemapb.IDs
+// delete task need use the oldIDs
+oldIDs *schemapb.IDs
 schemaTimestamp uint64
 }

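Alongside the error-constructor change, this hunk renames oldIds to oldIDs (and, further down, newIds to newIDs), keeping the ID initialism fully upper-case in identifiers. A small illustrative sketch of the convention (the struct and field names below are hypothetical, not the repository's types):

```go
package main

import "fmt"

// upsertState illustrates the rename applied in this hunk: initialisms such
// as ID stay upper-case throughout, so oldIds becomes oldIDs.
type upsertState struct {
	oldIDs []int64 // primary keys the delete half of an upsert targets
	newIDs []int64 // primary keys assigned to the re-inserted rows
}

func main() {
	s := upsertState{oldIDs: []int64{1, 2}, newIDs: []int64{3, 4}}
	fmt.Println(len(s.oldIDs), len(s.newIDs))
}
```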
@@ -191,7 +191,7 @@ func (it *upsertTask) insertPreExecute(ctx context.Context) error {
 // use the passed pk as new pk when autoID == false
 // automatic generate pk as new pk wehen autoID == true
 var err error
-it.result.IDs, it.oldIds, err = checkUpsertPrimaryFieldData(it.schema.CollectionSchema, it.upsertMsg.InsertMsg)
+it.result.IDs, it.oldIDs, err = checkUpsertPrimaryFieldData(it.schema.CollectionSchema, it.upsertMsg.InsertMsg)
 log := log.Ctx(ctx).With(zap.String("collectionName", it.upsertMsg.InsertMsg.CollectionName))
 if err != nil {
 log.Warn("check primary field data and hash primary key failed when upsert",

@@ -488,7 +488,7 @@ func (it *upsertTask) deleteExecute(ctx context.Context, msgPack *msgstream.MsgP
 it.result.Status = merr.Status(err)
 return err
 }
-it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIds
+it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIDs
 it.upsertMsg.DeleteMsg.HashValues = typeutil.HashPK2Channels(it.upsertMsg.DeleteMsg.PrimaryKeys, channelNames)

 // repack delete msg by dmChannel

@@ -96,7 +96,7 @@ func (ut *upsertTaskByStreamingService) packInsertMessage(ctx context.Context) (
 func (it *upsertTaskByStreamingService) packDeleteMessage(ctx context.Context) ([]message.MutableMessage, error) {
 tr := timerecord.NewTimeRecorder(fmt.Sprintf("proxy deleteExecute upsert %d", it.ID()))
 collID := it.upsertMsg.DeleteMsg.CollectionID
-it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIds
+it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIDs
 log := log.Ctx(ctx).With(
 zap.Int64("collectionID", collID))
 // hash primary keys to channels

@@ -22,6 +22,8 @@ import (
 "strconv"
 "time"

+"github.com/cockroachdb/errors"
+
 "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
 "github.com/milvus-io/milvus/pkg/v2/metrics"
 "github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"

@@ -69,7 +71,7 @@ func (ta *timestampAllocator) alloc(ctx context.Context, count uint32) ([]Timest
 return nil, fmt.Errorf("syncTimeStamp Failed:%s", resp.GetStatus().GetReason())
 }
 if resp == nil {
-return nil, fmt.Errorf("empty AllocTimestampResponse")
+return nil, errors.New("empty AllocTimestampResponse")
 }
 start, cnt := resp.GetTimestamp(), resp.GetCount()
 ret := make([]Timestamp, cnt)

@@ -392,7 +392,7 @@ func validateMaxCapacityPerRow(collectionName string, field *schemapb.FieldSchem
 return fmt.Errorf("the value for %s of field %s must be an integer", common.MaxCapacityKey, field.GetName())
 }
 if maxCapacityPerRow > defaultMaxArrayCapacity || maxCapacityPerRow <= 0 {
-return fmt.Errorf("the maximum capacity specified for a Array should be in (0, 4096]")
+return errors.New("the maximum capacity specified for a Array should be in (0, 4096]")
 }
 exist = true
 }

@@ -634,7 +634,7 @@ func validatePrimaryKey(coll *schemapb.CollectionSchema) error {
 // If autoID is required, it is recommended to use int64 field as the primary key
 //if field.DataType == schemapb.DataType_VarChar {
 // if field.AutoID {
-// return fmt.Errorf("autoID is not supported when the VarChar field is the primary key")
+// return errors.New("autoID is not supported when the VarChar field is the primary key")
 // }
 //}

@@ -650,7 +650,7 @@ func validatePrimaryKey(coll *schemapb.CollectionSchema) error {
 func validateDynamicField(coll *schemapb.CollectionSchema) error {
 for _, field := range coll.Fields {
 if field.IsDynamic {
-return fmt.Errorf("cannot explicitly set a field as a dynamic field")
+return errors.New("cannot explicitly set a field as a dynamic field")
 }
 }
 return nil

@@ -715,12 +715,12 @@ func validateSchema(coll *schemapb.CollectionSchema) error {
 // primary key detector
 if field.IsPrimaryKey {
 if autoID {
-return fmt.Errorf("autoId forbids primary key")
+return errors.New("autoId forbids primary key")
 } else if primaryIdx != -1 {
 return fmt.Errorf("there are more than one primary key, field name = %s, %s", coll.Fields[primaryIdx].Name, field.Name)
 }
 if field.DataType != schemapb.DataType_Int64 {
-return fmt.Errorf("type of primary key should be int64")
+return errors.New("type of primary key should be int64")
 }
 primaryIdx = idx
 }

@@ -781,7 +781,7 @@ func validateSchema(coll *schemapb.CollectionSchema) error {
 }

 if !autoID && primaryIdx == -1 {
-return fmt.Errorf("primary key is required for non autoid mode")
+return errors.New("primary key is required for non autoid mode")
 }

 return nil

@@ -865,7 +865,7 @@ func checkFunctionOutputField(function *schemapb.FunctionSchema, fields []*schem
 return fmt.Errorf("BM25 function output field must be a SparseFloatVector field, but got %s", fields[0].DataType.String())
 }
 default:
-return fmt.Errorf("check output field for unknown function type")
+return errors.New("check output field for unknown function type")
 }
 return nil
 }

@@ -879,18 +879,18 @@ func checkFunctionInputField(function *schemapb.FunctionSchema, fields []*schema
 }
 h := typeutil.CreateFieldSchemaHelper(fields[0])
 if !h.EnableAnalyzer() {
-return fmt.Errorf("BM25 function input field must set enable_analyzer to true")
+return errors.New("BM25 function input field must set enable_analyzer to true")
 }

 default:
-return fmt.Errorf("check input field with unknown function type")
+return errors.New("check input field with unknown function type")
 }
 return nil
 }

 func checkFunctionBasicParams(function *schemapb.FunctionSchema) error {
 if function.GetName() == "" {
-return fmt.Errorf("function name cannot be empty")
+return errors.New("function name cannot be empty")
 }
 if len(function.GetInputFieldNames()) == 0 {
 return fmt.Errorf("function input field names cannot be empty, function: %s", function.GetName())

@@ -921,10 +921,10 @@ func checkFunctionBasicParams(function *schemapb.FunctionSchema) error {
 switch function.GetType() {
 case schemapb.FunctionType_BM25:
 if len(function.GetParams()) != 0 {
-return fmt.Errorf("BM25 function accepts no params")
+return errors.New("BM25 function accepts no params")
 }
 default:
-return fmt.Errorf("check function params with unknown function type")
+return errors.New("check function params with unknown function type")
 }
 return nil
 }

@@ -1404,9 +1404,9 @@ func computeRecall(results *schemapb.SearchResultData, gts *schemapb.SearchResul
 results.Recalls = recalls
 return nil
 case *schemapb.IDs_StrId:
-return fmt.Errorf("pk type is inconsistent between search results(int64) and ground truth(string)")
+return errors.New("pk type is inconsistent between search results(int64) and ground truth(string)")
 default:
-return fmt.Errorf("unsupported pk type")
+return errors.New("unsupported pk type")
 }

 case *schemapb.IDs_StrId:

@@ -1426,12 +1426,12 @@ func computeRecall(results *schemapb.SearchResultData, gts *schemapb.SearchResul
 results.Recalls = recalls
 return nil
 case *schemapb.IDs_IntId:
-return fmt.Errorf("pk type is inconsistent between search results(string) and ground truth(int64)")
+return errors.New("pk type is inconsistent between search results(string) and ground truth(int64)")
 default:
-return fmt.Errorf("unsupported pk type")
+return errors.New("unsupported pk type")
 }
 default:
-return fmt.Errorf("unsupported pk type")
+return errors.New("unsupported pk type")
 }
 }

@@ -1509,7 +1509,7 @@ func translateOutputFields(outputFields []string, schema *schemaInfo, removePkFi
 expr.GetColumnExpr().GetInfo().GetNestedPath()[0] == outputFieldName {
 return nil
 }
-return fmt.Errorf("not support getting subkeys of json field yet")
+return errors.New("not support getting subkeys of json field yet")
 })
 if err != nil {
 log.Info("parse output field name failed", zap.String("field name", outputFieldName))

@@ -1831,12 +1831,12 @@ func checkUpsertPrimaryFieldData(schema *schemapb.CollectionSchema, insertMsg *m
 if !primaryFieldSchema.GetAutoID() {
 return ids, ids, nil
 }
-newIds, err := parsePrimaryFieldData2IDs(newPrimaryFieldData)
+newIDs, err := parsePrimaryFieldData2IDs(newPrimaryFieldData)
 if err != nil {
 log.Warn("parse primary field data to IDs failed", zap.Error(err))
 return nil, nil, err
 }
-return newIds, ids, nil
+return newIDs, ids, nil
 }

 func getPartitionKeyFieldData(fieldSchema *schemapb.FieldSchema, insertMsg *msgstream.InsertMsg) (*schemapb.FieldData, error) {

@@ -2335,7 +2335,7 @@ func GetRequestInfo(ctx context.Context, req proto.Message) (int64, map[int64][]
 return util.InvalidDBID, map[int64][]int64{}, internalpb.RateType_DDLDB, 1, nil
 default: // TODO: support more request
 if req == nil {
-return util.InvalidDBID, map[int64][]int64{}, 0, 0, fmt.Errorf("null request")
+return util.InvalidDBID, map[int64][]int64{}, 0, 0, errors.New("null request")
 }
 log.RatedWarn(60, "not supported request type for rate limiter", zap.String("type", reflect.TypeOf(req).String()))
 return util.InvalidDBID, map[int64][]int64{}, 0, 0, nil

@@ -930,7 +930,7 @@ func TestPasswordVerify(t *testing.T) {
 mockedRootCoord := newMockRootCoord()
 mockedRootCoord.GetGetCredentialFunc = func(ctx context.Context, req *rootcoordpb.GetCredentialRequest, opts ...grpc.CallOption) (*rootcoordpb.GetCredentialResponse, error) {
 invokedCount++
-return nil, fmt.Errorf("get cred not found credential")
+return nil, errors.New("get cred not found credential")
 }

 metaCache := &MetaCache{

@@ -18,7 +18,6 @@ package meta

 import (
 "context"
-"fmt"
 "math"
 "time"

@@ -278,7 +277,7 @@ func (broker *CoordinatorBroker) GetSegmentInfo(ctx context.Context, ids ...Uniq

 if len(resp.Infos) == 0 {
 log.Warn("No such segment in DataCoord")
-return nil, fmt.Errorf("no such segment in DataCoord")
+return nil, errors.New("no such segment in DataCoord")
 }

 err = binlog.DecompressMultiBinLogs(resp.GetInfos())

@@ -184,7 +184,7 @@ func (s *Server) initSession() error {
 // Init QueryCoord session
 s.session = sessionutil.NewSession(s.ctx)
 if s.session == nil {
-return fmt.Errorf("failed to create session")
+return errors.New("failed to create session")
 }
 s.session.Init(typeutil.QueryCoordRole, s.address, true, true)
 s.enableActiveStandBy = Params.QueryCoordCfg.EnableActiveStandby.GetAsBool()

@@ -113,7 +113,7 @@ func GetShardLeadersWithChannels(ctx context.Context, m *meta.Meta, targetMgr me
 ) ([]*querypb.ShardLeadersList, error) {
 ret := make([]*querypb.ShardLeadersList, 0)
 for _, channel := range channels {
-log := log.With(zap.String("channel", channel.GetChannelName()))
+log := log.Ctx(ctx).With(zap.String("channel", channel.GetChannelName()))

 var channelErr error
 leaders := dist.LeaderViewManager.GetByFilter(meta.WithChannelName2LeaderView(channel.GetChannelName()))
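This hunk is the logging-context part of the commit title: the per-channel logger is now derived from the request context via log.Ctx(ctx), so request-scoped fields attached to the context propagate into the log lines. A hedged sketch of the pattern (the function is illustrative; the import path is assumed from the pkg/v2 layout visible elsewhere in this diff):

```go
package example

import (
	"context"

	"go.uber.org/zap"

	"github.com/milvus-io/milvus/pkg/v2/log" // path assumed from the pkg/v2 imports in this diff
)

// reportShardLeaders mirrors the change in GetShardLeadersWithChannels:
// the logger is bound to ctx first, so trace fields carried by the context
// appear on every per-channel log line.
func reportShardLeaders(ctx context.Context, channels []string) {
	for _, ch := range channels {
		logger := log.Ctx(ctx).With(zap.String("channel", ch))
		logger.Info("collecting shard leaders for channel")
	}
}
```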
@@ -1041,7 +1041,7 @@ func (sd *shardDelegator) buildBM25IDF(req *internalpb.SearchRequest) (float64,

 tfArray, ok := output[0].(*schemapb.SparseFloatArray)
 if !ok {
-return 0, fmt.Errorf("functionRunner return unknown data")
+return 0, errors.New("functionRunner return unknown data")
 }

 idfSparseVector, avgdl, err := sd.idfOracle.BuildIDF(req.GetFieldId(), tfArray)

@@ -609,7 +609,7 @@ func (s *DelegatorDataSuite) TestLoadSegmentsWithBm25() {
 s.loader.ExpectedCalls = nil
 }()

-s.loader.EXPECT().LoadBM25Stats(mock.Anything, s.collectionID, mock.Anything).Return(nil, fmt.Errorf("mock error"))
+s.loader.EXPECT().LoadBM25Stats(mock.Anything, s.collectionID, mock.Anything).Return(nil, errors.New("mock error"))

 workers := make(map[int64]*cluster.MockWorker)
 worker1 := &cluster.MockWorker{}

@@ -18,9 +18,9 @@ package delegator

 import (
 "context"
-"fmt"
 "sync"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"

 "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"

@@ -76,7 +76,7 @@ func (s *bm25Stats) Minus(stats map[int64]*storage.BM25Stats) {
 func (s *bm25Stats) GetStats(fieldID int64) (*storage.BM25Stats, error) {
 stats, ok := s.stats[fieldID]
 if !ok {
-return nil, fmt.Errorf("field not found in idf oracle BM25 stats")
+return nil, errors.New("field not found in idf oracle BM25 stats")
 }
 return stats, nil
 }

@@ -20,6 +20,7 @@ import (
 "context"
 "fmt"

+"github.com/cockroachdb/errors"
 "github.com/samber/lo"
 "go.uber.org/zap"

@@ -153,7 +154,7 @@ func (eNode *embeddingNode) bm25Embedding(runner function.FunctionRunner, msg *m

 sparseArray, ok := output[0].(*schemapb.SparseFloatArray)
 if !ok {
-return fmt.Errorf("BM25 runner return unknown type output")
+return errors.New("BM25 runner return unknown type output")
 }

 if _, ok := stats[outputFieldID]; !ok {

@@ -175,7 +176,7 @@ func (eNode *embeddingNode) embedding(msg *msgstream.InsertMsg, stats map[int64]
 }
 default:
 log.Warn("pipeline embedding with unknown function type", zap.Any("type", functionSchema.GetType()))
-return fmt.Errorf("unknown function type")
+return errors.New("unknown function type")
 }
 }

@@ -17,9 +17,9 @@
 package pipeline

 import (
-"fmt"
 "testing"

+"github.com/cockroachdb/errors"
 "github.com/stretchr/testify/mock"
 "github.com/stretchr/testify/suite"
 "google.golang.org/protobuf/proto"

@@ -252,7 +252,8 @@ func (suite *EmbeddingNodeSuite) TestBM25Embedding() {
 suite.NoError(err)

 runner := function.NewMockFunctionRunner(suite.T())
-runner.EXPECT().BatchRun(mock.Anything).Return(nil, fmt.Errorf("mock error"))
+runner.EXPECT().BatchRun(mock.Anything).Return(nil, errors.Errorf("mock error"))
+runner.EXPECT().GetSchema().Return(suite.collectionSchema.GetFunctions()[0]).Maybe()
 runner.EXPECT().GetOutputFields().Return([]*schemapb.FieldSchema{suite.collectionSchema.Fields[3]})
 runner.EXPECT().GetInputFields().Return([]*schemapb.FieldSchema{suite.collectionSchema.Fields[2]})

@@ -27,6 +27,8 @@ import (
 "fmt"
 "unsafe"

+"github.com/cockroachdb/errors"
+
 "github.com/milvus-io/milvus/internal/util/indexparamcheck"
 "github.com/milvus-io/milvus/internal/util/vecindexmgr"
 "github.com/milvus-io/milvus/pkg/v2/common"

@@ -59,7 +61,7 @@ func NewIndexAttrCache() *IndexAttrCache {
 func (c *IndexAttrCache) GetIndexResourceUsage(indexInfo *querypb.FieldIndexInfo, memoryIndexLoadPredictMemoryUsageFactor float64, fieldBinlog *datapb.FieldBinlog) (memory uint64, disk uint64, err error) {
 indexType, err := funcutil.GetAttrByKeyFromRepeatedKV(common.IndexTypeKey, indexInfo.IndexParams)
 if err != nil {
-return 0, 0, fmt.Errorf("index type not exist in index params")
+return 0, 0, errors.New("index type not exist in index params")
 }
 if vecindexmgr.GetVecIndexMgrInstance().IsDiskANN(indexType) {
 neededMemSize := indexInfo.IndexSize / UsedDiskMemoryRatio

@@ -45,6 +45,7 @@ type TimestampedRetrieveResult[T interface {
 Timestamps []int64
 }

+// nolint
 func (r *TimestampedRetrieveResult[T]) GetIds() *schemapb.IDs {
 return r.Result.GetIds()
 }

@@ -1098,7 +1098,7 @@ func (s *LocalSegment) innerLoadIndex(ctx context.Context,

 metricType, err := funcutil.GetAttrByKeyFromRepeatedKV(common.MetricTypeKey, indexInfo.IndexParams)
 if err != nil {
-return fmt.Errorf("metric type not exist in index params")
+return errors.New("metric type not exist in index params")
 }

 if metricType == metric.BM25 {

@@ -582,7 +582,8 @@ func (suite *SegmentLoaderSuite) TestLoadIndexWithLimitedResource() {
 loadInfo: atomic.NewPointer[querypb.SegmentLoadInfo](loadInfo),
 },
 }
-paramtable.Get().QueryNodeCfg.DiskCapacityLimit.SwapTempValue("100000")
+paramtable.Get().Save(paramtable.Get().QueryNodeCfg.DiskCapacityLimit.Key, "100000")
+defer paramtable.Get().Reset(paramtable.Get().QueryNodeCfg.DiskCapacityLimit.Key)
 err := suite.loader.LoadIndex(ctx, segment, loadInfo, 0)
 suite.Error(err)
 }
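The segment-loader test above now overrides the disk-capacity config through Save and restores it with a deferred Reset, so the override cannot leak into other tests. A hedged sketch of that pairing (the parameter key and the Save/Reset calls are taken from the hunk; the helper, the Init bootstrap, and the import path are assumptions for illustration):

```go
package example

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/v2/util/paramtable" // path assumed from the pkg/v2 layout
)

// overrideDiskCapacity applies a temporary config override and returns a
// restore function, mirroring the Save / deferred-Reset pairing in the hunk.
func overrideDiskCapacity(value string) func() {
	key := paramtable.Get().QueryNodeCfg.DiskCapacityLimit.Key
	paramtable.Get().Save(key, value)
	return func() { paramtable.Get().Reset(key) }
}

func main() {
	paramtable.Init() // assumed test-style bootstrap of the parameter table
	restore := overrideDiskCapacity("100000")
	defer restore()
	fmt.Println(paramtable.Get().QueryNodeCfg.DiskCapacityLimit.GetValue())
}
```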
@@ -20,6 +20,7 @@ import (
 "strconv"
 "time"

+"github.com/cockroachdb/errors"
 "go.uber.org/zap"

 "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"

@@ -126,7 +127,7 @@ func getPKsFromRowBasedInsertMsg(msg *msgstream.InsertMsg, schema *schemapb.Coll
 }
 }
 case schemapb.DataType_SparseFloatVector:
-return nil, fmt.Errorf("SparseFloatVector not support in row based message")
+return nil, errors.New("SparseFloatVector not support in row based message")
 }
 }

@@ -40,6 +40,7 @@ import (
 "time"
 "unsafe"

+"github.com/cockroachdb/errors"
 "github.com/samber/lo"
 "github.com/tidwall/gjson"
 clientv3 "go.etcd.io/etcd/client/v3"

@@ -163,7 +164,7 @@ func (node *QueryNode) initSession() error {
 sessionutil.WithScalarIndexEngineVersion(common.MinimalScalarIndexEngineVersion, common.CurrentScalarIndexEngineVersion),
 sessionutil.WithIndexNonEncoding())
 if node.session == nil {
-return fmt.Errorf("session is nil, the etcd client connection may have failed")
+return errors.New("session is nil, the etcd client connection may have failed")
 }
 node.session.Init(typeutil.QueryNodeRole, node.address, false, true)
 sessionutil.SaveServerInfo(typeutil.QueryNodeRole, node.session.ServerID)

@@ -548,7 +549,7 @@ func (node *QueryNode) initHook() error {
 log := log.Ctx(node.ctx)
 path := paramtable.Get().QueryNodeCfg.SoPath.GetValue()
 if path == "" {
-return fmt.Errorf("fail to set the plugin path")
+return errors.New("fail to set the plugin path")
 }
 log.Info("start to load plugin", zap.String("path", path))

@@ -565,7 +566,7 @@ func (node *QueryNode) initHook() error {

 hoo, ok := h.(optimizers.QueryHook)
 if !ok {
-return fmt.Errorf("fail to convert the `Hook` interface")
+return errors.New("fail to convert the `Hook` interface")
 }
 if err = hoo.Init(paramtable.Get().AutoIndexConfig.AutoIndexSearchConfig.GetValue()); err != nil {
 return fmt.Errorf("fail to init configs for the hook, error: %s", err.Error())

@@ -18,9 +18,9 @@ package rootcoord

 import (
 "context"
-"fmt"
 "testing"

+"github.com/cockroachdb/errors"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"

@@ -63,7 +63,7 @@ func Test_alterAliasTask_Execute(t *testing.T) {
 mockMeta := mockrootcoord.NewIMetaTable(t)
 mockMeta.EXPECT().GetCollectionID(mock.Anything, mock.Anything, mock.Anything).Return(111)
 mockMeta.EXPECT().AlterAlias(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
-Return(fmt.Errorf("failed to alter alias"))
+Return(errors.New("failed to alter alias"))
 core := newTestCore(withValidProxyManager(), withMeta(mockMeta))
 task := &alterAliasTask{
 baseTask: newBaseTask(context.Background(), core),

@@ -18,7 +18,6 @@ package rootcoord

 import (
 "context"
-"fmt"
 "strconv"

 "github.com/cockroachdb/errors"

@@ -44,7 +43,7 @@ type alterCollectionTask struct {

 func (a *alterCollectionTask) Prepare(ctx context.Context) error {
 if a.Req.GetCollectionName() == "" {
-return fmt.Errorf("alter collection failed, collection name does not exists")
+return errors.New("alter collection failed, collection name does not exists")
 }

 return nil

@@ -244,11 +243,11 @@ type alterCollectionFieldTask struct {

 func (a *alterCollectionFieldTask) Prepare(ctx context.Context) error {
 if a.Req.GetCollectionName() == "" {
-return fmt.Errorf("alter collection field failed, collection name does not exists")
+return errors.New("alter collection field failed, collection name does not exists")
 }

 if a.Req.GetFieldName() == "" {
-return fmt.Errorf("alter collection field failed, filed name does not exists")
+return errors.New("alter collection field failed, filed name does not exists")
 }

 return nil
Some files were not shown because too many files have changed in this diff.