commit f3d2afa4f4 (parent 01781103a9)

Update doc: detailed interface definition

Signed-off-by: GuoRentong <rentong.guo@zilliz.com>
@@ -2,7 +2,7 @@
 "name": "Milvus Distributed Dev Container Definition",
 "dockerComposeFile": ["./docker-compose-vscode.yml"],
 "service": "ubuntu",
-"initializeCommand": "scripts/init_devcontainer.sh && docker-compose -f docker-compose-vscode.yml down || true",
+"initializeCommand": "scripts/init_devcontainer.sh && docker-compose -f docker-compose-vscode.yml down || true && docker-compose -f docker-compose-vscode.yml pull --ignore-pull-failures ubuntu",
 "workspaceFolder": "/go/src/github.com/zilliztech/milvus-distributed",
 "shutdownAction": "stopCompose",
 "extensions": [
.gitignore (vendored) | 3
@@ -4,6 +4,7 @@
 **/cmake-build-release/*
 internal/core/output/*
 internal/core/build/*
+internal/kv/rocksdb/cwrapper/output/*
 **/.idea/*
 pulsar/client-cpp/build/
 pulsar/client-cpp/build/*
@@ -11,7 +12,7 @@ pulsar/client-cpp/build/*
 # vscode generated files
 .vscode
 docker-compose-vscode.yml
-docker-compose-vscode.yml.tmp
+docker-compose-vscode.yml.bak
 
 cmake-build-debug
 cmake-build-release
@@ -1,20 +1,22 @@
 timeout(time: 20, unit: 'MINUTES') {
+    dir ("scripts") {
-    sh '. ./scripts/before-install.sh && unset http_proxy && unset https_proxy && ./scripts/check_cache.sh -l $CCACHE_ARTFACTORY_URL --cache_dir=\$CCACHE_DIR -f ccache-\$OS_NAME-\$BUILD_ENV_IMAGE_ID.tar.gz || echo \"Ccache artfactory files not found!\"'
+        sh '. ./before-install.sh && unset http_proxy && unset https_proxy && ./check_cache.sh -l $CCACHE_ARTFACTORY_URL --cache_dir=\$CCACHE_DIR -f ccache-\$OS_NAME-\$BUILD_ENV_IMAGE_ID.tar.gz || echo \"Ccache artfactory files not found!\"'
-    sh '. ./scripts/before-install.sh && unset http_proxy && unset https_proxy && ./scripts/check_cache.sh -l $GO_CACHE_ARTFACTORY_URL --cache_dir=\$(go env GOCACHE) -f go-cache-\$OS_NAME-\$BUILD_ENV_IMAGE_ID.tar.gz || echo \"Go cache artfactory files not found!\"'
+        sh '. ./before-install.sh && unset http_proxy && unset https_proxy && ./check_cache.sh -l $GO_CACHE_ARTFACTORY_URL --cache_dir=\$(go env GOCACHE) -f go-cache-\$OS_NAME-\$BUILD_ENV_IMAGE_ID.tar.gz || echo \"Go cache artfactory files not found!\"'
-    sh '. ./scripts/before-install.sh && unset http_proxy && unset https_proxy && ./scripts/check_cache.sh -l $THIRDPARTY_ARTFACTORY_URL --cache_dir=$CUSTOM_THIRDPARTY_PATH -f thirdparty-download.tar.gz || echo \"Thirdparty artfactory files not found!\"'
+        sh '. ./before-install.sh && unset http_proxy && unset https_proxy && ./check_cache.sh -l $THIRDPARTY_ARTFACTORY_URL --cache_dir=$CUSTOM_THIRDPARTY_PATH -f thirdparty-download.tar.gz || echo \"Thirdparty artfactory files not found!\"'
-    sh '. ./scripts/before-install.sh && unset http_proxy && unset https_proxy && ./scripts/check_cache.sh -l $GO_MOD_ARTFACTORY_URL --cache_dir=\$GOPATH/pkg/mod -f milvus-distributed-go-mod-\$(md5sum go.mod).tar.gz || echo \"Go mod artfactory files not found!\"'
+        sh '. ./before-install.sh && unset http_proxy && unset https_proxy && ./check_cache.sh -l $GO_MOD_ARTFACTORY_URL --cache_dir=\$GOPATH/pkg/mod -f milvus-distributed-go-mod-cache.tar.gz || echo \"Go mod artfactory files not found!\"'
+    }
 
 // Zero the cache statistics (but not the configuration options)
 sh 'ccache -z'
 sh '. ./scripts/before-install.sh && make install'
 sh 'echo -e "===\n=== ccache statistics after build\n===" && ccache --show-stats'
 
+    dir ("scripts") {
 withCredentials([usernamePassword(credentialsId: "${env.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
-    sh '. ./scripts/before-install.sh && unset http_proxy && unset https_proxy && ./scripts/update_cache.sh -l $CCACHE_ARTFACTORY_URL --cache_dir=\$CCACHE_DIR -f ccache-\$OS_NAME-\$BUILD_ENV_IMAGE_ID.tar.gz -u ${USERNAME} -p ${PASSWORD}'
+        sh '. ./before-install.sh && unset http_proxy && unset https_proxy && ./update_cache.sh -l $CCACHE_ARTFACTORY_URL --cache_dir=\$CCACHE_DIR -f ccache-\$OS_NAME-\$BUILD_ENV_IMAGE_ID.tar.gz -u ${USERNAME} -p ${PASSWORD}'
-    sh '. ./scripts/before-install.sh && unset http_proxy && unset https_proxy && ./scripts/update_cache.sh -l $GO_CACHE_ARTFACTORY_URL --cache_dir=\$(go env GOCACHE) -f go-cache-\$OS_NAME-\$BUILD_ENV_IMAGE_ID.tar.gz -u ${USERNAME} -p ${PASSWORD}'
+        sh '. ./before-install.sh && unset http_proxy && unset https_proxy && ./update_cache.sh -l $GO_CACHE_ARTFACTORY_URL --cache_dir=\$(go env GOCACHE) -f go-cache-\$OS_NAME-\$BUILD_ENV_IMAGE_ID.tar.gz -u ${USERNAME} -p ${PASSWORD}'
-    sh '. ./scripts/before-install.sh && unset http_proxy && unset https_proxy && ./scripts/update_cache.sh -l $THIRDPARTY_ARTFACTORY_URL --cache_dir=$CUSTOM_THIRDPARTY_PATH -f thirdparty-download.tar.gz -u ${USERNAME} -p ${PASSWORD}'
+        sh '. ./before-install.sh && unset http_proxy && unset https_proxy && ./update_cache.sh -l $THIRDPARTY_ARTFACTORY_URL --cache_dir=$CUSTOM_THIRDPARTY_PATH -f thirdparty-download.tar.gz -u ${USERNAME} -p ${PASSWORD}'
-    sh '. ./scripts/before-install.sh && unset http_proxy && unset https_proxy && ./scripts/update_cache.sh -l $GO_MOD_ARTFACTORY_URL --cache_dir=\$GOPATH/pkg/mod -f milvus-distributed-go-mod-\$(md5sum go.mod).tar.gz -u ${USERNAME} -p ${PASSWORD}'
+        sh '. ./before-install.sh && unset http_proxy && unset https_proxy && ./update_cache.sh -l $GO_MOD_ARTFACTORY_URL --cache_dir=\$GOPATH/pkg/mod -f milvus-distributed-go-mod-cache.tar.gz -u ${USERNAME} -p ${PASSWORD}'
+    }
 }
 }
@@ -31,14 +31,12 @@ try {
 } catch(exc) {
     throw exc
 } finally {
-    dir ('build/docker/deploy') {
-        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} ps | tail -n +3 | awk \'{ print $1 }\' | ( while read arg; do docker logs -t $arg > $arg.log 2>&1; done )'
-        archiveArtifacts artifacts: "**.log", allowEmptyArchive: true
-        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} down --rmi all -v || true'
-    }
     sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} rm -f -s -v pulsar'
     sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} rm -f -s -v etcd'
     sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} rm -f -s -v minio'
+    dir ('build/docker/deploy') {
+        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} down --rmi all -v || true'
+    }
     dir ('build/docker/test') {
         sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run --rm regression /bin/bash -c "rm -rf __pycache__ && rm -rf .pytest_cache"'
         sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} down --rmi all -v || true'
Makefile | 22
@@ -57,8 +57,14 @@ lint:tools/bin/revive
 	@echo "Running $@ check"
 	@tools/bin/revive -formatter friendly -config tools/check/revive.toml ./...
 
+get-rocksdb:
+	@go env -w CGO_CFLAGS="-I$(PWD)/internal/kv/rocksdb/cwrapper/output/include"
+	@go env -w CGO_LDFLAGS="-L$(PWD)/internal/kv/rocksdb/cwrapper/output/lib -l:librocksdb.a -lstdc++ -lm -lz"
+	@(env bash $(PWD)/internal/kv/rocksdb/cwrapper/build.sh)
+	@go get github.com/tecbot/gorocksdb
+
 #TODO: Check code specifications by golangci-lint
-static-check:
+static-check:get-rocksdb
 	@echo "Running $@ check"
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=30m --config ./.golangci.yml ./internal/...
@@ -106,7 +112,7 @@ build-cpp-with-unittest:
 unittest: test-cpp test-go
 
 #TODO: proxy master query node writer's unittest
-test-go:
+test-go:get-rocksdb
 	@echo "Running go unittests..."
 	@(env bash $(PWD)/scripts/run_go_unittest.sh)
 
@@ -127,6 +133,7 @@ install: all
 	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/master $(GOPATH)/bin/master
 	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/proxy $(GOPATH)/bin/proxy
 	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/writenode $(GOPATH)/bin/writenode
+	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/indexbuilder $(GOPATH)/bin/indexbuilder
 	@mkdir -p $(LIBRARY_PATH) && cp -f $(PWD)/internal/core/output/lib/* $(LIBRARY_PATH)
 	@echo "Installation successful."
 
@@ -134,7 +141,10 @@ clean:
 	@echo "Cleaning up all the generated files"
 	@find . -name '*.test' | xargs rm -fv
 	@find . -name '*~' | xargs rm -fv
-	@rm -rvf querynode
-	@rm -rvf master
-	@rm -rvf proxy
-	@rm -rvf writenode
+	@rm -rf bin/
+	@rm -rf lib/
+	@rm -rf $(GOPATH)/bin/master
+	@rm -rf $(GOPATH)/bin/proxy
+	@rm -rf $(GOPATH)/bin/querynode
+	@rm -rf $(GOPATH)/bin/writenode
+	@rm -rf $(GOPATH)/bin/indexbuilder
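The new `get-rocksdb` target points CGO at the static RocksDB build under `internal/kv/rocksdb/cwrapper/output` so that `github.com/tecbot/gorocksdb` (added to go.mod in this commit) can link against it. As a minimal sketch of what those flags enable, assuming the target has been run first (the database path below is just a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/tecbot/gorocksdb"
)

func main() {
	// CGO_CFLAGS/CGO_LDFLAGS from the get-rocksdb target must already
	// point at the static RocksDB build for this to compile and link.
	opts := gorocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	defer opts.Destroy()

	db, err := gorocksdb.OpenDb(opts, "/tmp/rocksmq-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	wo := gorocksdb.NewDefaultWriteOptions()
	ro := gorocksdb.NewDefaultReadOptions()
	defer wo.Destroy()
	defer ro.Destroy()

	// Write one key/value pair and read it back.
	if err := db.Put(wo, []byte("key"), []byte("value")); err != nil {
		log.Fatal(err)
	}
	val, err := db.Get(ro, []byte("key"))
	if err != nil {
		log.Fatal(err)
	}
	defer val.Free()
	fmt.Println(string(val.Data()))
}
```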
@@ -6,4 +6,4 @@ PULSAR_ADDRESS=pulsar://pulsar:6650
 ETCD_ADDRESS=etcd:2379
 MASTER_ADDRESS=master:53100
 MINIO_ADDRESS=minio:9000
-INDEX_BUILDER_ADDRESS=indexbuilder:31000
+INDEX_BUILDER_ADDRESS=indexbuider:31000
@@ -9,31 +9,12 @@
 # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 # or implied. See the License for the specific language governing permissions and limitations under the License.
 
-FROM milvusdb/milvus-distributed-dev:amd64-ubuntu18.04-latest AS openblas
+FROM alpine:3.12.1
 
-#FROM alpine
-FROM ubuntu:bionic-20200921
-
-RUN apt-get update && apt-get install -y --no-install-recommends libtbb-dev gfortran
-
-#RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories
-
-#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories \
-# && apk add --no-cache libtbb gfortran
-
-COPY --from=openblas /usr/lib/libopenblas-r0.3.9.so /usr/lib/
-
-RUN ln -s /usr/lib/libopenblas-r0.3.9.so /usr/lib/libopenblas.so.0 && \
-    ln -s /usr/lib/libopenblas.so.0 /usr/lib/libopenblas.so
-
 COPY ./bin/indexbuilder /milvus-distributed/bin/indexbuilder
 
 COPY ./configs/ /milvus-distributed/configs/
 
-COPY ./lib/ /milvus-distributed/lib/
-
-ENV LD_LIBRARY_PATH=/milvus-distributed/lib:$LD_LIBRARY_PATH:/usr/lib
-
 WORKDIR /milvus-distributed/
 
 CMD ["./bin/indexbuilder"]
@@ -36,6 +36,14 @@ services:
     networks:
       - milvus
 
+  jaeger:
+    image: jaegertracing/all-in-one:latest
+    ports:
+      - "6831:6831/udp"
+      - "16686:16686"
+    networks:
+      - milvus
+
 networks:
   milvus:
@@ -86,5 +86,10 @@ services:
     networks:
       - milvus
 
+  jaeger:
+    image: jaegertracing/all-in-one:latest
+    networks:
+      - milvus
+
 networks:
   milvus:
@@ -13,8 +13,8 @@
 ```go
 type Client interface {
   BuildIndex(req BuildIndexRequest) (BuildIndexResponse, error)
-  DescribeIndex(indexID UniqueID) (IndexDescription, error)
+  GetIndexStates(req IndexStatesRequest) (IndexStatesResponse, error)
-  GetIndexFilePaths(indexID UniqueID) (IndexFilePaths, error)
+  GetIndexFilePaths(req IndexFilePathRequest) (IndexFilePathsResponse, error)
 }
 ```
 
@@ -36,19 +36,23 @@ type BuildIndexResponse struct {
 
 
 
-* *DescribeIndex*
+* *GetIndexStates*
 
 ```go
-enum IndexStatus {
+type IndexStatesRequest struct {
+  IndexID UniqueID
+}
+
+enum IndexState {
   NONE = 0;
   UNISSUED = 1;
   INPROGRESS = 2;
   FINISHED = 3;
 }
 
-type IndexDescription struct {
+type IndexStatesResponse struct {
   ID UniqueID
-  Status IndexStatus
+  State IndexState
   EnqueueTime time.Time
   ScheduleTime time.Time
   BuildCompleteTime time.Time
@@ -60,8 +64,28 @@ type IndexDescription struct {
 * *GetIndexFilePaths*
 
 ```go
-type IndexFilePaths struct {
+type IndexFilePathRequest struct {
+  IndexID UniqueID
+}
+
+type IndexFilePathsResponse struct {
   FilePaths []string
 }
 ```
 
 
 
+#### 8.3 Index Node
+
+```go
+type IndexNode interface {
+  Start() error
+  Close() error
+
+  // SetTimeTickChannel(channelID string) error
+  SetStatsChannel(channelID string) error
+
+  BuildIndex(req BuildIndexRequest) (BuildIndexResponse, error)
+}
+```
+
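A minimal sketch of how a caller might drive the updated index interface, polling `GetIndexStates` until the index reaches `FINISHED`. The stub types below stand in for the definitions above and are not part of the diff:

```go
package indexexample

import (
	"fmt"
	"time"
)

// Stand-ins mirroring the shapes defined in the document above.
type UniqueID = int64
type IndexState int

const (
	NONE IndexState = iota
	UNISSUED
	INPROGRESS
	FINISHED
)

type IndexStatesRequest struct{ IndexID UniqueID }
type IndexStatesResponse struct {
	ID    UniqueID
	State IndexState
}

// IndexServiceClient is the subset of the Client interface used here.
type IndexServiceClient interface {
	GetIndexStates(req IndexStatesRequest) (IndexStatesResponse, error)
}

// waitForIndex polls GetIndexStates once per second until FINISHED.
func waitForIndex(c IndexServiceClient, id UniqueID) error {
	for {
		resp, err := c.GetIndexStates(IndexStatesRequest{IndexID: id})
		if err != nil {
			return err
		}
		if resp.State == FINISHED {
			fmt.Println("index", resp.ID, "finished")
			return nil
		}
		time.Sleep(time.Second)
	}
}
```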
@@ -1,5 +1,3 @@
-
-
 ## 8. Message Stream Service
 
 
@@ -8,13 +6,13 @@
 
 
 
-#### 8.2 API
+#### 8.2 Message Stream Service API
 
 ```go
 type Client interface {
-  CreateChannels(req CreateChannelRequest) (ChannelID []string, error)
+  CreateChannels(req CreateChannelRequest) (CreateChannelResponse, error)
-  DestoryChannels(channelID []string) error
+  DestoryChannels(req DestoryChannelRequest) error
-  DescribeChannels(channelID []string) (ChannelDescriptions, error)
+  DescribeChannels(req DescribeChannelRequest) (DescribeChannelResponse, error)
 }
 ```
 
@@ -30,7 +28,19 @@ type OwnerDescription struct {
 
 type CreateChannelRequest struct {
   OwnerDescription OwnerDescription
-  numChannels int
+  NumChannels int
+}
+
+type CreateChannelResponse struct {
+  ChannelIDs []string
+}
+```
+
+* *DestoryChannels*
+
+```go
+type DestoryChannelRequest struct {
+  ChannelIDs []string
 }
 ```
 
@@ -39,19 +49,22 @@ type CreateChannelRequest struct {
 * *DescribeChannels*
 
 ```go
+type DescribeChannelRequest struct {
+  ChannelIDs []string
+}
+
 type ChannelDescription struct {
+  ChannelID string
   Owner OwnerDescription
 }
 
-type ChannelDescriptions struct {
+type DescribeChannelResponse struct {
   Descriptions []ChannelDescription
 }
 ```
 
 
 
 #### A.3 Message Stream
 
 ``` go
@@ -60,7 +73,7 @@ const {
 kInsert MsgType = 400
 kDelete MsgType = 401
 kSearch MsgType = 500
-KSearchResult MsgType = 1000
+kSearchResult MsgType = 1000
 
 kSegStatistics MsgType = 1100
 
@@ -154,3 +167,55 @@ func NewUnmarshalDispatcher() *UnmarshalDispatcher
 ```
 
 
 
+#### A.4 RocksMQ
+
+RocksMQ is a RocksDB-based messaging/streaming library.
+
+```go
+type ProducerMessage struct {
+  Key string
+  Payload []byte
+}
+```
+
+```go
+type ConsumerMessage struct {
+  MsgID MessageID
+  Key string
+  Payload []byte
+}
+```
+
+
+
+```GO
+type RocksMQ struct {
+  CreateChannel(channelName string) error
+  DestroyChannel(channelName string) error
+  CreateConsumerGroup(groupName string) error
+  DestroyConsumerGroup(groupName string) error
+
+  Produce(channelName string, messages []ProducerMessage) error
+  Consume(groupName string, channelName string, n int) ([]ConsumerMessage, error)
+  Seek(groupName string, channelName string, msgID MessageID) error
+}
+```
+
+
+
+##### A.4.1 Meta
+
+* channel meta
+
+```go
+"$(channel_name)/start_id", MessageID
+"$(channel_name)/end_id", MessageID
+```
+
+* consumer group meta
+
+```go
+"$(group_name)/$(channel_name)/current_id", MessageID
+```
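A hedged usage sketch of the RocksMQ API above. The document declares `RocksMQ` as a struct carrying method signatures; the sketch restates the same contract as an interface, with stand-in type aliases, to show a produce/consume round trip:

```go
package rocksmqexample

import "fmt"

// Assumed stand-ins for the types named in the document.
type MessageID = int64

type ProducerMessage struct {
	Key     string
	Payload []byte
}

type ConsumerMessage struct {
	MsgID   MessageID
	Key     string
	Payload []byte
}

// RocksMQ restates the struct's method signatures as an interface.
type RocksMQ interface {
	CreateChannel(channelName string) error
	CreateConsumerGroup(groupName string) error
	Produce(channelName string, messages []ProducerMessage) error
	Consume(groupName string, channelName string, n int) ([]ConsumerMessage, error)
}

// roundTrip publishes one message and reads it back through a consumer group.
func roundTrip(mq RocksMQ) error {
	if err := mq.CreateChannel("ch0"); err != nil {
		return err
	}
	if err := mq.CreateConsumerGroup("g0"); err != nil {
		return err
	}
	if err := mq.Produce("ch0", []ProducerMessage{{Key: "k", Payload: []byte("v")}}); err != nil {
		return err
	}
	msgs, err := mq.Consume("g0", "ch0", 1)
	if err != nil {
		return err
	}
	for _, m := range msgs {
		fmt.Printf("msg %d: %s=%s\n", m.MsgID, m.Key, m.Payload)
	}
	return nil
}
```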
@@ -12,23 +12,60 @@
 
 ```go
 type Client interface {
+  RegisterLink() (ProxyInfo, error)
+  RegisterNode(req NodeInfo) (InitParams, error)
   GetTimeTickChannel() (string, error)
   GetStatsChannel() (string, error)
 }
 ```
 
+* *RegisterLink*
+
-#### 6.1 Gateway API
 
 ```go
 type ProxyInfo struct {
   Address string
   Port int32
 }
+```
+
-type Client interface {
-  RegisterLink() (ProxyInfo, error)
+* *RegisterNode*
+
+```go
+type NodeInfo struct {}
+
+type InitParams struct {}
+```
+
+
+
+#### 6.0 ProxyNode
+
+```go
+type ProxyNode interface {
+  Start() error
+  Close() error
+
+  SetTimeTickChannel(channelID string) error
+  SetStatsChannel(channelID string) error
+
+  CreateCollection(req CreateCollectionRequest) error
+  DropCollection(req DropCollectionRequest) error
+  HasCollection(req HasCollectionRequest) (bool, error)
+  DescribeCollection(req DescribeCollectionRequest) (DescribeCollectionResponse, error)
+  GetCollectionStatistics(req CollectionStatsRequest) (CollectionStatsResponse, error)
+  ShowCollections(req ShowCollectionRequest) ([]string, error)
+
+  CreatePartition(req CreatePartitionRequest) error
+  DropPartition(req DropPartitionRequest) error
+  HasPartition(req HasPartitionRequest) (bool, error)
+  GetPartitionStatistics(req PartitionStatsRequest) (PartitionStatsResponse, error)
+  ShowPartitions(req ShowPartitionRequest) ([]string, error)
+
+  CreateIndex(req CreateIndexRequest) error
+  DescribeIndex(DescribeIndexRequest) (DescribeIndexResponse, error)
+
+  Insert(req RowBatch) (InsertResponse, error)
+  Search(req SearchRequest) (SearchResults, error)
 }
 ```
 
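A small sketch of how a caller might use a subset of the ProxyNode interface above; the request types here are stand-ins for the definitions referenced elsewhere in this document set:

```go
package proxyexample

// Minimal stand-ins for the request types named in the interface above.
type CreateCollectionRequest struct{ CollectionName string }
type HasCollectionRequest struct{ CollectionName string }
type DropCollectionRequest struct{ CollectionName string }

// ProxyNodeSubset is the slice of ProxyNode exercised by this sketch.
type ProxyNodeSubset interface {
	CreateCollection(req CreateCollectionRequest) error
	HasCollection(req HasCollectionRequest) (bool, error)
	DropCollection(req DropCollectionRequest) error
}

// ensureCollection creates a collection only when it does not exist yet.
func ensureCollection(p ProxyNodeSubset, name string) error {
	ok, err := p.HasCollection(HasCollectionRequest{CollectionName: name})
	if err != nil {
		return err
	}
	if !ok {
		return p.CreateCollection(CreateCollectionRequest{CollectionName: name})
	}
	return nil
}
```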
@@ -12,15 +12,22 @@ type Client interface {
   CreateCollection(req CreateCollectionRequest) error
   DropCollection(req DropCollectionRequest) error
   HasCollection(req HasCollectionRequest) (bool, error)
-  DescribeCollection(req DescribeCollectionRequest) (CollectionDescription, error)
+  DescribeCollection(req DescribeCollectionRequest) (CollectionDescriptionResponse, error)
-  ShowCollections(req ShowCollectionRequest) ([]string, error)
+  GetCollectionStatistics(req CollectionStatsRequest) (CollectionStatsResponse, error)
+  ShowCollections(req ShowCollectionRequest) (ShowCollectionResponse, error)
 
   CreatePartition(req CreatePartitionRequest) error
   DropPartition(req DropPartitionRequest) error
   HasPartition(req HasPartitionRequest) (bool, error)
-  DescribePartition(req DescribePartitionRequest) (PartitionDescription, error)
+  GetPartitionStatistics(req PartitionStatsRequest) (PartitionStatsResponse, error)
-  ShowPartitions(req ShowPartitionRequest) ([]string, error)
+  ShowPartitions(req ShowPartitionRequest) (ShowPartitionResponse, error)
 
+  CreateIndex(req CreateIndexRequest) error
+  DescribeIndex(DescribeIndexRequest) (DescribeIndexResponse, error)
+
   AllocTimestamp(req TsoRequest) (TsoResponse, error)
   AllocID(req IDRequest) (IDResponse, error)
 
   GetDdChannel() (string, error)
   GetTimeTickChannel() (string, error)
   GetStatsChannel() (string, error)
@@ -29,6 +36,81 @@ type Client interface {
 
 
 
+* *DescribeCollection*
+
+```go
+type DescribeCollectionRequest struct {
+  CollectionName string
+}
+
+type CollectionDescriptionResponse struct {
+  Schema CollectionSchema
+}
+```
+
+* *GetCollectionStatistics*
+
+```go
+type CollectionStatsRequest struct {
+  CollectionName string
+}
+
+type CollectionStatsResponse struct {
+  Stats []KeyValuePair
+}
+```
+
+* *ShowCollections*
+
+```go
+type ShowCollectionResponse struct {
+  CollectionNames []string
+}
+```
+
+* *GetPartitionStatistics*
+
+```go
+type PartitionStatsRequest struct {
+  CollectionName string
+  PartitionTag string
+}
+
+type PartitionStatsResponse struct {
+  Stats []KeyValuePair
+}
+```
+
+* *ShowPartitions*
+
+```go
+type ShowPartitionResponse struct {
+  PartitionTags []string
+}
+```
+
+* *DescribeIndex*
+
+```go
+type DescribeIndexRequest struct {
+  CollectionName string
+  FieldName string
+}
+
+type IndexDescription struct {
+  IndexName string
+  params []KeyValuePair
+}
+
+type DescribeIndexResponse struct {
+  IndexDescriptions []IndexDescription
+}
+```
+
+
+
 #### 10.1 Interfaces (RPC)
 
 | RPC | description |
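A brief sketch against the new `GetCollectionStatistics` call defined above, with stand-in types mirroring the request/response shapes:

```go
package masterexample

import "fmt"

// Stand-ins matching the shapes defined in the document above.
type KeyValuePair struct{ Key, Value string }

type CollectionStatsRequest struct{ CollectionName string }
type CollectionStatsResponse struct{ Stats []KeyValuePair }

// MasterClientSubset is the slice of the master Client used here.
type MasterClientSubset interface {
	GetCollectionStatistics(req CollectionStatsRequest) (CollectionStatsResponse, error)
}

// printCollectionStats fetches and prints the statistics key/value pairs.
func printCollectionStats(c MasterClientSubset, name string) error {
	resp, err := c.GetCollectionStatistics(CollectionStatsRequest{CollectionName: name})
	if err != nil {
		return err
	}
	for _, kv := range resp.Stats {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value)
	}
	return nil
}
```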
@@ -2,20 +2,21 @@
 
 ## 8. Query Service
 
 
 
 #### 8.1 Overview
 
 <img src="./figs/query_service.jpeg" width=700>
 
 
 
-#### 8.2 API
+#### 8.2 Query Service API
 
 ```go
 type Client interface {
-  DescribeService() (ServiceDescription, error)
+  RegisterNode(req NodeInfo) (InitParams, error)
-  DescribeParition(req DescribeParitionRequest) (PartitionDescriptions, error)
+  GetServiceStates() (ServiceStatesResponse, error)
+  ShowCollections(req ShowCollectionRequest) (ShowCollectionResponse, error)
+  ShowPartitions(req ShowPartitionRequest) (ShowPartitionResponse, error)
+  GetPartitionStates(req PartitionStatesRequest) (PartitionStatesResponse, error)
   LoadPartitions(req LoadPartitonRequest) error
   ReleasePartitions(req ReleasePartitionRequest) error
   CreateQueryChannel() (QueryChannels, error)
@@ -26,62 +27,95 @@ type Client interface {
 
 
 
-* *DescribeService*
+* *RegisterNode*
 
 ```go
-type QueryNodeDescription struct {
-  ResourceCost ResourceCost
-}
+type NodeInfo struct {}
+
+type InitParams struct {}
+```
+
+* *GetServiceStates*
+
+```go
+type NodeState = int
+
+const (
+  INITIALIZING NodeState = 0
+  HEALTHY NodeState = 1
+  ABNORMAL NodeState = 2
+)
+
+//type ResourceCost struct {
+//  MemUsage int64
+//  CpuUsage float32
+//}
+
+type QueryNodeStates struct {
+  NodeState NodeState
+  //ResourceCost ResourceCost
+}
 
-type CollectionDescription struct {
-  ParitionIDs []UniqueID
-}
+type ServiceStatesResponse struct {
+  ServiceState NodeState
+}
+```
+
+* *ShowCollections*
+
+```go
+type ShowCollectionRequest struct {
+  DbID UniqueID
+}
 
-type DbDescription struct {
-  CollectionDescriptions []CollectionDescription
-}
+type ShowCollectionResponse struct {
+  CollectionIDs []UniqueID
+}
+```
+
+* *ShowPartitions*
+
+```go
+type ShowPartitionRequest struct {
+  DbID UniqueID
+  CollectionID UniqueID
+}
 
-type ServiceDescription struct {
-  DbDescriptions map[UniqueID]DbDescription
-  NodeDescriptions map[UniqueID]QueryNodeDescription
-}
+type ShowPartitionResponse struct {
+  PartitionIDs []UniqueID
+}
 ```
 
 
 
-* *DescribeParition*
+* *GetPartitionStates*
 
 ```go
-type DescribeParitionRequest struct {
-  DbID UniqueID
-  CollectionID UniqueID
-  partitionIDs []UniqueID
-}
-
 type PartitionState = int
 
 const (
   NOT_EXIST PartitionState = 0
-  ON_DISK PartitionState = 1
-  PARTIAL_IN_MEMORY PartitionState = 2
-  IN_MEMORY PartitionState = 3
-  PARTIAL_IN_GPU PartitionState = 4
-  IN_GPU PartitionState = 5
+  NOT_PRESENT PartitionState = 1
+  ON_DISK PartitionState = 2
+  PARTIAL_IN_MEMORY PartitionState = 3
+  IN_MEMORY PartitionState = 4
+  PARTIAL_IN_GPU PartitionState = 5
+  IN_GPU PartitionState = 6
 )
 
-type ResourceCost struct {
-  MemUsage int64
-  CpuUsage float32
-}
+type PartitionStatesRequest struct {
+  DbID UniqueID
+  CollectionID UniqueID
+  partitionIDs []UniqueID
+}
 
-type PartitionDescription struct {
-  ID UniqueID
+type PartitionStates struct {
+  PartitionID UniqueID
   State PartitionState
-  ResourceCost ResourceCost
 }
 
-type PartitionDescriptions struct {
-  PartitionDescriptions []PartitionDescription
-}
+type PartitionStatesResponse struct {
+  States []PartitionStates
+}
 ```
 
@@ -122,6 +156,33 @@ type ReleasePartitionRequest struct {
 
 
 
+* *LoadSegments*
+
+```go
+type LoadSegmentRequest struct {
+  DbID UniqueID
+  CollectionID UniqueID
+  PartitionID UniqueID
+  SegmentIDs []UniqueID
+  FieldIDs []int64
+}
+```
+
+
+
+* *ReleaseSegments*
+
+```go
+type ReleaseSegmentRequest struct {
+  DbID UniqueID
+  CollectionID UniqueID
+  PartitionID UniqueID
+  SegmentIDs []UniqueID
+}
+```
+
+
+
 #### 8.2 Query Node
 
 ```go
@@ -129,23 +190,20 @@ type QueryNode interface {
   Start() error
   Close() error
 
-  AddQueryStream(requestStream MsgStream, resultStream MsgStream) error
+  AddQueryChannel(channelIDs QueryChannels) error
-  RemoveQueryStream(requestStreamID string) error
+  RemoveQueryChannel(channelIDs QueryChannels) error
-  WatchDmStreams(insertStreams MsgStream) error
+  WatchDmChannels(insertChannelIDs []string) error
-  WatchDdStream(stream MsgStream) error
+  //SetTimeTickChannel(channelID string) error
-  SetTimeTickStream(stream MsgStream) error
+  //SetStatsChannel(channelID string) error
-  SetStatsStream(stream MsgStream) error
 
-  LoadSegments(DbID UniqueID, CollID UniqueID, PartitionID UniqueID, SegIDs []UniqueID, FieldIDs []int64) error
+  LoadSegments(req LoadSegmentRequest) error
-  ReleaseSegments(DbID UniqueID, CollID UniqueID, PartitionID UniqueID, SegIDs []UniqueID) error
+  ReleaseSegments(req ReleaseSegmentRequest) error
-  DescribeParition(DbID UniqueID, CollID UniqueID, PartitionID UniqueID) (PartitionDescription, error)
+  DescribeParition(req DescribeParitionRequest) (PartitionDescriptions, error)
 }
 ```
 
 
 
 #### 8.2 Collection Replica
 
 $collectionReplica$ contains a in-memory local copy of persistent collections. In common cases, the system has multiple query nodes. Data of a collection will be distributed across all the available query nodes, and each query node's $collectionReplica$ will maintain its own share (only part of the collection).
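A usage sketch for `GetPartitionStates` as defined above: poll until every requested partition reports `IN_MEMORY`, for example after a `LoadPartitions` call. The types are stand-ins copied from the definitions in this section:

```go
package queryexample

import "time"

// Stand-ins following the definitions above.
type UniqueID = int64
type PartitionState = int

const (
	NOT_EXIST PartitionState = iota
	NOT_PRESENT
	ON_DISK
	PARTIAL_IN_MEMORY
	IN_MEMORY
	PARTIAL_IN_GPU
	IN_GPU
)

type PartitionStatesRequest struct {
	DbID         UniqueID
	CollectionID UniqueID
	partitionIDs []UniqueID
}

type PartitionStates struct {
	PartitionID UniqueID
	State       PartitionState
}

type PartitionStatesResponse struct{ States []PartitionStates }

// QueryServiceSubset is the slice of the query service Client used here.
type QueryServiceSubset interface {
	GetPartitionStates(req PartitionStatesRequest) (PartitionStatesResponse, error)
}

// waitInMemory polls until every requested partition reports IN_MEMORY.
func waitInMemory(c QueryServiceSubset, req PartitionStatesRequest) error {
	for {
		resp, err := c.GetPartitionStates(req)
		if err != nil {
			return err
		}
		done := true
		for _, s := range resp.States {
			if s.State != IN_MEMORY {
				done = false
				break
			}
		}
		if done {
			return nil
		}
		time.Sleep(time.Second)
	}
}
```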
@@ -8,14 +8,17 @@
 
 <img src="./figs/data_service.jpeg" width=700>
 
+#### 8.2 Data Service API
-#### 8.2 API
 
 ```go
 type Client interface {
+  RegisterNode(req NodeInfo) (InitParams, error)
   AssignSegmentID(req AssignSegIDRequest) (AssignSegIDResponse, error)
-  Flush(req FlushRequest) (error)
+  Flush(req FlushRequest) error
+  ShowSegments(req ShowSegmentRequest) (ShowSegmentResponse, error)
+  GetSegmentStates(req SegmentStatesRequest) (SegmentStatesResponse, error)
   GetInsertBinlogPaths(req InsertBinlogPathRequest) (InsertBinlogPathsResponse, error)
 
   GetInsertChannels(req InsertChannelRequest) ([]string, error)
   GetTimeTickChannel() (string, error)
   GetStatsChannel() (string, error)
@@ -24,6 +27,14 @@ type Client interface {
 
 
 
+* *RegisterNode*
+
+```go
+type NodeInfo struct {}
+
+type InitParams struct {}
+```
+
 * *AssignSegmentID*
 
 ```go
@@ -65,15 +76,53 @@ type FlushRequest struct {
 
 
 
+* *ShowSegments*
+
+```go
+type ShowSegmentRequest struct {
+  CollectionID UniqueID
+  PartitionID UniqueID
+}
+
+type ShowSegmentResponse struct {
+  SegmentIDs []UniqueID
+}
+```
+
+
+
+* *GetSegmentStates*
+
+```go
+enum SegmentState {
+  NONE = 0;
+  NOT_EXIST = 1;
+  GROWING = 2;
+  SEALED = 3;
+}
+
+type SegmentStatesRequest struct {
+  SegmentID UniqueID
+}
+
+type SegmentStatesResponse struct {
+  State SegmentState
+  CreateTime Timestamp
+  SealedTime Timestamp
+}
+```
+
+
+
 * *GetInsertBinlogPaths*
 
 ```go
 type InsertBinlogPathRequest struct {
-  segmentID UniqueID
+  SegmentID UniqueID
 }
 
 type InsertBinlogPathsResponse struct {
-  FieldIdxToPaths map[int32][]string
+  FieldIDToPaths map[int64][]string
 }
 ```
 
@@ -88,3 +137,21 @@ type InsertChannelRequest struct {
 }
 ```
 
+
+
+#### 8.2 Data Node API
+
+```go
+type DataNode interface {
+  Start() error
+  Close() error
+
+  WatchDmChannels(channelIDs []string) error
+  WatchDdChannel(channelID string) error
+  SetTimeTickChannel(channelID string) error
+  SetStatsChannel(channelID string) error
+
+  Flush(req FlushRequest) error
+}
+```
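A sketch of checking a segment through the new `GetSegmentStates` call, with stand-in types following the shapes above:

```go
package dataexample

import "fmt"

// Stand-ins following the definitions above.
type UniqueID = int64
type Timestamp = uint64
type SegmentState int

const (
	NONE SegmentState = iota
	NOT_EXIST
	GROWING
	SEALED
)

type SegmentStatesRequest struct{ SegmentID UniqueID }
type SegmentStatesResponse struct {
	State      SegmentState
	CreateTime Timestamp
	SealedTime Timestamp
}

// DataServiceSubset is the slice of the data service Client used here.
type DataServiceSubset interface {
	GetSegmentStates(req SegmentStatesRequest) (SegmentStatesResponse, error)
}

// reportSegment prints whether a segment is still growing or already sealed.
func reportSegment(c DataServiceSubset, id UniqueID) error {
	resp, err := c.GetSegmentStates(SegmentStatesRequest{SegmentID: id})
	if err != nil {
		return err
	}
	if resp.State == SEALED {
		fmt.Printf("segment %d sealed at %d\n", id, resp.SealedTime)
	} else {
		fmt.Printf("segment %d state=%d\n", id, resp.State)
	}
	return nil
}
```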
docs/jaeger_guides/Opentracing User Guide.md (new file) | 108
@@ -0,0 +1,108 @@
+# Opentracing User Guide
+
+This guide describes how to use Jaeger and what its most common views mean.
+
+Jaeger, inspired by [Dapper](https://research.google.com/pubs/pub36356.html) and [OpenZipkin](https://zipkin.io/), is a distributed tracing platform created by [Uber Technologies](https://uber.github.io/) and donated to [Cloud Native Computing Foundation](https://cncf.io/). It can be used for monitoring microservices-based distributed systems:
+
+- Distributed context propagation
+- Distributed transaction monitoring
+- Root cause analysis
+- Service dependency analysis
+- Performance / latency optimization
+
+See also:
+
+- Jaeger [documentation](https://jaegertracing.io/docs/) for getting started, operational details, and other information.
+- Blog post [Evolving Distributed Tracing at Uber](https://eng.uber.com/distributed-tracing/).
+- Tutorial / walkthrough [Take OpenTracing for a HotROD ride](https://medium.com/@YuriShkuro/take-opentracing-for-a-hotrod-ride-f6e3141f7941).
+
+We mainly use Jaeger as an implementation of OpenTracing.
+
+Two requests in the milvus-distributed system are traced at this stage: **Insert Request** and **Search Request**.
+
+## Jaeger Home page
+
+![jaeger home page](figs/jaeger_home_page.png)
+
+### Lookup by Trace ID
+
+Using the search box requires a configured log collection system. For example, when the log collection system captures an error log, take the Trace ID it contains and search for it in Jaeger to quickly locate, and then fix, the error.
+
+### Search
+
+### Service
+
+Filter by service name.
+
+### Operation
+
+An operation within a service, e.g. a request name or a function name.
+
+### Tags
+
+Set tags to make searching easier. Tags are defined in code.
+
+### Lookback
+
+Filter by time.
+
+### Min Duration / Max Duration
+
+The minimum and maximum request duration; use these conditions to find unusually slow or unusually fast requests.
+
+### Limit Result
+
+The maximum number of results.
+
+## Search result
+
+Traces matching the conditions above appear in the right half of the page.
+
+![jaeger single search result](figs/jaeger_single_search_result.png)
+
+The detailed information of a search result:
+
+1. The upper left corner shows the service name (tracing), the root span name (Insert grpc received), and the first half of the Trace ID (46874e2).
+2. The duration of the entire request is shown in the upper right corner.
+3. "10675 Spans" means there are 10675 operations; the number of operations per service is shown in the middle, and the trace time on the right.
+4. You can select multiple traces with the box in the upper left and compare them to spot differences. For example, different insert requests may take different times; select two for comparison, and the comparison makes it easy to see which trace went wrong.
+
+## Detailed trace information
+
+Click a search result to analyze the detailed trace information.
+
+![jaeger detailed trace info](figs/jaeger_detailed_trace_info.png)
+
+1. Duration: total time consumed.
+2. Services: the number of services called.
+3. Depth: call chain depth.
+4. Total Spans: this call consists of 10 spans.
+5. In the details, look at the left half first, which shows the call chain of the entire request. Black text is the service name; gray text is the span name defined in the code.
+6. The right half shows the duration of each call; a bar's length represents the time that span consumed within the entire call chain.
+
+### Span Detail information
+
+Click a span to see its detailed information, such as the last span in the picture above.
+
+1. Tags contain a series of custom tags. You can mark in code what type of call a span is, its request method, its call result, and so on. Everything recorded here can be filtered through the Tags box on the home page.
+2. Process locates which specific server processed this data.
+3. Logs are the logs printed by this span during the call.
+
+## More
+
+More usage guides will be added in the future.
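This commit also adds `github.com/uber/jaeger-client-go` and promotes `github.com/opentracing/opentracing-go` to a direct dependency in go.mod. As a hedged sketch of wiring a tracer to the jaeger service added in docker-compose (the agent address and service name below are assumptions, not taken from the repository):

```go
package main

import (
	"io"
	"log"

	"github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

// newTracer builds a Jaeger tracer that samples every request and
// reports spans to the agent (the compose service exposes 6831/udp).
func newTracer(service string) (opentracing.Tracer, io.Closer, error) {
	cfg := jaegercfg.Configuration{
		ServiceName: service,
		Sampler: &jaegercfg.SamplerConfig{
			Type:  jaeger.SamplerTypeConst,
			Param: 1, // sample all traces
		},
		Reporter: &jaegercfg.ReporterConfig{
			LogSpans:           true,
			LocalAgentHostPort: "jaeger:6831", // assumed compose service name
		},
	}
	return cfg.NewTracer()
}

func main() {
	tracer, closer, err := newTracer("tracing-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close()
	opentracing.SetGlobalTracer(tracer)

	// Emit one span so something shows up on the Jaeger home page.
	span := tracer.StartSpan("demo-operation")
	span.SetTag("example", true)
	span.Finish()
}
```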
docs/jaeger_guides/figs/jaeger_detailed_trace_info.png (new file) | BIN (403 KiB, binary file not shown)
docs/jaeger_guides/figs/jaeger_home_page.png (new file) | BIN (336 KiB, binary file not shown)
docs/jaeger_guides/figs/jaeger_single_search_result.png (new file) | BIN (183 KiB, binary file not shown)
go.mod | 22
@@ -4,14 +4,19 @@ go 1.15
 
 require (
 	code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48 // indirect
+	github.com/HdrHistogram/hdrhistogram-go v1.0.1 // indirect
 	github.com/apache/pulsar-client-go v0.1.1
-	github.com/aws/aws-sdk-go v1.30.8
+	github.com/apache/thrift v0.13.0
+	github.com/aws/aws-sdk-go v1.30.8 // indirect
 	github.com/coreos/etcd v3.3.25+incompatible // indirect
-	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548
+	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
+	github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect
+	github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
+	github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
 	github.com/frankban/quicktest v1.10.2 // indirect
 	github.com/fsnotify/fsnotify v1.4.9 // indirect
-	github.com/git-hooks/git-hooks v1.3.1 // indirect
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
+	github.com/golang/mock v1.3.1
 	github.com/golang/protobuf v1.3.2
 	github.com/google/btree v1.0.0
 	github.com/klauspost/compress v1.10.11 // indirect
@@ -20,22 +25,25 @@ require (
 	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
 	github.com/onsi/ginkgo v1.12.1 // indirect
 	github.com/onsi/gomega v1.10.0 // indirect
-	github.com/opentracing/opentracing-go v1.2.0 // indirect
+	github.com/opentracing/opentracing-go v1.2.0
 	github.com/pierrec/lz4 v2.5.2+incompatible // indirect
 	github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 // indirect
 	github.com/pingcap/errors v0.11.4 // indirect
 	github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 // indirect
-	github.com/pivotal-golang/bytefmt v0.0.0-20200131002437-cf55d5288a48
+	github.com/pivotal-golang/bytefmt v0.0.0-20200131002437-cf55d5288a48 // indirect
 	github.com/prometheus/client_golang v1.5.1 // indirect
 	github.com/prometheus/common v0.10.0 // indirect
 	github.com/prometheus/procfs v0.1.3 // indirect
-	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/quasilyte/go-ruleguard v0.2.1 // indirect
 	github.com/sirupsen/logrus v1.6.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0
 	github.com/spf13/cast v1.3.0
 	github.com/spf13/viper v1.7.1
 	github.com/stretchr/testify v1.6.1
+	github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c
 	github.com/tikv/client-go v0.0.0-20200824032810-95774393107b
+	github.com/uber/jaeger-client-go v2.25.0+incompatible
+	github.com/uber/jaeger-lib v2.4.0+incompatible // indirect
 	github.com/urfave/cli v1.22.5 // indirect
 	github.com/yahoo/athenz v1.9.16 // indirect
 	go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738
@@ -50,7 +58,7 @@ require (
 	google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150 // indirect
 	google.golang.org/grpc v1.31.0
 	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
-	gopkg.in/yaml.v2 v2.3.0
+	gopkg.in/yaml.v2 v2.3.0 // indirect
 	honnef.co/go/tools v0.0.1-2020.1.4 // indirect
 	sigs.k8s.io/yaml v1.2.0 // indirect
 )
go.sum | 47
@@ -15,6 +15,8 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw=
+github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
@@ -24,6 +26,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1C
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/apache/pulsar-client-go v0.1.1 h1:v/kU+2ZCC6yFIcbZrFtWa9/nvVzVr18L+xYJUvZSxEQ=
 github.com/apache/pulsar-client-go v0.1.1/go.mod h1:mlxC65KL1BLhGO2bnT9zWMttVzR2czVPb27D477YpyU=
+github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
+github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/ardielle/ardielle-go v1.5.2 h1:TilHTpHIQJ27R1Tl/iITBzMwiUGSlVfiVhwDNGM3Zj4=
 github.com/ardielle/ardielle-go v1.5.2/go.mod h1:I4hy1n795cUhaVt/ojz83SNVCYIGsAFAONtv2Dr7HUI=
 github.com/ardielle/ardielle-tools v1.5.4/go.mod h1:oZN+JRMnqGiIhrzkRN9l26Cej9dEx4jeNG6A+AdkShk=
@@ -40,12 +44,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
 github.com/boynton/repl v0.0.0-20170116235056-348863958e3e/go.mod h1:Crc/GCZ3NXDVCio7Yr0o+SSrytpcFhLmVCIzi0s49t4=
-github.com/cattail/go-exclude v0.0.0-20141118090525-7e63167c2dab h1:1WOH7EEbhb6OZWcIU5RpQx5rmHm1xEUda8Qiw4UzNlU=
-github.com/cattail/go-exclude v0.0.0-20141118090525-7e63167c2dab/go.mod h1:5MSsYMW59C/HfIUsthTRDxRoMQctcmAVb1JnNSQXERA=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
@@ -68,8 +68,6 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
@@ -89,6 +87,12 @@ github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaI
 github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk=
 github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
@@ -96,8 +100,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/git-hooks/git-hooks v1.3.1 h1:ypdvNTXMiITXQxuqIl6t1f8R3V1FrUXPi1CwQ4guClo=
-github.com/git-hooks/git-hooks v1.3.1/go.mod h1:RBqjgxUpRSLI4AxVZGV1FsODclMjpsJIaDEyJSoK3ws=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -117,6 +119,7 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18h
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
|
||||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||||
github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
@ -134,10 +137,6 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
|||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
|
|
||||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
|
||||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
|
||||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
@ -168,6 +167,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
|
|||||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=
|
github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||||
|
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
|
||||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
@ -343,19 +343,19 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
|
|||||||
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
|
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
|
||||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||||
|
github.com/protocolbuffers/protobuf v3.14.0+incompatible h1:8r0H76h/Q/lEnFFY60AuM23NOnaDMi6bd7zuboSYM+o=
|
||||||
|
github.com/quasilyte/go-ruleguard v0.2.1 h1:56eRm0daAyny9UhJnmtJW/UyLZQusukBAB8oT8AHKHo=
|
||||||
|
github.com/quasilyte/go-ruleguard v0.2.1/go.mod h1:hN2rVc/uS4bQhQKTio2XaSJSafJwqBUWWwtssT3cQmc=
|
||||||
|
github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210108021830-2a284c158646 h1:ShKMENtS4KYekn92z5vdicyR7wBOgpxFNj5h0M2rNKg=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446 h1:/NRJ5vAYoqz+7sG51ubIDHXeWO8DlTSrToPu6q11ziA=
|
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446 h1:/NRJ5vAYoqz+7sG51ubIDHXeWO8DlTSrToPu6q11ziA=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
||||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
|
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
|
||||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
|
||||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
|
||||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
|
||||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
@ -389,7 +389,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
|||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||||
github.com/stretchr/testify v0.0.0-20141015234014-d6577e08ec30/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
@ -398,24 +397,27 @@ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd
|
|||||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||||
|
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
|
||||||
|
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
|
||||||
github.com/tikv/client-go v0.0.0-20200824032810-95774393107b h1:VOG2GkM7RpRrT0St7HIIwCWrc3mVdf+DjcT8r2ucusI=
|
github.com/tikv/client-go v0.0.0-20200824032810-95774393107b h1:VOG2GkM7RpRrT0St7HIIwCWrc3mVdf+DjcT8r2ucusI=
|
||||||
github.com/tikv/client-go v0.0.0-20200824032810-95774393107b/go.mod h1:K0NcdVNrXDq92YPLytsrAwRMyuXi7GZCO6dXNH7OzQc=
|
github.com/tikv/client-go v0.0.0-20200824032810-95774393107b/go.mod h1:K0NcdVNrXDq92YPLytsrAwRMyuXi7GZCO6dXNH7OzQc=
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||||
|
github.com/uber/jaeger-client-go v1.6.0 h1:3+zLlq+4npI5fg8IsgAje3YsP7TcEdNzJScyqFIzxEQ=
|
||||||
|
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
|
||||||
|
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||||
|
github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo=
|
||||||
|
github.com/uber/jaeger-lib v2.4.0+incompatible h1:fY7QsGQWiCt8pajv4r7JEvmATdCVaWxXbjwyYwsNaLQ=
|
||||||
|
github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||||
github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
|
github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
|
||||||
github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
|
github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
|
||||||
github.com/unrolled/render v1.0.0 h1:XYtvhA3UkpB7PqkvhUFYmpKD55OudoIeygcfus4vcd4=
|
github.com/unrolled/render v1.0.0 h1:XYtvhA3UkpB7PqkvhUFYmpKD55OudoIeygcfus4vcd4=
|
||||||
github.com/unrolled/render v1.0.0/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg=
|
github.com/unrolled/render v1.0.0/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg=
|
||||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||||
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
|
|
||||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
|
||||||
github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
|
|
||||||
github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||||
github.com/valyala/gozstd v1.7.0 h1:Ljh5c9zboqLhwTI33al32R72iCZfn0mCbVGcFWbGwRQ=
|
github.com/valyala/gozstd v1.7.0 h1:Ljh5c9zboqLhwTI33al32R72iCZfn0mCbVGcFWbGwRQ=
|
||||||
github.com/valyala/gozstd v1.7.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
|
github.com/valyala/gozstd v1.7.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
|
||||||
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
|
|
||||||
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=
|
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
github.com/yahoo/athenz v1.8.55/go.mod h1:G7LLFUH7Z/r4QAB7FfudfuA7Am/eCzO1GlzBhDL6Kv0=
|
github.com/yahoo/athenz v1.8.55/go.mod h1:G7LLFUH7Z/r4QAB7FfudfuA7Am/eCzO1GlzBhDL6Kv0=
|
||||||
@ -569,6 +571,7 @@ golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtn
|
|||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d h1:W07d4xkoAUSNOkOzdzXCdFGxT7o2rW4q8M34tB2i//k=
|
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d h1:W07d4xkoAUSNOkOzdzXCdFGxT7o2rW4q8M34tB2i//k=
|
||||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
|||||||
@@ -137,10 +137,7 @@ type Allocator struct {
 }
 
 func (ta *Allocator) Start() error {
-    connectMasterFn := func() error {
-        return ta.connectMaster()
-    }
-    err := Retry(10, time.Millisecond*200, connectMasterFn)
+    err := ta.connectMaster()
     if err != nil {
        panic("connect to master failed")
     }
@@ -1,40 +0,0 @@
-package allocator
-
-import (
-    "log"
-    "time"
-)
-
-// Reference: https://blog.cyeam.com/golang/2018/08/27/retry
-
-func RetryImpl(attempts int, sleep time.Duration, fn func() error, maxSleepTime time.Duration) error {
-    if err := fn(); err != nil {
-        if s, ok := err.(InterruptError); ok {
-            return s.error
-        }
-
-        if attempts--; attempts > 0 {
-            log.Printf("retry func error: %s. attempts #%d after %s.", err.Error(), attempts, sleep)
-            time.Sleep(sleep)
-            if sleep < maxSleepTime {
-                return RetryImpl(attempts, 2*sleep, fn, maxSleepTime)
-            }
-            return RetryImpl(attempts, maxSleepTime, fn, maxSleepTime)
-        }
-        return err
-    }
-    return nil
-}
-
-func Retry(attempts int, sleep time.Duration, fn func() error) error {
-    maxSleepTime := time.Millisecond * 1000
-    return RetryImpl(attempts, sleep, fn, maxSleepTime)
-}
-
-type InterruptError struct {
-    error
-}
-
-func NoRetryError(err error) InterruptError {
-    return InterruptError{err}
-}
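The deleted helper retried with capped exponential backoff: the sleep doubles after every failure until it reaches maxSleepTime (one second), and an error wrapped in NoRetryError aborts retrying immediately. A minimal sketch of how a caller exercised it, assuming the Retry function shown above is still in the same package; the flaky dial function is invented purely for illustration:

```go
package allocator

import (
	"errors"
	"testing"
	"time"
)

// TestRetryBackoff illustrates the deleted helper. dial stands in for a
// flaky call such as connectMaster and succeeds on its third attempt.
func TestRetryBackoff(t *testing.T) {
	attempt := 0
	dial := func() error {
		attempt++
		if attempt < 3 {
			return errors.New("connection refused") // retried after 200ms, then 400ms
		}
		return nil
	}
	// After 10 failed attempts Retry would give up and return the last error.
	if err := Retry(10, time.Millisecond*200, dial); err != nil {
		t.Fatal(err)
	}
	if attempt != 3 {
		t.Fatalf("expected 3 attempts, got %d", attempt)
	}
}
```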
@@ -19,7 +19,6 @@
 #include "utils/EasyAssert.h"
 #include "IndexWrapper.h"
 #include "indexbuilder/utils.h"
-#include "index/knowhere/knowhere/index/vector_index/ConfAdapterMgr.h"
 
 namespace milvus {
 namespace indexbuilder {
@@ -30,10 +29,10 @@ IndexWrapper::IndexWrapper(const char* serialized_type_params, const char* seria
 
     parse();
 
-    auto index_mode = get_index_mode();
-    auto index_type = get_index_type();
-    auto metric_type = get_metric_type();
-    AssertInfo(!is_unsupported(index_type, metric_type), index_type + " doesn't support metric: " + metric_type);
+    std::map<std::string, knowhere::IndexMode> mode_map = {{"CPU", knowhere::IndexMode::MODE_CPU},
+                                                           {"GPU", knowhere::IndexMode::MODE_GPU}};
+    auto mode = get_config_by_name<std::string>("index_mode");
+    auto index_mode = mode.has_value() ? mode_map[mode.value()] : knowhere::IndexMode::MODE_CPU;
 
     index_ = knowhere::VecIndexFactory::GetInstance().CreateVecIndex(get_index_type(), index_mode);
     Assert(index_ != nullptr);
@@ -155,11 +154,6 @@ IndexWrapper::dim() {
 void
 IndexWrapper::BuildWithoutIds(const knowhere::DatasetPtr& dataset) {
     auto index_type = get_index_type();
-    auto index_mode = get_index_mode();
-    config_[knowhere::meta::ROWS] = dataset->Get<int64_t>(knowhere::meta::ROWS);
-    auto conf_adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_type);
-    AssertInfo(conf_adapter->CheckTrain(config_, index_mode), "something wrong in index parameters!");
-
     if (is_in_need_id_list(index_type)) {
         PanicInfo(std::string(index_type) + " doesn't support build without ids yet!");
     }
@@ -179,11 +173,6 @@ IndexWrapper::BuildWithoutIds(const knowhere::DatasetPtr& dataset) {
 void
 IndexWrapper::BuildWithIds(const knowhere::DatasetPtr& dataset) {
     Assert(dataset->data().find(milvus::knowhere::meta::IDS) != dataset->data().end());
-    auto index_type = get_index_type();
-    auto index_mode = get_index_mode();
-    config_[knowhere::meta::ROWS] = dataset->Get<int64_t>(knowhere::meta::ROWS);
-    auto conf_adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_type);
-    AssertInfo(conf_adapter->CheckTrain(config_, index_mode), "something wrong in index parameters!");
     // index_->Train(dataset, config_);
     // index_->Add(dataset, config_);
     index_->BuildAll(dataset, config_);
@@ -274,31 +263,6 @@ IndexWrapper::get_index_type() {
     return type.has_value() ? type.value() : knowhere::IndexEnum::INDEX_FAISS_IVFPQ;
 }
-
-std::string
-IndexWrapper::get_metric_type() {
-    auto type = get_config_by_name<std::string>(knowhere::Metric::TYPE);
-    if (type.has_value()) {
-        return type.value();
-    } else {
-        auto index_type = get_index_type();
-        if (is_in_bin_list(index_type)) {
-            return knowhere::Metric::JACCARD;
-        } else {
-            return knowhere::Metric::L2;
-        }
-    }
-}
-
-knowhere::IndexMode
-IndexWrapper::get_index_mode() {
-    static std::map<std::string, knowhere::IndexMode> mode_map = {
-        {"CPU", knowhere::IndexMode::MODE_CPU},
-        {"GPU", knowhere::IndexMode::MODE_GPU},
-    };
-    auto mode = get_config_by_name<std::string>("index_mode");
-    return mode.has_value() ? mode_map[mode.value()] : knowhere::IndexMode::MODE_CPU;
-}
 
 std::unique_ptr<IndexWrapper::QueryResult>
 IndexWrapper::Query(const knowhere::DatasetPtr& dataset) {
     return std::move(QueryImpl(dataset, config_));
@@ -59,12 +59,6 @@ class IndexWrapper {
     std::string
     get_index_type();
-
-    std::string
-    get_metric_type();
-
-    knowhere::IndexMode
-    get_index_mode();
 
     template <typename T>
     std::optional<T>
     get_config_by_name(std::string name);
@@ -35,7 +35,7 @@ CreateIndex(const char* serialized_type_params, const char* serialized_index_par
         *res_index = index.release();
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
         status.error_msg = strdup(e.what());
     }
@@ -59,7 +59,7 @@ BuildFloatVecIndexWithoutIds(CIndex index, int64_t float_value_num, const float*
         cIndex->BuildWithoutIds(ds);
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
         status.error_msg = strdup(e.what());
     }
@@ -77,7 +77,7 @@ BuildBinaryVecIndexWithoutIds(CIndex index, int64_t data_size, const uint8_t* ve
         cIndex->BuildWithoutIds(ds);
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
         status.error_msg = strdup(e.what());
     }
@@ -94,7 +94,7 @@ SerializeToSlicedBuffer(CIndex index, int32_t* buffer_size, char** res_buffer) {
         *res_buffer = binary.data;
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
         status.error_msg = strdup(e.what());
     }
@@ -109,7 +109,7 @@ LoadFromSlicedBuffer(CIndex index, const char* serialized_sliced_blob_buffer, in
         cIndex->Load(serialized_sliced_blob_buffer, size);
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
         status.error_msg = strdup(e.what());
     }
@@ -129,7 +129,7 @@ QueryOnFloatVecIndex(CIndex index, int64_t float_value_num, const float* vectors
 
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
         status.error_msg = strdup(e.what());
     }
@@ -153,7 +153,7 @@ QueryOnFloatVecIndexWithParam(CIndex index,
 
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
         status.error_msg = strdup(e.what());
     }
@@ -173,7 +173,7 @@ QueryOnBinaryVecIndex(CIndex index, int64_t data_size, const uint8_t* vectors, C
 
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
         status.error_msg = strdup(e.what());
     }
@@ -197,7 +197,7 @@ QueryOnBinaryVecIndexWithParam(CIndex index,
 
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
         status.error_msg = strdup(e.what());
     }
@@ -213,7 +213,7 @@ CreateQueryResult(CIndexQueryResult* res) {
 
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
         status.error_msg = strdup(e.what());
     }
@@ -259,7 +259,7 @@ DeleteQueryResult(CIndexQueryResult res) {
 
         status.error_code = Success;
         status.error_msg = "";
-    } catch (std::exception& e) {
+    } catch (std::runtime_error& e) {
         status.error_code = UnexpectedException;
        status.error_msg = strdup(e.what());
     }
@@ -14,7 +14,6 @@
 #include <vector>
 #include <string>
 #include <algorithm>
-#include <tuple>
 
 #include "index/knowhere/knowhere/index/IndexType.h"
 
@@ -58,14 +57,6 @@ Need_BuildAll_list() {
     return ret;
 }
-
-std::vector<std::tuple<std::string, std::string>>
-unsupported_index_combinations() {
-    static std::vector<std::tuple<std::string, std::string>> ret{
-        std::make_tuple(std::string(knowhere::IndexEnum::INDEX_FAISS_BIN_IVFFLAT), std::string(knowhere::Metric::L2)),
-    };
-    return ret;
-}
 
 template <typename T>
 bool
 is_in_list(const T& t, std::function<std::vector<T>()> list_func) {
@@ -93,11 +84,5 @@ is_in_need_id_list(const milvus::knowhere::IndexType& index_type) {
     return is_in_list<std::string>(index_type, Need_ID_List);
 }
-
-bool
-is_unsupported(const milvus::knowhere::IndexType& index_type, const milvus::knowhere::MetricType& metric_type) {
-    return is_in_list<std::tuple<std::string, std::string>>(std::make_tuple(index_type, metric_type),
-                                                            unsupported_index_combinations);
-}
 
 } // namespace indexbuilder
 } // namespace milvus
@@ -63,8 +63,6 @@ SearchOnSealed(const Schema& schema,
 
     Assert(record.test_readiness(field_offset));
     auto indexing_entry = record.get_entry(field_offset);
-    std::cout << " SearchOnSealed, indexing_entry->metric:" << indexing_entry->metric_type_ << std::endl;
-    std::cout << " SearchOnSealed, query_info.metric_type_:" << query_info.metric_type_ << std::endl;
     Assert(indexing_entry->metric_type_ == GetMetricType(query_info.metric_type_));
 
     auto final = [&] {
@@ -61,17 +61,6 @@ InferIndexType(const Json& search_params) {
     PanicInfo("failed to infer index type");
 }
-
-static knowhere::IndexType
-InferBinaryIndexType(const Json& search_params) {
-    namespace ip = knowhere::IndexParams;
-    namespace ie = knowhere::IndexEnum;
-    if (search_params.contains(ip::nprobe)) {
-        return ie::INDEX_FAISS_BIN_IVFFLAT;
-    } else {
-        return ie::INDEX_FAISS_BIN_IDMAP;
-    }
-}
 
 void
 VerifyPlanNodeVisitor::visit(FloatVectorANNS& node) {
     auto& search_params = node.query_info_.search_params_;
@@ -90,18 +79,7 @@ VerifyPlanNodeVisitor::visit(FloatVectorANNS& node) {
 
 void
 VerifyPlanNodeVisitor::visit(BinaryVectorANNS& node) {
-    auto& search_params = node.query_info_.search_params_;
-    auto inferred_type = InferBinaryIndexType(search_params);
-    auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(inferred_type);
-    auto index_mode = knowhere::IndexMode::MODE_CPU;
-
-    // mock the api, topk will be passed from placeholder
-    auto params_copy = search_params;
-    params_copy[knowhere::meta::TOPK] = 10;
-
-    // NOTE: the second parameter is not checked in knowhere, may be redundant
-    auto passed = adapter->CheckSearch(params_copy, inferred_type, index_mode);
-    AssertInfo(passed, "invalid search params");
+    // TODO
 }
 
 } // namespace milvus::query
@@ -133,7 +133,7 @@ AppendBinaryIndex(CBinarySet c_binary_set, void* index_binary, int64_t index_siz
     auto binary_set = (milvus::knowhere::BinarySet*)c_binary_set;
     std::string index_key(c_index_key);
     uint8_t* index = (uint8_t*)index_binary;
-    std::shared_ptr<uint8_t[]> data(index, [](void*) {});
+    std::shared_ptr<uint8_t[]> data(index);
     binary_set->Append(index_key, data, index_size);
 
     auto status = CStatus();
@@ -2,10 +2,6 @@ package indexbuilderclient
 
 import (
     "context"
-    "encoding/json"
-    "fmt"
-    "github.com/zilliztech/milvus-distributed/internal/errors"
-    "log"
     "time"
 
     "google.golang.org/grpc"
@@ -58,60 +54,21 @@ func (c *Client) BuildIndexWithoutID(columnDataPaths []string, typeParams map[st
     if c.tryConnect() != nil {
         panic("BuildIndexWithoutID: failed to connect index builder")
     }
-    parseMap := func(mStr string) (map[string]string, error) {
-        buffer := make(map[string]interface{})
-        err := json.Unmarshal([]byte(mStr), &buffer)
-        if err != nil {
-            return nil, errors.New("Unmarshal params failed")
-        }
-        ret := make(map[string]string)
-        for key, value := range buffer {
-            valueStr := fmt.Sprintf("%v", value)
-            ret[key] = valueStr
-        }
-        return ret, nil
-    }
     var typeParamsKV []*commonpb.KeyValuePair
-    for key := range typeParams {
-        if key == "params" {
-            mapParams, err := parseMap(typeParams[key])
-            if err != nil {
-                log.Println("parse params error: ", err)
-            }
-            for pk, pv := range mapParams {
-                typeParamsKV = append(typeParamsKV, &commonpb.KeyValuePair{
-                    Key:   pk,
-                    Value: pv,
-                })
-            }
-        } else {
-            typeParamsKV = append(typeParamsKV, &commonpb.KeyValuePair{
-                Key:   key,
-                Value: typeParams[key],
-            })
-        }
-    }
+    for typeParam := range typeParams {
+        typeParamsKV = append(typeParamsKV, &commonpb.KeyValuePair{
+            Key:   typeParam,
+            Value: typeParams[typeParam],
+        })
+    }
 
     var indexParamsKV []*commonpb.KeyValuePair
-    for key := range indexParams {
-        if key == "params" {
-            mapParams, err := parseMap(indexParams[key])
-            if err != nil {
-                log.Println("parse params error: ", err)
-            }
-            for pk, pv := range mapParams {
-                indexParamsKV = append(indexParamsKV, &commonpb.KeyValuePair{
-                    Key:   pk,
-                    Value: pv,
-                })
-            }
-        } else {
-            indexParamsKV = append(indexParamsKV, &commonpb.KeyValuePair{
-                Key:   key,
-                Value: indexParams[key],
-            })
-        }
-    }
+    for indexParam := range indexParams {
+        indexParamsKV = append(indexParamsKV, &commonpb.KeyValuePair{
+            Key:   indexParam,
+            Value: indexParams[indexParam],
+        })
+    }
 
     ctx := context.TODO()
     requset := &indexbuilderpb.BuildIndexRequest{
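The removed parseMap closure existed because callers pack auxiliary index settings as a JSON object under the single "params" key; flattening turned each JSON field into its own KeyValuePair. A standalone sketch of that behavior, with the sample parameter values invented for illustration:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// parseMap mirrors the closure deleted above: it decodes a JSON object
// and renders every value back to a string.
func parseMap(mStr string) (map[string]string, error) {
	buffer := make(map[string]interface{})
	if err := json.Unmarshal([]byte(mStr), &buffer); err != nil {
		return nil, errors.New("Unmarshal params failed")
	}
	ret := make(map[string]string)
	for key, value := range buffer {
		ret[key] = fmt.Sprintf("%v", value)
	}
	return ret, nil
}

func main() {
	// A typical IVF-style parameter blob; the concrete keys are examples only.
	flat, err := parseMap(`{"nlist": 1024, "metric_type": "L2"}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(flat) // map[metric_type:L2 nlist:1024]
}
```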
@@ -14,7 +14,6 @@ package indexbuilder
 import "C"
 import (
     "errors"
-    "fmt"
     "strconv"
     "unsafe"
 
@@ -106,13 +105,10 @@ func (index *CIndex) BuildFloatVecIndexWithoutIds(vectors []float32) error {
     CStatus
     BuildFloatVecIndexWithoutIds(CIndex index, int64_t float_value_num, const float* vectors);
     */
-    fmt.Println("before BuildFloatVecIndexWithoutIds")
     status := C.BuildFloatVecIndexWithoutIds(index.indexPtr, (C.int64_t)(len(vectors)), (*C.float)(&vectors[0]))
     errorCode := status.error_code
-    fmt.Println("BuildFloatVecIndexWithoutIds error code: ", errorCode)
     if errorCode != 0 {
         errorMsg := C.GoString(status.error_msg)
-        fmt.Println("BuildFloatVecIndexWithoutIds error msg: ", errorMsg)
         defer C.free(unsafe.Pointer(status.error_msg))
         return errors.New("BuildFloatVecIndexWithoutIds failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
     }
@@ -146,8 +142,6 @@ func (index *CIndex) Delete() error {
 }
 
 func NewCIndex(typeParams, indexParams map[string]string) (Index, error) {
-    fmt.Println("NNNNNNNNNNNNNNNNNNNNNNNNNNN typeParams: ", typeParams)
-    fmt.Println("NNNNNNNNNNNNNNNNNNNNNNNNNNN indexParams: ", indexParams)
     protoTypeParams := &indexcgopb.TypeParams{
         Params: make([]*commonpb.KeyValuePair, 0),
     }
@@ -174,14 +168,10 @@ func NewCIndex(typeParams, indexParams map[string]string) (Index, error) {
     CIndex* res_index);
     */
     var indexPtr C.CIndex
-    fmt.Println("before create index ........................................")
     status := C.CreateIndex(typeParamsPointer, indexParamsPointer, &indexPtr)
-    fmt.Println("after create index ........................................")
     errorCode := status.error_code
-    fmt.Println("EEEEEEEEEEEEEEEEEEEEEEEEEE error code: ", errorCode)
     if errorCode != 0 {
         errorMsg := C.GoString(status.error_msg)
-        fmt.Println("EEEEEEEEEEEEEEEEEEEEEEEEEE error msg: ", errorMsg)
         defer C.free(unsafe.Pointer(status.error_msg))
         return nil, errors.New(" failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
     }
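The hunks above repeat one cgo convention: read status.error_code, copy status.error_msg with C.GoString, free the C string, and wrap everything in a Go error. A self-contained sketch of that pattern; the inline C struct and the fail() helper are stand-ins for the real index_c.h API, and statusToError is a hypothetical helper name:

```go
package main

/*
#include <stdlib.h>
#include <string.h>

typedef struct CStatus {
	int         error_code;
	const char* error_msg;
} CStatus;

// fail fabricates a failing status the way the C++ side does with strdup.
static CStatus fail() {
	CStatus s;
	s.error_code = 1;
	s.error_msg = strdup("something wrong in index parameters!");
	return s;
}
*/
import "C"

import (
	"errors"
	"fmt"
	"strconv"
	"unsafe"
)

// statusToError converts a CStatus into a Go error and releases the
// strdup'ed message exactly once, mirroring the defer C.free calls above.
func statusToError(op string, s C.CStatus) error {
	if s.error_code == 0 {
		return nil
	}
	msg := C.GoString(s.error_msg)
	C.free(unsafe.Pointer(s.error_msg))
	return errors.New(op + " failed, C runtime error detected, error code = " +
		strconv.Itoa(int(s.error_code)) + ", error msg = " + msg)
}

func main() {
	fmt.Println(statusToError("CreateIndex", C.fail()))
}
```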
@@ -54,29 +54,20 @@ func CreateBuilder(ctx context.Context) (*Builder, error) {
         loopCancel: cancel,
     }
 
-    connectEtcdFn := func() error {
     etcdAddress := Params.EtcdAddress
     etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
     if err != nil {
-        return err
+        return nil, err
     }
     etcdKV := etcdkv.NewEtcdKV(etcdClient, Params.MetaRootPath)
     metakv, err := NewMetaTable(etcdKV)
-    if err != nil {
-        return err
-    }
-    b.metaTable = metakv
-    return nil
-    }
-    err := Retry(10, time.Millisecond*200, connectEtcdFn)
     if err != nil {
         return nil, err
     }
+    b.metaTable = metakv
 
     idAllocator, err := allocator.NewIDAllocator(b.loopCtx, Params.MasterAddress)
-    b.idAllocator = idAllocator
 
-    connectMinIOFn := func() error {
     option := &miniokv.Option{
         Address:     Params.MinIOAddress,
         AccessKeyID: Params.MinIOAccessKeyID,
@@ -87,15 +78,10 @@ func CreateBuilder(ctx context.Context) (*Builder, error) {
     }
 
     b.kv, err = miniokv.NewMinIOKV(b.loopCtx, option)
-    if err != nil {
-        return err
-    }
-    return nil
-    }
-    err = Retry(10, time.Millisecond*200, connectMinIOFn)
     if err != nil {
         return nil, err
     }
+    b.idAllocator = idAllocator
 
     b.sched, err = NewTaskScheduler(b.loopCtx, b.idAllocator, b.kv, b.metaTable)
     if err != nil {
@@ -1,40 +0,0 @@
-package indexbuilder
-
-import (
-    "log"
-    "time"
-)
-
-// Reference: https://blog.cyeam.com/golang/2018/08/27/retry
-
-func RetryImpl(attempts int, sleep time.Duration, fn func() error, maxSleepTime time.Duration) error {
-    if err := fn(); err != nil {
-        if s, ok := err.(InterruptError); ok {
-            return s.error
-        }
-
-        if attempts--; attempts > 0 {
-            log.Printf("retry func error: %s. attempts #%d after %s.", err.Error(), attempts, sleep)
-            time.Sleep(sleep)
-            if sleep < maxSleepTime {
-                return RetryImpl(attempts, 2*sleep, fn, maxSleepTime)
-            }
-            return RetryImpl(attempts, maxSleepTime, fn, maxSleepTime)
-        }
-        return err
-    }
-    return nil
-}
-
-func Retry(attempts int, sleep time.Duration, fn func() error) error {
-    maxSleepTime := time.Millisecond * 1000
-    return RetryImpl(attempts, sleep, fn, maxSleepTime)
-}
-
-type InterruptError struct {
-    error
-}
-
-func NoRetryError(err error) InterruptError {
-    return InterruptError{err}
-}
@@ -2,7 +2,6 @@ package indexbuilder
 
 import (
     "context"
-    "fmt"
     "log"
     "strconv"
     "time"
@@ -172,12 +171,10 @@ func (it *IndexBuildTask) Execute() error {
         indexParams[key] = value
     }
 
-    fmt.Println("before NewCIndex ..........................")
     it.index, err = NewCIndex(typeParams, indexParams)
     if err != nil {
         return err
     }
-    fmt.Println("after NewCIndex ..........................")
 
     getKeyByPathNaive := func(path string) string {
         // splitElements := strings.Split(path, "/")
@@ -226,7 +223,6 @@ func (it *IndexBuildTask) Execute() error {
 
     for _, value := range insertData.Data {
         // TODO: BinaryVectorFieldData
-        fmt.Println("before build index ..................................")
         floatVectorFieldData, fOk := value.(*storage.FloatVectorFieldData)
         if fOk {
             err = it.index.BuildFloatVecIndexWithoutIds(floatVectorFieldData.Data)
@@ -242,15 +238,12 @@ func (it *IndexBuildTask) Execute() error {
             return err
         }
     }
-    fmt.Println("after build index ..................................")
 
     if !fOk && !bOk {
         return errors.New("we expect FloatVectorFieldData or BinaryVectorFieldData")
     }
 
-    fmt.Println("before serialize .............................................")
     indexBlobs, err := it.index.Serialize()
-    fmt.Println("after serialize .............................................")
     if err != nil {
         return err
     }
55 internal/kv/rocksdb/cwrapper/CMakeLists.txt Normal file
@@ -0,0 +1,55 @@
+cmake_minimum_required(VERSION 3.14...3.17 FATAL_ERROR)
+project(wrapper)
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
+include( ExternalProject )
+set( ROCKSDB_VERSION "6.15.2" )
+set( ROCKSDB_SOURCE_URL
+    "https://github.com/facebook/rocksdb/archive/v${ROCKSDB_VERSION}.tar.gz")
+
+if( CUSTOM_THIRDPARTY_DOWNLOAD_PATH )
+    set( THIRDPARTY_DOWNLOAD_PATH ${CUSTOM_THIRDPARTY_DOWNLOAD_PATH} )
+else()
+    set( THIRDPARTY_DOWNLOAD_PATH ${CMAKE_BINARY_DIR}/3rdparty_download/download )
+endif()
+message( STATUS "Thirdparty downloaded file path: ${THIRDPARTY_DOWNLOAD_PATH}" )
+
+macro( build_rocksdb )
+    message( STATUS "Building ROCKSDB-${ROCKSDB_VERSION} from source" )
+
+    set( ROCKSDB_CMAKE_ARGS
+        "-DWITH_GFLAGS=OFF"
+        "-DROCKSDB_BUILD_SHARED=OFF"
+        # "-DWITH_SNAPPY=ON"
+        # "-DWITH_LZ4=ON"
+        # "-DWITH_ZSTD=ON"
+        # "-DWITH_BZ2=ON"
+        "-DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}"
+    )
+
+    ExternalProject_Add(
+        rocksdb-ep
+        PREFIX          ${CMAKE_BINARY_DIR}/3rdparty_download/rocksdb-subbuild
+        BINARY_DIR      rocksdb-bin
+        DOWNLOAD_DIR    ${THIRDPARTY_DOWNLOAD_PATH}
+        INSTALL_DIR     ${CMAKE_CURRENT_BINARY_DIR}
+        URL             ${ROCKSDB_SOURCE_URL}
+        URL_MD5         "67f9e04fda62af551dd039c37b1808ac"
+        CMAKE_ARGS      ${ROCKSDB_CMAKE_ARGS}
+        ${EP_LOG_OPTIONS}
+    )
+
+    ExternalProject_Get_Property( rocksdb-ep INSTALL_DIR )
+    ExternalProject_Get_Property( rocksdb-ep BINARY_DIR )
+
+    if( NOT IS_DIRECTORY ${INSTALL_DIR}/include )
+        file( MAKE_DIRECTORY "${INSTALL_DIR}/include" )
+    endif()
+
+endmacro()
+
+build_rocksdb()
+
+#endif()
53 internal/kv/rocksdb/cwrapper/build.sh Executable file
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+SOURCE=${BASH_SOURCE[0]}
+while [ -h $SOURCE ]; do # resolve $SOURCE until the file is no longer a symlink
+  DIR=$( cd -P $( dirname $SOURCE ) && pwd )
+  SOURCE=$(readlink $SOURCE)
+  [[ $SOURCE != /* ]] && SOURCE=$DIR/$SOURCE # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
+done
+DIR=$( cd -P $( dirname $SOURCE ) && pwd )
+# echo $DIR
+
+CGO_CFLAGS="-I$(pwd)/output/include"
+CGO_LDFLAGS="-L$(pwd)/output/lib -l:librocksdb.a -lstdc++ -lm -lz"
+
+OUTPUT_LIB=${DIR}/output
+
+if [ -d ${OUTPUT_LIB} ];then
+    rm -rf ${OUTPUT_LIB}
+fi
+mkdir ${OUTPUT_LIB}
+
+BUILD_TYPE="Debug"
+
+while getopts "t:h:" arg; do
+  case $arg in
+  t)
+    BUILD_TYPE=$OPTARG # BUILD_TYPE
+    ;;
+  h) # help
+    echo "-t: build type(default: Debug)
+-h: help
+"
+    exit 0
+    ;;
+  ?)
+    echo "ERROR! unknown argument"
+    exit 1
+    ;;
+  esac
+done
+echo "BUILD_TYPE: " $BUILD_TYPE
+
+pushd ${OUTPUT_LIB}
+CMAKE_CMD="cmake \
+-DCMAKE_BUILD_TYPE=${BUILD_TYPE} .."
+
+${CMAKE_CMD}
+echo ${CMAKE_CMD}
+
+if [[ ! ${jobs+1} ]]; then
+    jobs=$(nproc)
+fi
+make -j ${jobs}
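The CGO_CFLAGS and CGO_LDFLAGS values in this script document what a cgo build needs in order to compile against the headers and statically link the librocksdb.a produced under output/; the script only defines them, and they take effect when exported to the environment of the go build that compiles the Go wrapper. As an alternative, a package could pin equivalent flags in its own cgo preamble. A hypothetical, minimal sketch, assuming the build above has already populated output/include and output/lib and that the toolchain accepts the same -l: archive syntax the script uses:

```go
package main

/*
#cgo CFLAGS: -I${SRCDIR}/output/include
#cgo LDFLAGS: -L${SRCDIR}/output/lib -l:librocksdb.a -lstdc++ -lm -lz
#include "rocksdb/c.h"
*/
import "C"

import "fmt"

func main() {
	// Touch the RocksDB C API once to verify the static link resolved.
	opts := C.rocksdb_options_create()
	C.rocksdb_options_destroy(opts)
	fmt.Println("statically linked against RocksDB")
}
```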
128 internal/kv/rocksdb/rocksdb_kv.go Normal file
@@ -0,0 +1,128 @@
+package rocksdbkv
+
+import (
+    "github.com/tecbot/gorocksdb"
+)
+
+type RocksdbKV struct {
+    opts         *gorocksdb.Options
+    db           *gorocksdb.DB
+    writeOptions *gorocksdb.WriteOptions
+    readOptions  *gorocksdb.ReadOptions
+    name         string
+}
+
+func NewRocksdbKV(name string) (*RocksdbKV, error) {
+    bbto := gorocksdb.NewDefaultBlockBasedTableOptions()
+    bbto.SetBlockCache(gorocksdb.NewLRUCache(3 << 30))
+    opts := gorocksdb.NewDefaultOptions()
+    opts.SetBlockBasedTableFactory(bbto)
+    opts.SetCreateIfMissing(true)
+
+    ro := gorocksdb.NewDefaultReadOptions()
+    ro.SetFillCache(false)
+
+    wo := gorocksdb.NewDefaultWriteOptions()
+    db, err := gorocksdb.OpenDb(opts, name)
+    if err != nil {
+        return nil, err
+    }
+    return &RocksdbKV{
+        opts:         opts,
+        db:           db,
+        writeOptions: wo,
+        readOptions:  ro,
+        name:         name,
+    }, nil
+}
+
+func (kv *RocksdbKV) Close() {
+    kv.db.Close()
+}
+
+func (kv *RocksdbKV) GetName() string {
+    return kv.name
+}
+
+func (kv *RocksdbKV) Load(key string) (string, error) {
+    value, err := kv.db.Get(kv.readOptions, []byte(key))
+    defer value.Free()
+    return string(value.Data()), err
+}
+
+func (kv *RocksdbKV) LoadWithPrefix(key string) ([]string, []string, error) {
+    iter := kv.db.NewIterator(kv.readOptions)
+    keys := make([]string, 0)
+    values := make([]string, 0)
+    iter.Seek([]byte(key))
+    for ; iter.ValidForPrefix([]byte(key)); iter.Next() { // stop at the end of the prefix range
+        key := iter.Key()
+        value := iter.Value()
+        keys = append(keys, string(key.Data()))
+        values = append(values, string(value.Data()))
+        key.Free()
+        value.Free()
+    }
+    if err := iter.Err(); err != nil {
+        return nil, nil, err
+    }
+    return keys, values, nil
+}
+
+func (kv *RocksdbKV) MultiLoad(keys []string) ([]string, error) {
+    values := make([]string, 0, len(keys))
+    for _, key := range keys {
+        value, err := kv.db.Get(kv.readOptions, []byte(key))
+        if err != nil {
+            return []string{}, err
+        }
+        values = append(values, string(value.Data()))
+    }
+    return values, nil
+}
+
+func (kv *RocksdbKV) Save(key, value string) error {
+    err := kv.db.Put(kv.writeOptions, []byte(key), []byte(value))
+    return err
+}
+
+func (kv *RocksdbKV) MultiSave(kvs map[string]string) error {
+    writeBatch := gorocksdb.NewWriteBatch()
+    defer writeBatch.Clear()
+    for k, v := range kvs {
+        writeBatch.Put([]byte(k), []byte(v))
+    }
+    err := kv.db.Write(kv.writeOptions, writeBatch)
+    return err
+}
+
+func (kv *RocksdbKV) RemoveWithPrefix(prefix string) error {
+    iter := kv.db.NewIterator(kv.readOptions)
+    iter.Seek([]byte(prefix))
+    for ; iter.ValidForPrefix([]byte(prefix)); iter.Next() { // stop at the end of the prefix range
+        key := iter.Key()
+        err := kv.db.Delete(kv.writeOptions, key.Data())
+        if err != nil {
+            return err // propagate the delete failure instead of swallowing it
+        }
+    }
+    if err := iter.Err(); err != nil {
+        return err
+    }
+    return nil
+}
+
+func (kv *RocksdbKV) Remove(key string) error {
+    err := kv.db.Delete(kv.writeOptions, []byte(key))
+    return err
+}
+
+func (kv *RocksdbKV) MultiRemove(keys []string) error {
+    writeBatch := gorocksdb.NewWriteBatch()
+    defer writeBatch.Clear()
+    for _, key := range keys {
+        writeBatch.Delete([]byte(key))
+    }
+    err := kv.db.Write(kv.writeOptions, writeBatch)
+    return err
+}
54	internal/kv/rocksdb/rocksdb_kv_test.go	Normal file
@@ -0,0 +1,54 @@
package rocksdbkv_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	rocksdbkv "github.com/zilliztech/milvus-distributed/internal/kv/rocksdb"
)

func TestRocksdbKV(t *testing.T) {
	name := "/tmp/rocksdb"
	rocksdbKV, err := rocksdbkv.NewRocksdbKV(name)
	if err != nil {
		panic(err)
	}

	defer rocksdbKV.Close()
	// Need to call RemoveWithPrefix
	defer rocksdbKV.RemoveWithPrefix("")

	err = rocksdbKV.Save("abc", "123")
	assert.Nil(t, err)

	err = rocksdbKV.Save("abcd", "1234")
	assert.Nil(t, err)

	val, err := rocksdbKV.Load("abc")
	assert.Nil(t, err)
	assert.Equal(t, val, "123")

	keys, vals, err := rocksdbKV.LoadWithPrefix("abc")
	assert.Nil(t, err)
	assert.Equal(t, len(keys), len(vals))
	assert.Equal(t, len(keys), 2)

	assert.Equal(t, keys[0], "abc")
	assert.Equal(t, keys[1], "abcd")
	assert.Equal(t, vals[0], "123")
	assert.Equal(t, vals[1], "1234")

	err = rocksdbKV.Save("key_1", "123")
	assert.Nil(t, err)
	err = rocksdbKV.Save("key_2", "456")
	assert.Nil(t, err)
	err = rocksdbKV.Save("key_3", "789")
	assert.Nil(t, err)

	keys = []string{"key_1", "key_2"}
	vals, err = rocksdbKV.MultiLoad(keys)
	assert.Nil(t, err)
	assert.Equal(t, len(vals), len(keys))
	assert.Equal(t, vals[0], "123")
	assert.Equal(t, vals[1], "456")
}
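The test above covers Save, Load, LoadWithPrefix, and MultiLoad, but not the batch write paths. A short sketch (not part of the commit; the database path and keys are illustrative) exercising MultiSave and MultiRemove, each of which commits through a single gorocksdb.WriteBatch:

package main

import (
	"fmt"

	rocksdbkv "github.com/zilliztech/milvus-distributed/internal/kv/rocksdb"
)

func main() {
	kv, err := rocksdbkv.NewRocksdbKV("/tmp/rocksdb_batch_demo")
	if err != nil {
		panic(err)
	}
	defer kv.Close()
	defer kv.RemoveWithPrefix("") // wipe the demo keys, mirroring the test's cleanup

	// Both pairs are written atomically in one WriteBatch.
	if err := kv.MultiSave(map[string]string{"k1": "v1", "k2": "v2"}); err != nil {
		panic(err)
	}
	vals, err := kv.MultiLoad([]string{"k1", "k2"})
	if err != nil {
		panic(err)
	}
	fmt.Println(vals) // expected: [v1 v2]

	// Deletion is batched the same way.
	if err := kv.MultiRemove([]string{"k1", "k2"}); err != nil {
		panic(err)
	}
}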
@@ -1,7 +1,6 @@
 package master

 import (
-	"sync"
 	"time"

 	buildindexclient "github.com/zilliztech/milvus-distributed/internal/indexbuilder/client"
@@ -21,12 +20,9 @@ type MockWriteNodeClient struct {
 	partitionTag string
 	timestamp    Timestamp
 	collectionID UniqueID
-	lock         sync.RWMutex
 }

 func (m *MockWriteNodeClient) FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error {
-	m.lock.Lock()
-	defer m.lock.Unlock()
 	m.flushTime = time.Now()
 	m.segmentID = segmentID
 	m.collectionID = collectionID
@@ -37,8 +33,6 @@ func (m *MockWriteNodeClient) FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error {
 }

 func (m *MockWriteNodeClient) DescribeSegment(segmentID UniqueID) (*writerclient.SegmentDescription, error) {
 	now := time.Now()
-	m.lock.RLock()
-	defer m.lock.RUnlock()
 	if now.Sub(m.flushTime).Seconds() > 2 {
 		return &writerclient.SegmentDescription{
 			SegmentID: segmentID,
@@ -24,6 +24,11 @@ func (task *createIndexTask) Ts() (Timestamp, error) {
 }

 func (task *createIndexTask) Execute() error {
+	// modify schema
+	if err := task.mt.UpdateFieldIndexParams(task.req.CollectionName, task.req.FieldName, task.req.ExtraParams); err != nil {
+		return err
+	}
+	// check if closed segment has the same index build history
 	collMeta, err := task.mt.GetCollectionByName(task.req.CollectionName)
 	if err != nil {
 		return err
@@ -39,20 +44,6 @@ func (task *createIndexTask) Execute() error {
 		return fmt.Errorf("can not find field name %s", task.req.FieldName)
 	}

-	// pre checks
-	isIndexable, err := task.mt.IsIndexable(collMeta.ID, fieldID)
-	if err != nil {
-		return err
-	}
-	if !isIndexable {
-		return fmt.Errorf("field %s is not vector", task.req.FieldName)
-	}
-
-	// modify schema
-	if err := task.mt.UpdateFieldIndexParams(task.req.CollectionName, task.req.FieldName, task.req.ExtraParams); err != nil {
-		return err
-	}
-	// check if closed segment has the same index build history
 	for _, segID := range collMeta.SegmentIDs {
 		segMeta, err := task.mt.GetSegmentByID(segID)
 		if err != nil {
@@ -218,6 +218,7 @@ func CreateServer(ctx context.Context) (*Master, error) {

 	m.grpcServer = grpc.NewServer()
 	masterpb.RegisterMasterServer(m.grpcServer, m)
+
 	return m, nil
 }

@@ -65,8 +65,12 @@ func refreshChannelNames() {
 }

 func receiveTimeTickMsg(stream *ms.MsgStream) bool {
+	for {
 		result := (*stream).Consume()
-	return result != nil
+		if len(result.Msgs) > 0 {
+			return true
+		}
+	}
 }

 func getTimeTickMsgPack(ttmsgs [][2]uint64) *ms.MsgPack {
@@ -77,14 +81,6 @@ func getTimeTickMsgPack(ttmsgs [][2]uint64) *ms.MsgPack {
 	return &msgPack
 }

-func mockTimeTickBroadCast(msgStream ms.MsgStream, time Timestamp) error {
-	timeTick := [][2]uint64{
-		{0, time},
-	}
-	ttMsgPackForDD := getTimeTickMsgPack(timeTick)
-	return msgStream.Broadcast(ttMsgPackForDD)
-}
-
 func TestMaster(t *testing.T) {
 	Init()
 	refreshMasterAddress()
@@ -114,6 +110,7 @@ func TestMaster(t *testing.T) {

 	conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
 	require.Nil(t, err)
+
 	cli := masterpb.NewMasterClient(conn)

 	t.Run("TestConfigTask", func(t *testing.T) {
@@ -537,15 +534,10 @@ func TestMaster(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)

-	time.Sleep(1000 * time.Millisecond)
-	timestampNow := Timestamp(time.Now().Unix())
-	err = mockTimeTickBroadCast(svr.timesSyncMsgProducer.ddSyncStream, timestampNow)
-	assert.NoError(t, err)
-
 	//consume msg
-	ddMs := ms.NewPulsarTtMsgStream(ctx, 1024)
+	ddMs := ms.NewPulsarMsgStream(ctx, 1024)
 	ddMs.SetPulsarClient(pulsarAddr)
-	ddMs.CreatePulsarConsumers(Params.DDChannelNames, Params.MsgChannelSubName, ms.NewUnmarshalDispatcher(), 1024)
+	ddMs.CreatePulsarConsumers(Params.DDChannelNames, "DDStream", ms.NewUnmarshalDispatcher(), 1024)
 	ddMs.Start()

 	var consumeMsg ms.MsgStream = ddMs
@@ -831,16 +823,11 @@ func TestMaster(t *testing.T) {
 	assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)

 	//consume msg
-	ddMs := ms.NewPulsarTtMsgStream(ctx, 1024)
+	ddMs := ms.NewPulsarMsgStream(ctx, 1024)
 	ddMs.SetPulsarClient(pulsarAddr)
-	ddMs.CreatePulsarConsumers(Params.DDChannelNames, Params.MsgChannelSubName, ms.NewUnmarshalDispatcher(), 1024)
+	ddMs.CreatePulsarConsumers(Params.DDChannelNames, "DDStream", ms.NewUnmarshalDispatcher(), 1024)
 	ddMs.Start()

-	time.Sleep(1000 * time.Millisecond)
-	timestampNow := Timestamp(time.Now().Unix())
-	err = mockTimeTickBroadCast(svr.timesSyncMsgProducer.ddSyncStream, timestampNow)
-	assert.NoError(t, err)
-
 	var consumeMsg ms.MsgStream = ddMs
 	for {
 		result := consumeMsg.Consume()
@@ -863,19 +850,19 @@ func TestMaster(t *testing.T) {
 	writeNodeStream.CreatePulsarProducers(Params.WriteNodeTimeTickChannelNames)
 	writeNodeStream.Start()

-	ddMs := ms.NewPulsarTtMsgStream(ctx, 1024)
+	ddMs := ms.NewPulsarMsgStream(ctx, 1024)
 	ddMs.SetPulsarClient(pulsarAddr)
-	ddMs.CreatePulsarConsumers(Params.DDChannelNames, Params.MsgChannelSubName, ms.NewUnmarshalDispatcher(), 1024)
+	ddMs.CreatePulsarConsumers(Params.DDChannelNames, "DDStream", ms.NewUnmarshalDispatcher(), 1024)
 	ddMs.Start()

-	dMMs := ms.NewPulsarTtMsgStream(ctx, 1024)
+	dMMs := ms.NewPulsarMsgStream(ctx, 1024)
 	dMMs.SetPulsarClient(pulsarAddr)
-	dMMs.CreatePulsarConsumers(Params.InsertChannelNames, Params.MsgChannelSubName, ms.NewUnmarshalDispatcher(), 1024)
+	dMMs.CreatePulsarConsumers(Params.InsertChannelNames, "DMStream", ms.NewUnmarshalDispatcher(), 1024)
 	dMMs.Start()

 	k2sMs := ms.NewPulsarMsgStream(ctx, 1024)
 	k2sMs.SetPulsarClient(pulsarAddr)
-	k2sMs.CreatePulsarConsumers(Params.K2SChannelNames, Params.MsgChannelSubName, ms.NewUnmarshalDispatcher(), 1024)
+	k2sMs.CreatePulsarConsumers(Params.K2SChannelNames, "K2SStream", ms.NewUnmarshalDispatcher(), 1024)
 	k2sMs.Start()

 	ttsoftmsgs := [][2]uint64{
@@ -900,12 +887,6 @@ func TestMaster(t *testing.T) {
 	var k2sMsgstream ms.MsgStream = k2sMs
 	assert.True(t, receiveTimeTickMsg(&k2sMsgstream))

-	conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
-	assert.Nil(t, err)
-	defer conn.Close()
-
-	cli := masterpb.NewMasterClient(conn)
-
 	sch := schemapb.CollectionSchema{
 		Name:        "name" + strconv.FormatUint(rand.Uint64(), 10),
 		Description: "test collection",
@@ -916,11 +897,10 @@ func TestMaster(t *testing.T) {
 	schemaBytes, err := proto.Marshal(&sch)
 	assert.Nil(t, err)

-	////////////////////////////CreateCollection////////////////////////
 	createCollectionReq := internalpb.CreateCollectionRequest{
 		MsgType:   internalpb.MsgType_kCreateCollection,
 		ReqID:     1,
-		Timestamp: Timestamp(time.Now().Unix()),
+		Timestamp: uint64(time.Now().Unix()),
 		ProxyID:   1,
 		Schema:    &commonpb.Blob{Value: schemaBytes},
 	}
@@ -928,11 +908,6 @@ func TestMaster(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)

-	time.Sleep(1000 * time.Millisecond)
-	timestampNow := Timestamp(time.Now().Unix())
-	err = mockTimeTickBroadCast(svr.timesSyncMsgProducer.ddSyncStream, timestampNow)
-	assert.NoError(t, err)
-
 	var consumeMsg ms.MsgStream = ddMs
 	var createCollectionMsg *ms.CreateCollectionMsg
 	for {
@@ -967,11 +942,6 @@ func TestMaster(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)

-	time.Sleep(1000 * time.Millisecond)
-	timestampNow = Timestamp(time.Now().Unix())
-	err = mockTimeTickBroadCast(svr.timesSyncMsgProducer.ddSyncStream, timestampNow)
-	assert.NoError(t, err)
-
 	var createPartitionMsg *ms.CreatePartitionMsg
 	for {
 		result := consumeMsg.Consume()
@@ -1006,11 +976,6 @@ func TestMaster(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)

-	time.Sleep(1000 * time.Millisecond)
-	timestampNow = Timestamp(time.Now().Unix())
-	err = mockTimeTickBroadCast(svr.timesSyncMsgProducer.ddSyncStream, timestampNow)
-	assert.NoError(t, err)
-
 	var dropPartitionMsg *ms.DropPartitionMsg
 	for {
 		result := consumeMsg.Consume()
@@ -1041,11 +1006,6 @@ func TestMaster(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)

-	time.Sleep(1000 * time.Millisecond)
-	timestampNow = Timestamp(time.Now().Unix())
-	err = mockTimeTickBroadCast(svr.timesSyncMsgProducer.ddSyncStream, timestampNow)
-	assert.NoError(t, err)
-
 	var dropCollectionMsg *ms.DropCollectionMsg
 	for {
 		result := consumeMsg.Consume()
@@ -46,7 +46,7 @@ func TestMaster_Scheduler_Collection(t *testing.T) {
 	pulsarDDStream.Start()
 	defer pulsarDDStream.Close()

-	consumeMs := ms.NewPulsarTtMsgStream(ctx, 1024)
+	consumeMs := ms.NewPulsarMsgStream(ctx, 1024)
 	consumeMs.SetPulsarClient(pulsarAddr)
 	consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, ms.NewUnmarshalDispatcher(), 1024)
 	consumeMs.Start()
@@ -96,9 +96,6 @@ func TestMaster_Scheduler_Collection(t *testing.T) {
 	err = createCollectionTask.WaitToFinish(ctx)
 	assert.Nil(t, err)

-	err = mockTimeTickBroadCast(pulsarDDStream, Timestamp(12))
-	assert.NoError(t, err)
-
 	var consumeMsg ms.MsgStream = consumeMs
 	var createCollectionMsg *ms.CreateCollectionMsg
 	for {
@@ -121,7 +118,7 @@ func TestMaster_Scheduler_Collection(t *testing.T) {
 	dropCollectionReq := internalpb.DropCollectionRequest{
 		MsgType:        internalpb.MsgType_kDropCollection,
 		ReqID:          1,
-		Timestamp:      13,
+		Timestamp:      11,
 		ProxyID:        1,
 		CollectionName: &servicepb.CollectionName{CollectionName: sch.Name},
 	}
@@ -141,9 +138,6 @@ func TestMaster_Scheduler_Collection(t *testing.T) {
 	err = dropCollectionTask.WaitToFinish(ctx)
 	assert.Nil(t, err)

-	err = mockTimeTickBroadCast(pulsarDDStream, Timestamp(14))
-	assert.NoError(t, err)
-
 	var dropCollectionMsg *ms.DropCollectionMsg
 	for {
 		result := consumeMsg.Consume()
@@ -190,7 +184,7 @@ func TestMaster_Scheduler_Partition(t *testing.T) {
 	pulsarDDStream.Start()
 	defer pulsarDDStream.Close()

-	consumeMs := ms.NewPulsarTtMsgStream(ctx, 1024)
+	consumeMs := ms.NewPulsarMsgStream(ctx, 1024)
 	consumeMs.SetPulsarClient(pulsarAddr)
 	consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, ms.NewUnmarshalDispatcher(), 1024)
 	consumeMs.Start()
@@ -240,9 +234,6 @@ func TestMaster_Scheduler_Partition(t *testing.T) {
 	err = createCollectionTask.WaitToFinish(ctx)
 	assert.Nil(t, err)

-	err = mockTimeTickBroadCast(pulsarDDStream, Timestamp(12))
-	assert.NoError(t, err)
-
 	var consumeMsg ms.MsgStream = consumeMs
 	var createCollectionMsg *ms.CreateCollectionMsg
 	for {
@@ -266,7 +257,7 @@ func TestMaster_Scheduler_Partition(t *testing.T) {
 	createPartitionReq := internalpb.CreatePartitionRequest{
 		MsgType:   internalpb.MsgType_kCreatePartition,
 		ReqID:     1,
-		Timestamp: 13,
+		Timestamp: 11,
 		ProxyID:   1,
 		PartitionName: &servicepb.PartitionName{
 			CollectionName: sch.Name,
@@ -288,9 +279,6 @@ func TestMaster_Scheduler_Partition(t *testing.T) {
 	err = createPartitionTask.WaitToFinish(ctx)
 	assert.Nil(t, err)

-	err = mockTimeTickBroadCast(pulsarDDStream, Timestamp(14))
-	assert.NoError(t, err)
-
 	var createPartitionMsg *ms.CreatePartitionMsg
 	for {
 		result := consumeMsg.Consume()
@@ -313,7 +301,7 @@ func TestMaster_Scheduler_Partition(t *testing.T) {
 	dropPartitionReq := internalpb.DropPartitionRequest{
 		MsgType:   internalpb.MsgType_kDropPartition,
 		ReqID:     1,
-		Timestamp: 15,
+		Timestamp: 11,
 		ProxyID:   1,
 		PartitionName: &servicepb.PartitionName{
 			CollectionName: sch.Name,
@@ -335,9 +323,6 @@ func TestMaster_Scheduler_Partition(t *testing.T) {
 	err = dropPartitionTask.WaitToFinish(ctx)
 	assert.Nil(t, err)

-	err = mockTimeTickBroadCast(pulsarDDStream, Timestamp(16))
-	assert.NoError(t, err)
-
 	var dropPartitionMsg *ms.DropPartitionMsg
 	for {
 		result := consumeMsg.Consume()
@@ -126,7 +126,7 @@ func TestSegmentManager_AssignSegment(t *testing.T) {
 		}
 	}

-	time.Sleep(time.Duration(Params.SegIDAssignExpiration) * time.Millisecond)
+	time.Sleep(time.Duration(Params.SegIDAssignExpiration))
 	timestamp, err := globalTsoAllocator()
 	assert.Nil(t, err)
 	err = mt.UpdateSegment(&pb.SegmentMeta{
@@ -156,124 +156,3 @@ func TestSegmentManager_AssignSegment(t *testing.T) {
 	assert.Nil(t, err)
 	assert.NotEqualValues(t, 0, segMeta.CloseTime)
 }
-
-func TestSegmentManager_SycnWritenode(t *testing.T) {
-	ctx, cancelFunc := context.WithCancel(context.TODO())
-	defer cancelFunc()
-
-	Init()
-	Params.TopicNum = 5
-	Params.QueryNodeNum = 3
-	Params.SegmentSize = 536870912 / 1024 / 1024
-	Params.SegmentSizeFactor = 0.75
-	Params.DefaultRecordSize = 1024
-	Params.MinSegIDAssignCnt = 1048576 / 1024
-	Params.SegIDAssignExpiration = 2000
-	etcdAddress := Params.EtcdAddress
-	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
-	assert.Nil(t, err)
-	rootPath := "/test/root"
-	_, err = cli.Delete(ctx, rootPath, clientv3.WithPrefix())
-	assert.Nil(t, err)
-
-	kvBase := etcdkv.NewEtcdKV(cli, rootPath)
-	defer kvBase.Close()
-	mt, err := NewMetaTable(kvBase)
-	assert.Nil(t, err)
-
-	collName := "segmgr_test_coll"
-	var collID int64 = 1001
-	partitionTag := "test_part"
-	schema := &schemapb.CollectionSchema{
-		Name: collName,
-		Fields: []*schemapb.FieldSchema{
-			{FieldID: 1, Name: "f1", IsPrimaryKey: false, DataType: schemapb.DataType_INT32},
-			{FieldID: 2, Name: "f2", IsPrimaryKey: false, DataType: schemapb.DataType_VECTOR_FLOAT, TypeParams: []*commonpb.KeyValuePair{
-				{Key: "dim", Value: "128"},
-			}},
-		},
-	}
-	err = mt.AddCollection(&pb.CollectionMeta{
-		ID:            collID,
-		Schema:        schema,
-		CreateTime:    0,
-		SegmentIDs:    []UniqueID{},
-		PartitionTags: []string{},
-	})
-	assert.Nil(t, err)
-	err = mt.AddPartition(collID, partitionTag)
-	assert.Nil(t, err)
-
-	var cnt int64
-	globalIDAllocator := func() (UniqueID, error) {
-		val := atomic.AddInt64(&cnt, 1)
-		return val, nil
-	}
-	globalTsoAllocator := func() (Timestamp, error) {
-		val := atomic.AddInt64(&cnt, 1)
-		phy := time.Now().UnixNano() / int64(time.Millisecond)
-		ts := tsoutil.ComposeTS(phy, val)
-		return ts, nil
-	}
-	syncWriteChan := make(chan *msgstream.TimeTickMsg)
-	syncProxyChan := make(chan *msgstream.TimeTickMsg)
-
-	segAssigner := NewSegmentAssigner(ctx, mt, globalTsoAllocator, syncProxyChan)
-	mockScheduler := &MockFlushScheduler{}
-	segManager, err := NewSegmentManager(ctx, mt, globalIDAllocator, globalTsoAllocator, syncWriteChan, mockScheduler, segAssigner)
-	assert.Nil(t, err)
-
-	segManager.Start()
-	defer segManager.Close()
-	sizePerRecord, err := typeutil.EstimateSizePerRecord(schema)
-	assert.Nil(t, err)
-	maxCount := uint32(Params.SegmentSize * 1024 * 1024 / float64(sizePerRecord))
-
-	req := []*internalpb.SegIDRequest{
-		{Count: maxCount, ChannelID: 1, CollName: collName, PartitionTag: partitionTag},
-		{Count: maxCount, ChannelID: 2, CollName: collName, PartitionTag: partitionTag},
-		{Count: maxCount, ChannelID: 3, CollName: collName, PartitionTag: partitionTag},
-	}
-	assignSegment, err := segManager.AssignSegment(req)
-	assert.Nil(t, err)
-	timestamp, err := globalTsoAllocator()
-	assert.Nil(t, err)
-	for i := 0; i < len(assignSegment); i++ {
-		assert.EqualValues(t, maxCount, assignSegment[i].Count)
-		assert.EqualValues(t, i+1, assignSegment[i].ChannelID)
-
-		err = mt.UpdateSegment(&pb.SegmentMeta{
-			SegmentID:    assignSegment[i].SegID,
-			CollectionID: collID,
-			PartitionTag: partitionTag,
-			ChannelStart: 0,
-			ChannelEnd:   1,
-			CloseTime:    timestamp,
-			NumRows:      int64(maxCount),
-			MemSize:      500000,
-		})
-		assert.Nil(t, err)
-	}
-
-	time.Sleep(time.Duration(Params.SegIDAssignExpiration) * time.Millisecond)
-
-	timestamp, err = globalTsoAllocator()
-	assert.Nil(t, err)
-	tsMsg := &msgstream.TimeTickMsg{
-		BaseMsg: msgstream.BaseMsg{
-			BeginTimestamp: timestamp, EndTimestamp: timestamp, HashValues: []uint32{},
-		},
-		TimeTickMsg: internalpb.TimeTickMsg{
-			MsgType:   internalpb.MsgType_kTimeTick,
-			PeerID:    1,
-			Timestamp: timestamp,
-		},
-	}
-	syncWriteChan <- tsMsg
-	time.Sleep(300 * time.Millisecond)
-
-	segManager.mu.RLock()
-	defer segManager.mu.RUnlock()
-	status := segManager.collStatus[collID]
-	assert.Empty(t, status.segments)
-}
@@ -58,7 +58,6 @@ func initTestPulsarStream(ctx context.Context, pulsarAddress string,

 	return &input, &output
 }

 func receiveMsg(stream *ms.MsgStream) []uint64 {
 	receiveCount := 0
 	var results []uint64
@@ -81,7 +81,7 @@ func (ttBarrier *softTimeTickBarrier) Start() error {
 			// get a legal Timestamp
 			ts := ttBarrier.minTimestamp()
 			lastTt := atomic.LoadInt64(&(ttBarrier.lastTt))
-			if lastTt != 0 && ttBarrier.minTtInterval > ts-Timestamp(lastTt) {
+			if ttBarrier.lastTt != 0 && ttBarrier.minTtInterval > ts-Timestamp(lastTt) {
 				continue
 			}
 			ttBarrier.outTt <- ts
@@ -192,15 +192,15 @@ func TestTt_SoftTtBarrierStart(t *testing.T) {

 func TestTt_SoftTtBarrierGetTimeTickClose(t *testing.T) {
 	channels := []string{"SoftTtBarrierGetTimeTickClose"}
-	//ttmsgs := [][2]int{
-	//	{1, 10},
-	//	{2, 20},
-	//	{3, 30},
-	//	{4, 40},
-	//	{1, 30},
-	//	{2, 30},
-	//}
-	inStream, ttStream := producer(channels, nil)
+	ttmsgs := [][2]int{
+		{1, 10},
+		{2, 20},
+		{3, 30},
+		{4, 40},
+		{1, 30},
+		{2, 30},
+	}
+	inStream, ttStream := producer(channels, ttmsgs)
 	defer func() {
 		(*inStream).Close()
 		(*ttStream).Close()
@@ -259,15 +259,15 @@ func TestTt_SoftTtBarrierGetTimeTickClose(t *testing.T) {

 func TestTt_SoftTtBarrierGetTimeTickCancel(t *testing.T) {
 	channels := []string{"SoftTtBarrierGetTimeTickCancel"}
-	//ttmsgs := [][2]int{
-	//	{1, 10},
-	//	{2, 20},
-	//	{3, 30},
-	//	{4, 40},
-	//	{1, 30},
-	//	{2, 30},
-	//}
-	inStream, ttStream := producer(channels, nil)
+	ttmsgs := [][2]int{
+		{1, 10},
+		{2, 20},
+		{3, 30},
+		{4, 40},
+		{1, 30},
+		{2, 30},
+	}
+	inStream, ttStream := producer(channels, ttmsgs)
 	defer func() {
 		(*inStream).Close()
 		(*ttStream).Close()
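In the softTimeTickBarrier hunk above, the new condition reads ttBarrier.lastTt directly even though the line just before still takes an atomic snapshot of the same field. A minimal illustration of the difference between the two reads (hypothetical type, not from the commit):

package main

import (
	"fmt"
	"sync/atomic"
)

type barrier struct {
	lastTt int64
}

func main() {
	b := &barrier{}
	atomic.StoreInt64(&b.lastTt, 42)

	snapshot := atomic.LoadInt64(&b.lastTt) // race-free read, pairs with atomic stores
	direct := b.lastTt                      // unsynchronized read; flagged by `go test -race` if another goroutine stores concurrently

	fmt.Println(snapshot, direct)
}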
@@ -1,6 +1,8 @@
 package msgstream

 import (
+	"context"
+
 	"github.com/golang/protobuf/proto"
 	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
 )
@@ -8,6 +10,8 @@ import (
 type MsgType = internalPb.MsgType

 type TsMsg interface {
+	GetMsgContext() context.Context
+	SetMsgContext(context.Context)
 	BeginTs() Timestamp
 	EndTs() Timestamp
 	Type() MsgType
@@ -17,6 +21,7 @@ type TsMsg interface {
 }

 type BaseMsg struct {
+	MsgCtx         context.Context
 	BeginTimestamp Timestamp
 	EndTimestamp   Timestamp
 	HashValues     []uint32
@@ -44,6 +49,14 @@ func (it *InsertMsg) Type() MsgType {
 	return it.MsgType
 }

+func (it *InsertMsg) GetMsgContext() context.Context {
+	return it.MsgCtx
+}
+
+func (it *InsertMsg) SetMsgContext(ctx context.Context) {
+	it.MsgCtx = ctx
+}
+
 func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
 	insertMsg := input.(*InsertMsg)
 	insertRequest := &insertMsg.InsertRequest
@@ -88,6 +101,13 @@ func (fl *FlushMsg) Type() MsgType {
 	return fl.GetMsgType()
 }

+func (fl *FlushMsg) GetMsgContext() context.Context {
+	return fl.MsgCtx
+}
+func (fl *FlushMsg) SetMsgContext(ctx context.Context) {
+	fl.MsgCtx = ctx
+}
+
 func (fl *FlushMsg) Marshal(input TsMsg) ([]byte, error) {
 	flushMsgTask := input.(*FlushMsg)
 	flushMsg := &flushMsgTask.FlushMsg
@@ -121,6 +141,14 @@ func (dt *DeleteMsg) Type() MsgType {
 	return dt.MsgType
 }

+func (dt *DeleteMsg) GetMsgContext() context.Context {
+	return dt.MsgCtx
+}
+
+func (dt *DeleteMsg) SetMsgContext(ctx context.Context) {
+	dt.MsgCtx = ctx
+}
+
 func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
 	deleteTask := input.(*DeleteMsg)
 	deleteRequest := &deleteTask.DeleteRequest
@@ -165,6 +193,14 @@ func (st *SearchMsg) Type() MsgType {
 	return st.MsgType
 }

+func (st *SearchMsg) GetMsgContext() context.Context {
+	return st.MsgCtx
+}
+
+func (st *SearchMsg) SetMsgContext(ctx context.Context) {
+	st.MsgCtx = ctx
+}
+
 func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
 	searchTask := input.(*SearchMsg)
 	searchRequest := &searchTask.SearchRequest
@@ -198,6 +234,14 @@ func (srt *SearchResultMsg) Type() MsgType {
 	return srt.MsgType
 }

+func (srt *SearchResultMsg) GetMsgContext() context.Context {
+	return srt.MsgCtx
+}
+
+func (srt *SearchResultMsg) SetMsgContext(ctx context.Context) {
+	srt.MsgCtx = ctx
+}
+
 func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
 	searchResultTask := input.(*SearchResultMsg)
 	searchResultRequest := &searchResultTask.SearchResult
@@ -231,6 +275,14 @@ func (tst *TimeTickMsg) Type() MsgType {
 	return tst.MsgType
 }

+func (tst *TimeTickMsg) GetMsgContext() context.Context {
+	return tst.MsgCtx
+}
+
+func (tst *TimeTickMsg) SetMsgContext(ctx context.Context) {
+	tst.MsgCtx = ctx
+}
+
 func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
 	timeTickTask := input.(*TimeTickMsg)
 	timeTick := &timeTickTask.TimeTickMsg
@@ -264,6 +316,14 @@ func (qs *QueryNodeStatsMsg) Type() MsgType {
 	return qs.MsgType
 }

+func (qs *QueryNodeStatsMsg) GetMsgContext() context.Context {
+	return qs.MsgCtx
+}
+
+func (qs *QueryNodeStatsMsg) SetMsgContext(ctx context.Context) {
+	qs.MsgCtx = ctx
+}
+
 func (qs *QueryNodeStatsMsg) Marshal(input TsMsg) ([]byte, error) {
 	queryNodeSegStatsTask := input.(*QueryNodeStatsMsg)
 	queryNodeSegStats := &queryNodeSegStatsTask.QueryNodeStats
@@ -305,6 +365,14 @@ func (cc *CreateCollectionMsg) Type() MsgType {
 	return cc.MsgType
 }

+func (cc *CreateCollectionMsg) GetMsgContext() context.Context {
+	return cc.MsgCtx
+}
+
+func (cc *CreateCollectionMsg) SetMsgContext(ctx context.Context) {
+	cc.MsgCtx = ctx
+}
+
 func (cc *CreateCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
 	createCollectionMsg := input.(*CreateCollectionMsg)
 	createCollectionRequest := &createCollectionMsg.CreateCollectionRequest
@@ -337,6 +405,13 @@ type DropCollectionMsg struct {
 func (dc *DropCollectionMsg) Type() MsgType {
 	return dc.MsgType
 }
+func (dc *DropCollectionMsg) GetMsgContext() context.Context {
+	return dc.MsgCtx
+}
+
+func (dc *DropCollectionMsg) SetMsgContext(ctx context.Context) {
+	dc.MsgCtx = ctx
+}

 func (dc *DropCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
 	dropCollectionMsg := input.(*DropCollectionMsg)
@@ -361,111 +436,20 @@ func (dc *DropCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
 	return dropCollectionMsg, nil
 }

-/////////////////////////////////////////HasCollection//////////////////////////////////////////
-type HasCollectionMsg struct {
-	BaseMsg
-	internalPb.HasCollectionRequest
-}
-
-func (hc *HasCollectionMsg) Type() MsgType {
-	return hc.MsgType
-}
-
-func (hc *HasCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
-	hasCollectionMsg := input.(*HasCollectionMsg)
-	hasCollectionRequest := &hasCollectionMsg.HasCollectionRequest
-	mb, err := proto.Marshal(hasCollectionRequest)
-	if err != nil {
-		return nil, err
-	}
-	return mb, nil
-}
-
-func (hc *HasCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
-	hasCollectionRequest := internalPb.HasCollectionRequest{}
-	err := proto.Unmarshal(input, &hasCollectionRequest)
-	if err != nil {
-		return nil, err
-	}
-	hasCollectionMsg := &HasCollectionMsg{HasCollectionRequest: hasCollectionRequest}
-	hasCollectionMsg.BeginTimestamp = hasCollectionMsg.Timestamp
-	hasCollectionMsg.EndTimestamp = hasCollectionMsg.Timestamp
-
-	return hasCollectionMsg, nil
-}
-
-/////////////////////////////////////////DescribeCollection//////////////////////////////////////////
-type DescribeCollectionMsg struct {
-	BaseMsg
-	internalPb.DescribeCollectionRequest
-}
-
-func (dc *DescribeCollectionMsg) Type() MsgType {
-	return dc.MsgType
-}
-
-func (dc *DescribeCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
-	describeCollectionMsg := input.(*DescribeCollectionMsg)
-	describeCollectionRequest := &describeCollectionMsg.DescribeCollectionRequest
-	mb, err := proto.Marshal(describeCollectionRequest)
-	if err != nil {
-		return nil, err
-	}
-	return mb, nil
-}
-
-func (dc *DescribeCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
-	describeCollectionRequest := internalPb.DescribeCollectionRequest{}
-	err := proto.Unmarshal(input, &describeCollectionRequest)
-	if err != nil {
-		return nil, err
-	}
-	describeCollectionMsg := &DescribeCollectionMsg{DescribeCollectionRequest: describeCollectionRequest}
-	describeCollectionMsg.BeginTimestamp = describeCollectionMsg.Timestamp
-	describeCollectionMsg.EndTimestamp = describeCollectionMsg.Timestamp
-
-	return describeCollectionMsg, nil
-}
-
-/////////////////////////////////////////ShowCollection//////////////////////////////////////////
-type ShowCollectionMsg struct {
-	BaseMsg
-	internalPb.ShowCollectionRequest
-}
-
-func (sc *ShowCollectionMsg) Type() MsgType {
-	return sc.MsgType
-}
-
-func (sc *ShowCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
-	showCollectionMsg := input.(*ShowCollectionMsg)
-	showCollectionRequest := &showCollectionMsg.ShowCollectionRequest
-	mb, err := proto.Marshal(showCollectionRequest)
-	if err != nil {
-		return nil, err
-	}
-	return mb, nil
-}
-
-func (sc *ShowCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
-	showCollectionRequest := internalPb.ShowCollectionRequest{}
-	err := proto.Unmarshal(input, &showCollectionRequest)
-	if err != nil {
-		return nil, err
-	}
-	showCollectionMsg := &ShowCollectionMsg{ShowCollectionRequest: showCollectionRequest}
-	showCollectionMsg.BeginTimestamp = showCollectionMsg.Timestamp
-	showCollectionMsg.EndTimestamp = showCollectionMsg.Timestamp
-
-	return showCollectionMsg, nil
-}
-
 /////////////////////////////////////////CreatePartition//////////////////////////////////////////
 type CreatePartitionMsg struct {
 	BaseMsg
 	internalPb.CreatePartitionRequest
 }

+func (cc *CreatePartitionMsg) GetMsgContext() context.Context {
+	return cc.MsgCtx
+}
+
+func (cc *CreatePartitionMsg) SetMsgContext(ctx context.Context) {
+	cc.MsgCtx = ctx
+}
+
 func (cc *CreatePartitionMsg) Type() MsgType {
 	return cc.MsgType
 }
@@ -499,6 +483,14 @@ type DropPartitionMsg struct {
 	internalPb.DropPartitionRequest
 }

+func (dc *DropPartitionMsg) GetMsgContext() context.Context {
+	return dc.MsgCtx
+}
+
+func (dc *DropPartitionMsg) SetMsgContext(ctx context.Context) {
+	dc.MsgCtx = ctx
+}
+
 func (dc *DropPartitionMsg) Type() MsgType {
 	return dc.MsgType
 }
@@ -526,105 +518,6 @@ func (dc *DropPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
 	return dropPartitionMsg, nil
 }

-/////////////////////////////////////////HasPartition//////////////////////////////////////////
-type HasPartitionMsg struct {
-	BaseMsg
-	internalPb.HasPartitionRequest
-}
-
-func (hc *HasPartitionMsg) Type() MsgType {
-	return hc.MsgType
-}
-
-func (hc *HasPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
-	hasPartitionMsg := input.(*HasPartitionMsg)
-	hasPartitionRequest := &hasPartitionMsg.HasPartitionRequest
-	mb, err := proto.Marshal(hasPartitionRequest)
-	if err != nil {
-		return nil, err
-	}
-	return mb, nil
-}
-
-func (hc *HasPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
-	hasPartitionRequest := internalPb.HasPartitionRequest{}
-	err := proto.Unmarshal(input, &hasPartitionRequest)
-	if err != nil {
-		return nil, err
-	}
-	hasPartitionMsg := &HasPartitionMsg{HasPartitionRequest: hasPartitionRequest}
-	hasPartitionMsg.BeginTimestamp = hasPartitionMsg.Timestamp
-	hasPartitionMsg.EndTimestamp = hasPartitionMsg.Timestamp
-
-	return hasPartitionMsg, nil
-}
-
-/////////////////////////////////////////DescribePartition//////////////////////////////////////////
-type DescribePartitionMsg struct {
-	BaseMsg
-	internalPb.DescribePartitionRequest
-}
-
-func (dc *DescribePartitionMsg) Type() MsgType {
-	return dc.MsgType
-}
-
-func (dc *DescribePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
-	describePartitionMsg := input.(*DescribePartitionMsg)
-	describePartitionRequest := &describePartitionMsg.DescribePartitionRequest
-	mb, err := proto.Marshal(describePartitionRequest)
-	if err != nil {
-		return nil, err
-	}
-	return mb, nil
-}
-
-func (dc *DescribePartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
-	describePartitionRequest := internalPb.DescribePartitionRequest{}
-	err := proto.Unmarshal(input, &describePartitionRequest)
-	if err != nil {
-		return nil, err
-	}
-	describePartitionMsg := &DescribePartitionMsg{DescribePartitionRequest: describePartitionRequest}
-	describePartitionMsg.BeginTimestamp = describePartitionMsg.Timestamp
-	describePartitionMsg.EndTimestamp = describePartitionMsg.Timestamp
-
-	return describePartitionMsg, nil
-}
-
-/////////////////////////////////////////ShowPartition//////////////////////////////////////////
-type ShowPartitionMsg struct {
-	BaseMsg
-	internalPb.ShowPartitionRequest
-}
-
-func (sc *ShowPartitionMsg) Type() MsgType {
-	return sc.MsgType
-}
-
-func (sc *ShowPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
-	showPartitionMsg := input.(*ShowPartitionMsg)
-	showPartitionRequest := &showPartitionMsg.ShowPartitionRequest
-	mb, err := proto.Marshal(showPartitionRequest)
-	if err != nil {
-		return nil, err
-	}
-	return mb, nil
-}
-
-func (sc *ShowPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
-	showPartitionRequest := internalPb.ShowPartitionRequest{}
-	err := proto.Unmarshal(input, &showPartitionRequest)
-	if err != nil {
-		return nil, err
-	}
-	showPartitionMsg := &ShowPartitionMsg{ShowPartitionRequest: showPartitionRequest}
-	showPartitionMsg.BeginTimestamp = showPartitionMsg.Timestamp
-	showPartitionMsg.EndTimestamp = showPartitionMsg.Timestamp
-
-	return showPartitionMsg, nil
-}
-
 /////////////////////////////////////////LoadIndex//////////////////////////////////////////
 type LoadIndexMsg struct {
 	BaseMsg
@@ -635,6 +528,14 @@ func (lim *LoadIndexMsg) Type() MsgType {
 	return lim.MsgType
 }

+func (lim *LoadIndexMsg) GetMsgContext() context.Context {
+	return lim.MsgCtx
+}
+
+func (lim *LoadIndexMsg) SetMsgContext(ctx context.Context) {
+	lim.MsgCtx = ctx
+}
+
 func (lim *LoadIndexMsg) Marshal(input TsMsg) ([]byte, error) {
 	loadIndexMsg := input.(*LoadIndexMsg)
 	loadIndexRequest := &loadIndexMsg.LoadIndex
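With GetMsgContext/SetMsgContext on every TsMsg, a producer can hand an active tracing span to the stream before publishing. A hypothetical producer-side sketch (the span name is illustrative, and the msgstream import path is assumed from the other imports in this commit):

package main

import (
	"context"

	"github.com/opentracing/opentracing-go"
	msgstream "github.com/zilliztech/milvus-distributed/internal/msgstream"
)

func main() {
	// Start a span and attach its context to the message. Produce (shown in
	// the next file) reads it back via GetMsgContext and injects the span
	// into the Pulsar message properties for kInsert/kSearch/kSearchResult.
	span, ctx := opentracing.StartSpanFromContext(context.Background(), "insert request")
	defer span.Finish()

	msg := &msgstream.InsertMsg{}
	msg.SetMsgContext(ctx)
	_ = msg // a real producer would now wrap msg in a MsgPack and call Produce
}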
@@ -4,12 +4,15 @@ import (
 	"context"
 	"log"
 	"reflect"
+	"strings"
 	"sync"
 	"time"

 	"github.com/apache/pulsar-client-go/pulsar"
 	"github.com/golang/protobuf/proto"
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	oplog "github.com/opentracing/opentracing-go/log"
 	"github.com/zilliztech/milvus-distributed/internal/errors"
 	commonPb "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
@@ -151,15 +154,35 @@ func (ms *PulsarMsgStream) Close() {
 	}
 }

+type propertiesReaderWriter struct {
+	ppMap map[string]string
+}
+
+func (ppRW *propertiesReaderWriter) Set(key, val string) {
+	// The GRPC HPACK implementation rejects any uppercase keys here.
+	//
+	// As such, since the HTTP_HEADERS format is case-insensitive anyway, we
+	// blindly lowercase the key (which is guaranteed to work in the
+	// Inject/Extract sense per the OpenTracing spec).
+	key = strings.ToLower(key)
+	ppRW.ppMap[key] = val
+}
+
+func (ppRW *propertiesReaderWriter) ForeachKey(handler func(key, val string) error) error {
+	for k, val := range ppRW.ppMap {
+		if err := handler(k, val); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 func (ms *PulsarMsgStream) Produce(msgPack *MsgPack) error {
 	tsMsgs := msgPack.Msgs
 	if len(tsMsgs) <= 0 {
 		log.Printf("Warning: Receive empty msgPack")
 		return nil
 	}
-	if len(ms.producers) <= 0 {
-		return errors.New("nil producer in msg stream")
-	}
 	reBucketValues := make([][]int32, len(tsMsgs))
 	for channelID, tsMsg := range tsMsgs {
 		hashValues := tsMsg.HashKeys()
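The propertiesReaderWriter carrier added above is what ties the two ends together: Produce and Broadcast inject the span context into pulsar.ProducerMessage.Properties, and bufMsgPackToChannel extracts it on the consumer side. A self-contained round-trip sketch of the same idea (using opentracing-go's mocktracer, which is an assumption here since the commit itself never names a concrete tracer):

package main

import (
	"fmt"
	"strings"

	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/mocktracer"
)

// Same shape as the carrier in the diff: a TextMap carrier over the
// message-properties map.
type propertiesReaderWriter struct {
	ppMap map[string]string
}

func (ppRW *propertiesReaderWriter) Set(key, val string) {
	ppRW.ppMap[strings.ToLower(key)] = val
}

func (ppRW *propertiesReaderWriter) ForeachKey(handler func(key, val string) error) error {
	for k, val := range ppRW.ppMap {
		if err := handler(k, val); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	tracer := mocktracer.New()
	props := map[string]string{} // stands in for pulsar.ProducerMessage.Properties

	// Producer side: inject the active span context into the properties.
	parent := tracer.StartSpan("start send pulsar msg")
	if err := tracer.Inject(parent.Context(), opentracing.TextMap, &propertiesReaderWriter{props}); err != nil {
		panic(err)
	}
	parent.Finish()

	// Consumer side: extract it and start a follower span.
	spanCtx, err := tracer.Extract(opentracing.TextMap, &propertiesReaderWriter{props})
	if err != nil {
		panic(err)
	}
	child := tracer.StartSpan("pulsar msg received", opentracing.FollowsFrom(spanCtx))
	child.Finish()

	fmt.Printf("%d propagated keys, %d finished spans\n", len(props), len(tracer.FinishedSpans()))
}

Note that the diff injects with opentracing.TextMap but extracts with opentracing.HTTPHeaders; both formats accept TextMapReader/TextMapWriter carriers, so with most tracers the round trip still succeeds.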
@@ -203,12 +226,51 @@ func (ms *PulsarMsgStream) Produce(msgPack *MsgPack) error {
 			if err != nil {
 				return err
 			}
+
+			msg := &pulsar.ProducerMessage{Payload: mb}
+			var child opentracing.Span
+			if v.Msgs[i].Type() == internalPb.MsgType_kInsert ||
+				v.Msgs[i].Type() == internalPb.MsgType_kSearch ||
+				v.Msgs[i].Type() == internalPb.MsgType_kSearchResult {
+				tracer := opentracing.GlobalTracer()
+				ctx := v.Msgs[i].GetMsgContext()
+				if ctx == nil {
+					ctx = context.Background()
+				}
+
+				if parent := opentracing.SpanFromContext(ctx); parent != nil {
+					child = tracer.StartSpan("start send pulsar msg",
+						opentracing.FollowsFrom(parent.Context()))
+				} else {
+					child = tracer.StartSpan("start send pulsar msg")
+				}
+				child.SetTag("hash keys", v.Msgs[i].HashKeys())
+				child.SetTag("start time", v.Msgs[i].BeginTs())
+				child.SetTag("end time", v.Msgs[i].EndTs())
+				child.SetTag("msg type", v.Msgs[i].Type())
+				msg.Properties = make(map[string]string)
+				err = tracer.Inject(child.Context(), opentracing.TextMap, &propertiesReaderWriter{msg.Properties})
+				if err != nil {
+					child.LogFields(oplog.Error(err))
+					child.Finish()
+					return err
+				}
+				child.LogFields(oplog.String("inject success", "inject success"))
+			}
+
 			if _, err := (*ms.producers[k]).Send(
 				context.Background(),
-				&pulsar.ProducerMessage{Payload: mb},
+				msg,
 			); err != nil {
+				if child != nil {
+					child.LogFields(oplog.Error(err))
+					child.Finish()
+				}
 				return err
 			}
+			if child != nil {
+				child.Finish()
+			}
 		}
 	}
 	return nil
@ -221,14 +283,50 @@ func (ms *PulsarMsgStream) Broadcast(msgPack *MsgPack) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
+		msg := &pulsar.ProducerMessage{Payload: mb}
+		var child opentracing.Span
+		if v.Type() == internalPb.MsgType_kInsert ||
+			v.Type() == internalPb.MsgType_kSearch ||
+			v.Type() == internalPb.MsgType_kSearchResult {
+			tracer := opentracing.GlobalTracer()
+			ctx := v.GetMsgContext()
+			if ctx == nil {
+				ctx = context.Background()
+			}
+			if parent := opentracing.SpanFromContext(ctx); parent != nil {
+				child = tracer.StartSpan("start send pulsar msg",
+					opentracing.FollowsFrom(parent.Context()))
+			} else {
+				child = tracer.StartSpan("start send pulsar msg, start time: %d")
+			}
+			child.SetTag("hash keys", v.HashKeys())
+			child.SetTag("start time", v.BeginTs())
+			child.SetTag("end time", v.EndTs())
+			child.SetTag("msg type", v.Type())
+			msg.Properties = make(map[string]string)
+			err = tracer.Inject(child.Context(), opentracing.TextMap, &propertiesReaderWriter{msg.Properties})
+			if err != nil {
+				child.LogFields(oplog.Error(err))
+				child.Finish()
+				return err
+			}
+			child.LogFields(oplog.String("inject success", "inject success"))
+		}
 		for i := 0; i < producerLen; i++ {
 			if _, err := (*ms.producers[i]).Send(
 				context.Background(),
-				&pulsar.ProducerMessage{Payload: mb},
+				msg,
 			); err != nil {
+				if child != nil {
+					child.LogFields(oplog.Error(err))
+					child.Finish()
+				}
 				return err
 			}
 		}
+		if child != nil {
+			child.Finish()
+		}
 	}
 	return nil
 }
@@ -261,6 +359,7 @@ func (ms *PulsarMsgStream) bufMsgPackToChannel() {
 	for {
 		select {
 		case <-ms.ctx.Done():
+			log.Println("done")
 			return
 		default:
 			tsMsgList := make([]TsMsg, 0)
@@ -273,6 +372,7 @@ func (ms *PulsarMsgStream) bufMsgPackToChannel() {
 			}

 			pulsarMsg, ok := value.Interface().(pulsar.ConsumerMessage)
+
 			if !ok {
 				log.Printf("type assertion failed, not consumer message type")
 				continue
@@ -286,6 +386,23 @@ func (ms *PulsarMsgStream) bufMsgPackToChannel() {
 				continue
 			}
 			tsMsg, err := ms.unmarshal.Unmarshal(pulsarMsg.Payload(), headerMsg.MsgType)
+			if tsMsg.Type() == internalPb.MsgType_kSearch ||
+				tsMsg.Type() == internalPb.MsgType_kSearchResult {
+				tracer := opentracing.GlobalTracer()
+				spanContext, err := tracer.Extract(opentracing.HTTPHeaders, &propertiesReaderWriter{pulsarMsg.Properties()})
+				if err != nil {
+					log.Println("extract message err")
+					log.Println(err.Error())
+				}
+				span := opentracing.StartSpan("pulsar msg received",
+					ext.RPCServerOption(spanContext))
+				span.SetTag("msg type", tsMsg.Type())
+				span.SetTag("hash keys", tsMsg.HashKeys())
+				span.SetTag("start time", tsMsg.BeginTs())
+				span.SetTag("end time", tsMsg.EndTs())
+				tsMsg.SetMsgContext(opentracing.ContextWithSpan(context.Background(), span))
+				span.Finish()
+			}
 			if err != nil {
 				log.Printf("Failed to unmarshal tsMsg, error = %v", err)
 				continue
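The consumer side mirrors the inject: Extract rebuilds a SpanContext from the message properties, and ext.RPCServerOption ties the receive span to it, adding a ChildOf reference only when extraction succeeded. Note the patch injects with the TextMap format but extracts with HTTPHeaders; with Jaeger's default propagators both use the same uber-trace-id key, though mixing formats is not guaranteed for every tracer. A standalone sketch, with an empty carrier standing in for pulsarMsg.Properties():

package main

import (
	"context"
	"log"

	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

func main() {
	tracer := opentracing.GlobalTracer()
	props := opentracing.TextMapCarrier(map[string]string{}) // would come from pulsarMsg.Properties()

	spanCtx, err := tracer.Extract(opentracing.HTTPHeaders, props)
	if err != nil {
		log.Println("extract message err:", err) // expected when no context was injected
	}
	// RPCServerOption tags the span as the server side of an RPC and adds
	// a ChildOf reference when spanCtx is non-nil.
	span := opentracing.StartSpan("pulsar msg received", ext.RPCServerOption(spanCtx))
	ctx := opentracing.ContextWithSpan(context.Background(), span)
	span.Finish()
	_ = ctx // handed to downstream stages via SetMsgContext in the patch
}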
@@ -349,6 +466,8 @@ func (ms *PulsarTtMsgStream) bufMsgPackToChannel() {
 	ms.inputBuf = make([]TsMsg, 0)
 	isChannelReady := make([]bool, len(ms.consumers))
 	eofMsgTimeStamp := make(map[int]Timestamp)
+	spans := make(map[Timestamp]opentracing.Span)
+	ctxs := make(map[Timestamp]context.Context)
 	for {
 		select {
 		case <-ms.ctx.Done():
@@ -356,16 +475,15 @@ func (ms *PulsarTtMsgStream) bufMsgPackToChannel() {
 		default:
 			wg := sync.WaitGroup{}
 			mu := sync.Mutex{}
-			findMapMutex := sync.RWMutex{}
 			for i := 0; i < len(ms.consumers); i++ {
 				if isChannelReady[i] {
 					continue
 				}
 				wg.Add(1)
-				go ms.findTimeTick(i, eofMsgTimeStamp, &wg, &mu, &findMapMutex)
+				go ms.findTimeTick(i, eofMsgTimeStamp, &wg, &mu)
 			}
 			wg.Wait()
-			timeStamp, ok := checkTimeTickMsg(eofMsgTimeStamp, isChannelReady, &findMapMutex)
+			timeStamp, ok := checkTimeTickMsg(eofMsgTimeStamp, isChannelReady)
 			if !ok || timeStamp <= ms.lastTimeStamp {
 				log.Printf("All timeTick's timestamps are inconsistent")
 				continue
@@ -375,8 +493,22 @@ func (ms *PulsarTtMsgStream) bufMsgPackToChannel() {
 			ms.inputBuf = append(ms.inputBuf, ms.unsolvedBuf...)
 			ms.unsolvedBuf = ms.unsolvedBuf[:0]
 			for _, v := range ms.inputBuf {
+				var ctx context.Context
+				var span opentracing.Span
+				if v.Type() == internalPb.MsgType_kInsert {
+					if _, ok := spans[v.BeginTs()]; !ok {
+						span, ctx = opentracing.StartSpanFromContext(v.GetMsgContext(), "after find time tick")
+						ctxs[v.BeginTs()] = ctx
+						spans[v.BeginTs()] = span
+					}
+				}
 				if v.EndTs() <= timeStamp {
 					timeTickBuf = append(timeTickBuf, v)
+					if v.Type() == internalPb.MsgType_kInsert {
+						v.SetMsgContext(ctxs[v.BeginTs()])
+						spans[v.BeginTs()].Finish()
+						delete(spans, v.BeginTs())
+					}
 				} else {
 					ms.unsolvedBuf = append(ms.unsolvedBuf, v)
 				}
@@ -398,8 +530,7 @@ func (ms *PulsarTtMsgStream) bufMsgPackToChannel() {
 func (ms *PulsarTtMsgStream) findTimeTick(channelIndex int,
 	eofMsgMap map[int]Timestamp,
 	wg *sync.WaitGroup,
-	mu *sync.Mutex,
-	findMapMutex *sync.RWMutex) {
+	mu *sync.Mutex) {
 	defer wg.Done()
 	for {
 		select {
@@ -425,10 +556,26 @@ func (ms *PulsarTtMsgStream) findTimeTick(channelIndex int,
 			if err != nil {
 				log.Printf("Failed to unmarshal, error = %v", err)
 			}
+
+			if tsMsg.Type() == internalPb.MsgType_kInsert {
+				tracer := opentracing.GlobalTracer()
+				spanContext, err := tracer.Extract(opentracing.HTTPHeaders, &propertiesReaderWriter{pulsarMsg.Properties()})
+				if err != nil {
+					log.Println("extract message err")
+					log.Println(err.Error())
+				}
+				span := opentracing.StartSpan("pulsar msg received",
+					ext.RPCServerOption(spanContext))
+				span.SetTag("hash keys", tsMsg.HashKeys())
+				span.SetTag("start time", tsMsg.BeginTs())
+				span.SetTag("end time", tsMsg.EndTs())
+				span.SetTag("msg type", tsMsg.Type())
+				tsMsg.SetMsgContext(opentracing.ContextWithSpan(context.Background(), span))
+				span.Finish()
+			}
+
 			if headerMsg.MsgType == internalPb.MsgType_kTimeTick {
-				findMapMutex.Lock()
 				eofMsgMap[channelIndex] = tsMsg.(*TimeTickMsg).Timestamp
-				findMapMutex.Unlock()
 				return
 			}
 			mu.Lock()
@@ -477,7 +624,7 @@ func (ms *InMemMsgStream) Chan() <- chan *MsgPack {
 	}
 */

-func checkTimeTickMsg(msg map[int]Timestamp, isChannelReady []bool, mu *sync.RWMutex) (Timestamp, bool) {
+func checkTimeTickMsg(msg map[int]Timestamp, isChannelReady []bool) (Timestamp, bool) {
 	checkMap := make(map[Timestamp]int)
 	var maxTime Timestamp = 0
 	for _, v := range msg {
@@ -492,10 +639,7 @@ func checkTimeTickMsg(msg map[int]Timestamp, isChannelReady []bool, mu *sync.RWM
 		}
 		return maxTime, true
 	}
-	for i := range msg {
-		mu.RLock()
-		v := msg[i]
-		mu.Unlock()
+	for i, v := range msg {
 		if v != maxTime {
 			isChannelReady[i] = false
 		} else {
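checkTimeTickMsg acts as a small barrier: every consumer must report the same (maximum) time tick before buffered messages are released downstream. A simplified, self-contained sketch of that agree-on-max shape; the types and the readyOnMax name are illustrative, and the real function additionally counts timestamps in a checkMap:

package main

import "fmt"

// readyOnMax returns the max timestamp and whether every channel reached
// it, marking stragglers not-ready -- the shape of checkTimeTickMsg.
func readyOnMax(ts map[int]uint64, ready []bool) (uint64, bool) {
	var max uint64
	for _, v := range ts {
		if v > max {
			max = v
		}
	}
	ok := true
	for i, v := range ts {
		ready[i] = v == max
		if !ready[i] {
			ok = false
		}
	}
	return max, ok
}

func main() {
	ready := make([]bool, 3)
	max, ok := readyOnMax(map[int]uint64{0: 5, 1: 5, 2: 3}, ready)
	fmt.Println(max, ok, ready) // 5 false [true true false]
}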
@@ -510,7 +654,7 @@ func insertRepackFunc(tsMsgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, e
 	result := make(map[int32]*MsgPack)
 	for i, request := range tsMsgs {
 		if request.Type() != internalPb.MsgType_kInsert {
-			return nil, errors.New(string("msg's must be Insert"))
+			return nil, errors.New("msg's must be Insert")
 		}
 		insertRequest := request.(*InsertMsg)
 		keys := hashKeys[i]
@@ -521,7 +665,7 @@ func insertRepackFunc(tsMsgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, e
 		keysLen := len(keys)

 		if keysLen != timestampLen || keysLen != rowIDLen || keysLen != rowDataLen {
-			return nil, errors.New(string("the length of hashValue, timestamps, rowIDs, RowData are not equal"))
+			return nil, errors.New("the length of hashValue, timestamps, rowIDs, RowData are not equal")
 		}
 		for index, key := range keys {
 			_, ok := result[key]
@@ -544,6 +688,9 @@ func insertRepackFunc(tsMsgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, e
 			}

 			insertMsg := &InsertMsg{
+				BaseMsg: BaseMsg{
+					MsgCtx: request.GetMsgContext(),
+				},
 				InsertRequest: sliceRequest,
 			}
 			result[key].Msgs = append(result[key].Msgs, insertMsg)
@@ -556,7 +703,7 @@ func deleteRepackFunc(tsMsgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, e
 	result := make(map[int32]*MsgPack)
 	for i, request := range tsMsgs {
 		if request.Type() != internalPb.MsgType_kDelete {
-			return nil, errors.New(string("msg's must be Delete"))
+			return nil, errors.New("msg's must be Delete")
 		}
 		deleteRequest := request.(*DeleteMsg)
 		keys := hashKeys[i]
@@ -566,7 +713,7 @@ func deleteRepackFunc(tsMsgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, e
 		keysLen := len(keys)

 		if keysLen != timestampLen || keysLen != primaryKeysLen {
-			return nil, errors.New(string("the length of hashValue, timestamps, primaryKeys are not equal"))
+			return nil, errors.New("the length of hashValue, timestamps, primaryKeys are not equal")
 		}

 		for index, key := range keys {
@@ -600,7 +747,7 @@ func defaultRepackFunc(tsMsgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack,
 	for i, request := range tsMsgs {
 		keys := hashKeys[i]
 		if len(keys) != 1 {
-			return nil, errors.New(string("len(msg.hashValue) must equal 1"))
+			return nil, errors.New("len(msg.hashValue) must equal 1")
 		}
 		key := keys[0]
 		_, ok := result[key]
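All three repack functions share one move: validate the per-message hash keys, then route each message into a per-key MsgPack. A reduced sketch of that bucketing with stand-in types (msgPack and repackByKey here are illustrative, not the project's types):

package main

import "fmt"

type msgPack struct{ msgs []string }

// repackByKey groups messages by their (single) hash key, the core of
// defaultRepackFunc; the real code validates len(keys) == 1 the same way.
func repackByKey(msgs []string, hashKeys [][]int32) (map[int32]*msgPack, error) {
	result := make(map[int32]*msgPack)
	for i, m := range msgs {
		if len(hashKeys[i]) != 1 {
			return nil, fmt.Errorf("len(msg.hashValue) must equal 1")
		}
		key := hashKeys[i][0]
		if _, ok := result[key]; !ok {
			result[key] = &msgPack{}
		}
		result[key].msgs = append(result[key].msgs, m)
	}
	return result, nil
}

func main() {
	packs, _ := repackByKey([]string{"a", "b", "c"}, [][]int32{{1}, {2}, {1}})
	fmt.Println(len(packs)) // 2
}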
@@ -526,6 +526,8 @@ func TestStream_PulsarTtMsgStream_Insert(t *testing.T) {
 		log.Fatalf("broadcast error = %v", err)
 	}
 	receiveMsg(outputStream, len(msgPack1.Msgs))
+	outputTtStream := (*outputStream).(*PulsarTtMsgStream)
+	fmt.Printf("timestamp = %v", outputTtStream.lastTimeStamp)
 	(*inputStream).Close()
 	(*outputStream).Close()
 }
@@ -6,6 +6,7 @@ import (
 	"log"
 	"time"

+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
@@ -18,8 +19,13 @@ const (
 )

 func (p *Proxy) Insert(ctx context.Context, in *servicepb.RowBatch) (*servicepb.IntegerRangeResponse, error) {
+	span, ctx := opentracing.StartSpanFromContext(ctx, "insert grpc received")
+	defer span.Finish()
+	span.SetTag("collection name", in.CollectionName)
+	span.SetTag("partition tag", in.PartitionTag)
 	log.Println("insert into: ", in.CollectionName)
 	it := &InsertTask{
+		ctx:       ctx,
 		Condition: NewTaskCondition(ctx),
 		BaseInsertTask: BaseInsertTask{
 			BaseMsg: msgstream.BaseMsg{
@@ -119,8 +125,14 @@ func (p *Proxy) CreateCollection(ctx context.Context, req *schemapb.CollectionSc
 }

 func (p *Proxy) Search(ctx context.Context, req *servicepb.Query) (*servicepb.QueryResult, error) {
+	span, ctx := opentracing.StartSpanFromContext(ctx, "search grpc received")
+	defer span.Finish()
+	span.SetTag("collection name", req.CollectionName)
+	span.SetTag("partition tag", req.PartitionTags)
+	span.SetTag("dsl", req.Dsl)
 	log.Println("search: ", req.CollectionName, req.Dsl)
 	qt := &QueryTask{
+		ctx:       ctx,
 		Condition: NewTaskCondition(ctx),
 		SearchRequest: internalpb.SearchRequest{
 			ProxyID: Params.ProxyID(),
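Both handlers now open a server-side span and thread the returned ctx into the task object, so any later stage can resume the same trace. A minimal sketch of that hand-off (handler and stage names are hypothetical):

package main

import (
	"context"
	"fmt"

	"github.com/opentracing/opentracing-go"
)

func handle(ctx context.Context) {
	// Mirrors the gRPC entry points: open a span, re-root the context.
	span, ctx := opentracing.StartSpanFromContext(ctx, "insert grpc received")
	defer span.Finish()
	span.SetTag("collection name", "demo")
	work(ctx)
}

func work(ctx context.Context) {
	// Any downstream stage can continue the same trace from ctx.
	span, _ := opentracing.StartSpanFromContext(ctx, "task preExecute")
	defer span.Finish()
	fmt.Println("parent present:", opentracing.SpanFromContext(ctx) != nil)
}

func main() { handle(context.Background()) }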
@@ -2,6 +2,8 @@ package proxy

 import (
 	"context"
+	"fmt"
+	"io"
 	"log"
 	"math/rand"
 	"net"
@@ -9,6 +11,10 @@ import (
 	"sync"
 	"time"

+	"github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-client-go"
+	"github.com/uber/jaeger-client-go/config"
+
 	"google.golang.org/grpc"

 	"github.com/zilliztech/milvus-distributed/internal/allocator"
@@ -39,6 +45,9 @@ type Proxy struct {
 	manipulationMsgStream *msgstream.PulsarMsgStream
 	queryMsgStream        *msgstream.PulsarMsgStream

+	tracer opentracing.Tracer
+	closer io.Closer
+
 	// Add callback functions at different stages
 	startCallbacks []func()
 	closeCallbacks []func()
@@ -51,11 +60,28 @@ func Init() {
 func CreateProxy(ctx context.Context) (*Proxy, error) {
 	rand.Seed(time.Now().UnixNano())
 	ctx1, cancel := context.WithCancel(ctx)
+	var err error
 	p := &Proxy{
 		proxyLoopCtx:    ctx1,
 		proxyLoopCancel: cancel,
 	}

+	cfg := &config.Configuration{
+		ServiceName: "proxy",
+		Sampler: &config.SamplerConfig{
+			Type:  "const",
+			Param: 1,
+		},
+		Reporter: &config.ReporterConfig{
+			LogSpans: true,
+		},
+	}
+	p.tracer, p.closer, err = cfg.NewTracer(config.Logger(jaeger.StdLogger))
+	if err != nil {
+		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
+	}
+	opentracing.SetGlobalTracer(p.tracer)
+
 	pulsarAddress := Params.PulsarAddress()

 	p.queryMsgStream = msgstream.NewPulsarMsgStream(p.proxyLoopCtx, Params.MsgStreamSearchBufSize())
@@ -198,12 +224,17 @@ func (p *Proxy) stopProxyLoop() {
 	p.tick.Close()

 	p.proxyLoopWg.Wait()
+
 }

 // Close closes the server.
 func (p *Proxy) Close() {
 	p.stopProxyLoop()
+
+	if p.closer != nil {
+		p.closer.Close()
+	}
+
 	for _, cb := range p.closeCallbacks {
 		cb()
 	}
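The sampler above traces every request (const sampler with Param 1) and logs spans, which suits debugging more than production. For comparison, recent jaeger-client-go versions can also assemble the same Configuration from JAEGER_* environment variables; a hedged sketch, assuming config.FromEnv is available in the vendored client version:

package main

import (
	"io"
	"log"

	"github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go/config"
)

// initTracer builds a Jaeger tracer from JAEGER_* env vars (service name,
// sampler, agent address), falling back to a default service name.
func initTracer() (opentracing.Tracer, io.Closer, error) {
	cfg, err := config.FromEnv()
	if err != nil {
		return nil, nil, err
	}
	if cfg.ServiceName == "" {
		cfg.ServiceName = "proxy" // matches the hard-coded name in the patch
	}
	return cfg.NewTracer()
}

func main() {
	tracer, closer, err := initTracer()
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close() // same role as p.closer.Close() in Proxy.Close
	opentracing.SetGlobalTracer(tracer)
}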
@@ -210,7 +210,6 @@ func TestProxy_CreateCollection(t *testing.T) {
 		wg.Add(1)
 		go func(group *sync.WaitGroup) {
 			defer group.Done()
-			println("collectionName:", collectionName)
 			createCollection(t, collectionName)
 			dropCollection(t, collectionName)
 		}(&wg)
@@ -489,9 +488,7 @@ func TestProxy_CreateIndex(t *testing.T) {
 		go func(group *sync.WaitGroup) {
 			defer group.Done()
 			createCollection(t, collName)
-			if i%2 == 0 {
 			createIndex(t, collName, fieldName)
-			}
 			dropCollection(t, collName)
 			// dropIndex(t, collectionName, fieldName, indexName)
 		}(&wg)
@@ -513,9 +510,7 @@ func TestProxy_DescribeIndex(t *testing.T) {
 		go func(group *sync.WaitGroup) {
 			defer group.Done()
 			createCollection(t, collName)
-			if i%2 == 0 {
 			createIndex(t, collName, fieldName)
-			}
 			req := &servicepb.DescribeIndexRequest{
 				CollectionName: collName,
 				FieldName:      fieldName,
@@ -544,9 +539,7 @@ func TestProxy_DescribeIndexProgress(t *testing.T) {
 		go func(group *sync.WaitGroup) {
 			defer group.Done()
 			createCollection(t, collName)
-			if i%2 == 0 {
 			createIndex(t, collName, fieldName)
-			}
 			req := &servicepb.DescribeIndexProgressRequest{
 				CollectionName: collName,
 				FieldName:      fieldName,
@@ -182,6 +182,7 @@ func insertRepackFunc(tsMsgs []msgstream.TsMsg,
 			insertMsg := &msgstream.InsertMsg{
 				InsertRequest: sliceRequest,
 			}
+			insertMsg.SetMsgContext(request.GetMsgContext())
 			if together { // all rows with same hash value are accumulated to only one message
 				if len(result[key].Msgs) <= 0 {
 					result[key].Msgs = append(result[key].Msgs, insertMsg)
@@ -7,6 +7,9 @@ import (
 	"math"
 	"strconv"

+	"github.com/opentracing/opentracing-go"
+	oplog "github.com/opentracing/opentracing-go/log"
+
 	"github.com/golang/protobuf/proto"
 	"github.com/zilliztech/milvus-distributed/internal/allocator"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
@@ -74,12 +77,21 @@ func (it *InsertTask) Type() internalpb.MsgType {
 }

 func (it *InsertTask) PreExecute() error {
+	span, ctx := opentracing.StartSpanFromContext(it.ctx, "InsertTask preExecute")
+	defer span.Finish()
+	it.ctx = ctx
+	span.SetTag("hash keys", it.ReqID)
+	span.SetTag("start time", it.BeginTs())
 	collectionName := it.BaseInsertTask.CollectionName
 	if err := ValidateCollectionName(collectionName); err != nil {
+		span.LogFields(oplog.Error(err))
+		span.Finish()
 		return err
 	}
 	partitionTag := it.BaseInsertTask.PartitionTag
 	if err := ValidatePartitionTag(partitionTag, true); err != nil {
+		span.LogFields(oplog.Error(err))
+		span.Finish()
 		return err
 	}

@@ -87,22 +99,36 @@ func (it *InsertTask) PreExecute() error {
 }

 func (it *InsertTask) Execute() error {
+	span, ctx := opentracing.StartSpanFromContext(it.ctx, "InsertTask Execute")
+	defer span.Finish()
+	it.ctx = ctx
+	span.SetTag("hash keys", it.ReqID)
+	span.SetTag("start time", it.BeginTs())
 	collectionName := it.BaseInsertTask.CollectionName
+	span.LogFields(oplog.String("collection_name", collectionName))
 	if !globalMetaCache.Hit(collectionName) {
 		err := globalMetaCache.Sync(collectionName)
 		if err != nil {
+			span.LogFields(oplog.Error(err))
+			span.Finish()
 			return err
 		}
 	}
 	description, err := globalMetaCache.Get(collectionName)
 	if err != nil || description == nil {
+		span.LogFields(oplog.Error(err))
+		span.Finish()
 		return err
 	}
 	autoID := description.Schema.AutoID
+	span.LogFields(oplog.Bool("auto_id", autoID))
 	var rowIDBegin UniqueID
 	var rowIDEnd UniqueID
 	rowNums := len(it.BaseInsertTask.RowData)
 	rowIDBegin, rowIDEnd, _ = it.rowIDAllocator.Alloc(uint32(rowNums))
+	span.LogFields(oplog.Int("rowNums", rowNums),
+		oplog.Int("rowIDBegin", int(rowIDBegin)),
+		oplog.Int("rowIDEnd", int(rowIDEnd)))
 	it.BaseInsertTask.RowIDs = make([]UniqueID, rowNums)
 	for i := rowIDBegin; i < rowIDEnd; i++ {
 		offset := i - rowIDBegin
@@ -125,6 +151,8 @@ func (it *InsertTask) Execute() error {
 		EndTs: it.EndTs(),
 		Msgs:  make([]msgstream.TsMsg, 1),
 	}
+	tsMsg.SetMsgContext(ctx)
+	span.LogFields(oplog.String("send msg", "send msg"))
 	msgPack.Msgs[0] = tsMsg
 	err = it.manipulationMsgStream.Produce(msgPack)

@@ -138,11 +166,14 @@ func (it *InsertTask) Execute() error {
 	if err != nil {
 		it.result.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
 		it.result.Status.Reason = err.Error()
+		span.LogFields(oplog.Error(err))
 	}
 	return nil
 }

 func (it *InsertTask) PostExecute() error {
+	span, _ := opentracing.StartSpanFromContext(it.ctx, "InsertTask postExecute")
+	defer span.Finish()
 	return nil
 }

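The task methods above rely on opentracing's structured log fields rather than free-form messages, attaching errors and progress markers directly to the span. A compact, runnable sketch of that pattern in isolation (the tag and field values are placeholders):

package main

import (
	"errors"

	"github.com/opentracing/opentracing-go"
	oplog "github.com/opentracing/opentracing-go/log"
)

func main() {
	span := opentracing.GlobalTracer().StartSpan("InsertTask Execute")
	defer span.Finish()

	span.SetTag("hash keys", int64(42))
	// Structured key/value events, as used throughout the patch:
	span.LogFields(
		oplog.Int("rowNums", 100),
		oplog.String("send msg", "send msg"),
	)
	if err := errors.New("example failure"); err != nil {
		span.LogFields(oplog.Error(err)) // records the error on the span
	}
}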
@@ -352,30 +383,49 @@ func (qt *QueryTask) SetTs(ts Timestamp) {
 }

 func (qt *QueryTask) PreExecute() error {
+	span, ctx := opentracing.StartSpanFromContext(qt.ctx, "QueryTask preExecute")
+	defer span.Finish()
+	qt.ctx = ctx
+	span.SetTag("hash keys", qt.ReqID)
+	span.SetTag("start time", qt.BeginTs())
+
 	collectionName := qt.query.CollectionName
 	if !globalMetaCache.Hit(collectionName) {
 		err := globalMetaCache.Sync(collectionName)
 		if err != nil {
+			span.LogFields(oplog.Error(err))
+			span.Finish()
 			return err
 		}
 	}
 	_, err := globalMetaCache.Get(collectionName)
 	if err != nil { // err is not nil if collection not exists
+		span.LogFields(oplog.Error(err))
+		span.Finish()
 		return err
 	}

 	if err := ValidateCollectionName(qt.query.CollectionName); err != nil {
+		span.LogFields(oplog.Error(err))
+		span.Finish()
 		return err
 	}

 	for _, tag := range qt.query.PartitionTags {
 		if err := ValidatePartitionTag(tag, false); err != nil {
+			span.LogFields(oplog.Error(err))
+			span.Finish()
 			return err
 		}
 	}
 	qt.MsgType = internalpb.MsgType_kSearch
+	if qt.query.PartitionTags == nil || len(qt.query.PartitionTags) <= 0 {
+		qt.query.PartitionTags = []string{Params.defaultPartitionTag()}
+	}
 	queryBytes, err := proto.Marshal(qt.query)
 	if err != nil {
+		span.LogFields(oplog.Error(err))
+		span.Finish()
 		return err
 	}
 	qt.Query = &commonpb.Blob{
@@ -385,6 +435,11 @@ func (qt *QueryTask) PreExecute() error {
 }

 func (qt *QueryTask) Execute() error {
+	span, ctx := opentracing.StartSpanFromContext(qt.ctx, "QueryTask Execute")
+	defer span.Finish()
+	qt.ctx = ctx
+	span.SetTag("hash keys", qt.ReqID)
+	span.SetTag("start time", qt.BeginTs())
 	var tsMsg msgstream.TsMsg = &msgstream.SearchMsg{
 		SearchRequest: qt.SearchRequest,
 		BaseMsg: msgstream.BaseMsg{
@@ -398,22 +453,31 @@ func (qt *QueryTask) Execute() error {
 		EndTs: qt.Timestamp,
 		Msgs:  make([]msgstream.TsMsg, 1),
 	}
+	tsMsg.SetMsgContext(ctx)
 	msgPack.Msgs[0] = tsMsg
 	err := qt.queryMsgStream.Produce(msgPack)
 	log.Printf("[Proxy] length of searchMsg: %v", len(msgPack.Msgs))
 	if err != nil {
+		span.LogFields(oplog.Error(err))
+		span.Finish()
 		log.Printf("[Proxy] send search request failed: %v", err)
 	}
 	return err
 }

 func (qt *QueryTask) PostExecute() error {
+	span, _ := opentracing.StartSpanFromContext(qt.ctx, "QueryTask postExecute")
+	defer span.Finish()
+	span.SetTag("hash keys", qt.ReqID)
+	span.SetTag("start time", qt.BeginTs())
 	for {
 		select {
 		case <-qt.ctx.Done():
 			log.Print("wait to finish failed, timeout!")
+			span.LogFields(oplog.String("wait to finish failed, timeout", "wait to finish failed, timeout"))
 			return errors.New("wait to finish failed, timeout")
 		case searchResults := <-qt.resultBuf:
+			span.LogFields(oplog.String("receive result", "receive result"))
 			filterSearchResult := make([]*internalpb.SearchResult, 0)
 			var filterReason string
 			for _, partialSearchResult := range searchResults {
@@ -432,12 +496,13 @@ func (qt *QueryTask) PostExecute() error {
 						Reason:    filterReason,
 					},
 				}
+				span.LogFields(oplog.Error(errors.New(filterReason)))
 				return errors.New(filterReason)
 			}

 			hits := make([][]*servicepb.Hits, 0)
 			for _, partialSearchResult := range filterSearchResult {
-				if partialSearchResult.Hits == nil || len(partialSearchResult.Hits) <= 0 {
+				if len(partialSearchResult.Hits) <= 0 {
 					filterReason += "nq is zero\n"
 					continue
 				}
@@ -476,16 +541,7 @@ func (qt *QueryTask) PostExecute() error {
 				return nil
 			}

-			topk := 0
-			getMax := func(a, b int) int {
-				if a > b {
-					return a
-				}
-				return b
-			}
-			for _, hit := range hits {
-				topk = getMax(topk, len(hit[0].IDs))
-			}
+			topk := len(hits[0][0].IDs)
 			qt.result = &servicepb.QueryResult{
 				Status: &commonpb.Status{
 					ErrorCode: 0,
@@ -503,22 +559,14 @@ func (qt *QueryTask) PostExecute() error {
 			}

 			for j := 0; j < topk; j++ {
-				valid := false
 				choice, maxDistance := 0, minFloat32
 				for q, loc := range locs { // query num, the number of ways to merge
-					if loc >= len(hits[q][i].IDs) {
-						continue
-					}
 					distance := hits[q][i].Scores[loc]
-					if distance > maxDistance || (distance == maxDistance && choice != q) {
+					if distance > maxDistance {
 						choice = q
 						maxDistance = distance
-						valid = true
 					}
 				}
-				if !valid {
-					break
-				}
 				choiceOffset := locs[choice]
 				// check if distance is valid, `invalid` here means very very big,
 				// in this process, distance here is the smallest, so the rest of distance are all invalid
@@ -540,6 +588,7 @@ func (qt *QueryTask) PostExecute() error {
 			reducedHitsBs, err := proto.Marshal(reducedHits)
 			if err != nil {
 				log.Println("marshal error")
+				span.LogFields(oplog.Error(err))
 				return err
 			}
 			qt.result.Hits = append(qt.result.Hits, reducedHitsBs)
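The rewritten reduce loop above assumes every hit list has the same length: topk is now taken from the first list and the exhaustion guard was dropped. For contrast, here is a self-contained sketch of the defensive k-way merge shape, which still guards exhausted lists; mergeTopK is an illustrative name, not the project's API:

package main

import "fmt"

// mergeTopK picks the top k scores across several descending-sorted lists,
// the same shape as the QueryTask reduce loop above.
func mergeTopK(lists [][]float32, k int) []float32 {
	locs := make([]int, len(lists))
	out := make([]float32, 0, k)
	for j := 0; j < k; j++ {
		choice, best, found := -1, float32(-1<<30), false
		for q, list := range lists {
			if locs[q] >= len(list) {
				continue // this list is exhausted
			}
			if s := list[locs[q]]; !found || s > best {
				choice, best, found = q, s, true
			}
		}
		if !found {
			break // every list is exhausted
		}
		out = append(out, best)
		locs[choice]++
	}
	return out
}

func main() {
	fmt.Println(mergeTopK([][]float32{{0.9, 0.4}, {0.8, 0.7}}, 3)) // [0.9 0.8 0.7]
}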
@@ -651,7 +700,10 @@ func (dct *DescribeCollectionTask) PreExecute() error {
 func (dct *DescribeCollectionTask) Execute() error {
 	var err error
 	dct.result, err = dct.masterClient.DescribeCollection(dct.ctx, &dct.DescribeCollectionRequest)
-	globalMetaCache.Update(dct.CollectionName.CollectionName, dct.result)
+	if err != nil {
+		return err
+	}
+	err = globalMetaCache.Update(dct.CollectionName.CollectionName, dct.result)
 	return err
 }

@@ -14,7 +14,7 @@ import (

 type TaskQueue interface {
 	utChan() <-chan int
-	UTEmpty() bool
+	utEmpty() bool
 	utFull() bool
 	addUnissuedTask(t task) error
 	FrontUnissuedTask() task
@@ -44,9 +44,7 @@ func (queue *BaseTaskQueue) utChan() <-chan int {
 	return queue.utBufChan
 }

-func (queue *BaseTaskQueue) UTEmpty() bool {
-	queue.utLock.Lock()
-	defer queue.utLock.Unlock()
+func (queue *BaseTaskQueue) utEmpty() bool {
 	return queue.unissuedTasks.Len() == 0
 }

@@ -318,7 +316,7 @@ func (sched *TaskScheduler) definitionLoop() {
 		case <-sched.ctx.Done():
 			return
 		case <-sched.DdQueue.utChan():
-			if !sched.DdQueue.UTEmpty() {
+			if !sched.DdQueue.utEmpty() {
 				t := sched.scheduleDdTask()
 				sched.processTask(t, sched.DdQueue)
 			}
@@ -333,7 +331,7 @@ func (sched *TaskScheduler) manipulationLoop() {
 		case <-sched.ctx.Done():
 			return
 		case <-sched.DmQueue.utChan():
-			if !sched.DmQueue.UTEmpty() {
+			if !sched.DmQueue.utEmpty() {
 				t := sched.scheduleDmTask()
 				go sched.processTask(t, sched.DmQueue)
 			}
@@ -350,7 +348,7 @@ func (sched *TaskScheduler) queryLoop() {
 			return
 		case <-sched.DqQueue.utChan():
 			log.Print("scheduler receive query request ...")
-			if !sched.DqQueue.UTEmpty() {
+			if !sched.DqQueue.utEmpty() {
 				t := sched.scheduleDqTask()
 				go sched.processTask(t, sched.DqQueue)
 			} else {
@@ -29,7 +29,7 @@ type timeTick struct {
 	ctx       context.Context
 	cancel    func()
 	timer     *time.Ticker
-	tickLock  sync.RWMutex
 	checkFunc tickCheckFunc
 }

@@ -85,8 +85,6 @@ func (tt *timeTick) tick() error {
 	} else {
 		//log.Printf("proxy send time tick message")
 	}
-	tt.tickLock.Lock()
-	defer tt.tickLock.Unlock()
 	tt.lastTick = tt.currentTick
 	return nil
 }
@@ -107,8 +105,6 @@ func (tt *timeTick) tickLoop() {
 }

 func (tt *timeTick) LastTick() Timestamp {
-	tt.tickLock.RLock()
-	defer tt.tickLock.RUnlock()
 	return tt.lastTick
 }

@@ -332,11 +332,11 @@ func (colReplica *collectionReplicaImpl) getSegmentStatistics() []*internalpb.Se
 			SegmentID:        segmentID,
 			MemorySize:       currentMemSize,
 			NumRows:          segmentNumOfRows,
-			RecentlyModified: segment.GetRecentlyModified(),
+			RecentlyModified: segment.recentlyModified,
 		}

 		statisticData = append(statisticData, &stat)
-		segment.SetRecentlyModified(false)
+		segment.recentlyModified = false
 	}

 	return statisticData
@@ -1,9 +1,11 @@
 package querynode

 import (
+	"context"
 	"log"
 	"math"

+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
@@ -32,6 +34,28 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}

+	var childs []opentracing.Span
+	tracer := opentracing.GlobalTracer()
+	if tracer != nil && msgStreamMsg != nil {
+		for _, msg := range msgStreamMsg.TsMessages() {
+			if msg.Type() == internalPb.MsgType_kInsert || msg.Type() == internalPb.MsgType_kSearch {
+				var child opentracing.Span
+				ctx := msg.GetMsgContext()
+				if parent := opentracing.SpanFromContext(ctx); parent != nil {
+					child = tracer.StartSpan("pass filter node",
+						opentracing.FollowsFrom(parent.Context()))
+				} else {
+					child = tracer.StartSpan("pass filter node")
+				}
+				child.SetTag("hash keys", msg.HashKeys())
+				child.SetTag("start time", msg.BeginTs())
+				child.SetTag("end time", msg.EndTs())
+				msg.SetMsgContext(opentracing.ContextWithSpan(ctx, child))
+				childs = append(childs, child)
+			}
+		}
+	}
+
 	ddMsg, ok := (*in[1]).(*ddMsg)
 	if !ok {
 		log.Println("type assertion failed for ddMsg")
@@ -46,11 +70,20 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
 			timestampMax: msgStreamMsg.TimestampMax(),
 		},
 	}
-	for _, msg := range msgStreamMsg.TsMessages() {
+	for key, msg := range msgStreamMsg.TsMessages() {
 		switch msg.Type() {
 		case internalPb.MsgType_kInsert:
+			var ctx2 context.Context
+			if childs != nil {
+				if childs[key] != nil {
+					ctx2 = opentracing.ContextWithSpan(msg.GetMsgContext(), childs[key])
+				} else {
+					ctx2 = context.Background()
+				}
+			}
 			resMsg := fdmNode.filterInvalidInsertMessage(msg.(*msgstream.InsertMsg))
 			if resMsg != nil {
+				resMsg.SetMsgContext(ctx2)
 				iMsg.insertMessages = append(iMsg.insertMessages, resMsg)
 			}
 		// case internalPb.MsgType_kDelete:
@@ -62,6 +95,10 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {

 	iMsg.gcRecord = ddMsg.gcRecord
 	var res Msg = &iMsg
+
+	for _, child := range childs {
+		child.Finish()
+	}
 	return []*Msg{&res}
 }

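The filter node (and the insert node after it) wraps each interesting message in a short-lived FollowsFrom span that covers the node's duration. A reduced sketch of that per-message pattern with stand-in types (msg and operate are illustrative, not the flowgraph API):

package main

import (
	"context"

	"github.com/opentracing/opentracing-go"
)

type msg struct{ ctx context.Context }

// operate mirrors the filter/insert nodes: open one span per message on
// entry, stash it in the message context, finish all spans on exit.
func operate(msgs []*msg) {
	tracer := opentracing.GlobalTracer()
	var spans []opentracing.Span
	for _, m := range msgs {
		var span opentracing.Span
		if parent := opentracing.SpanFromContext(m.ctx); parent != nil {
			span = tracer.StartSpan("pass filter node",
				opentracing.FollowsFrom(parent.Context()))
		} else {
			span = tracer.StartSpan("pass filter node")
		}
		m.ctx = opentracing.ContextWithSpan(m.ctx, span)
		spans = append(spans, span)
	}
	// ... node body would run here ...
	for _, s := range spans {
		s.Finish()
	}
}

func main() {
	operate([]*msg{{ctx: context.Background()}, {ctx: context.Background()}})
}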
@@ -1,11 +1,15 @@
 package querynode

 import (
+	"context"
 	"fmt"
 	"log"
 	"sync"

+	"github.com/opentracing/opentracing-go"
+	oplog "github.com/opentracing/opentracing-go/log"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
+	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
 )

 type insertNode struct {
@@ -14,6 +18,7 @@ type insertNode struct {
 }

 type InsertData struct {
+	insertContext    map[int64]context.Context
 	insertIDs        map[UniqueID][]UniqueID
 	insertTimestamps map[UniqueID][]Timestamp
 	insertRecords    map[UniqueID][]*commonpb.Blob
@@ -38,7 +43,30 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}

+	var childs []opentracing.Span
+	tracer := opentracing.GlobalTracer()
+	if tracer != nil && iMsg != nil {
+		for _, msg := range iMsg.insertMessages {
+			if msg.Type() == internalPb.MsgType_kInsert || msg.Type() == internalPb.MsgType_kSearch {
+				var child opentracing.Span
+				ctx := msg.GetMsgContext()
+				if parent := opentracing.SpanFromContext(ctx); parent != nil {
+					child = tracer.StartSpan("pass insert node",
+						opentracing.FollowsFrom(parent.Context()))
+				} else {
+					child = tracer.StartSpan("pass insert node")
+				}
+				child.SetTag("hash keys", msg.HashKeys())
+				child.SetTag("start time", msg.BeginTs())
+				child.SetTag("end time", msg.EndTs())
+				msg.SetMsgContext(opentracing.ContextWithSpan(ctx, child))
+				childs = append(childs, child)
+			}
+		}
+	}
+
 	insertData := InsertData{
+		insertContext:    make(map[int64]context.Context),
 		insertIDs:        make(map[int64][]int64),
 		insertTimestamps: make(map[int64][]uint64),
 		insertRecords:    make(map[int64][]*commonpb.Blob),
@@ -47,6 +75,7 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {

 	// 1. hash insertMessages to insertData
 	for _, task := range iMsg.insertMessages {
+		insertData.insertContext[task.SegmentID] = task.GetMsgContext()
 		insertData.insertIDs[task.SegmentID] = append(insertData.insertIDs[task.SegmentID], task.RowIDs...)
 		insertData.insertTimestamps[task.SegmentID] = append(insertData.insertTimestamps[task.SegmentID], task.Timestamps...)
 		insertData.insertRecords[task.SegmentID] = append(insertData.insertRecords[task.SegmentID], task.RowData...)
@@ -85,7 +114,7 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
 	wg := sync.WaitGroup{}
 	for segmentID := range insertData.insertRecords {
 		wg.Add(1)
-		go iNode.insert(&insertData, segmentID, &wg)
+		go iNode.insert(insertData.insertContext[segmentID], &insertData, segmentID, &wg)
 	}
 	wg.Wait()

@@ -93,15 +122,21 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
 		gcRecord:  iMsg.gcRecord,
 		timeRange: iMsg.timeRange,
 	}
+	for _, child := range childs {
+		child.Finish()
+	}
 	return []*Msg{&res}
 }

-func (iNode *insertNode) insert(insertData *InsertData, segmentID int64, wg *sync.WaitGroup) {
+func (iNode *insertNode) insert(ctx context.Context, insertData *InsertData, segmentID int64, wg *sync.WaitGroup) {
+	span, _ := opentracing.StartSpanFromContext(ctx, "insert node insert function")
+	defer span.Finish()
 	var targetSegment, err = iNode.replica.getSegmentByID(segmentID)
 	if err != nil {
 		log.Println("cannot find segment:", segmentID)
 		// TODO: add error handling
 		wg.Done()
+		span.LogFields(oplog.Error(err))
 		return
 	}

@@ -115,6 +150,7 @@ func (iNode *insertNode) insert(insertData *InsertData, segmentID int64, wg *syn
 		log.Println(err)
 		// TODO: add error handling
 		wg.Done()
+		span.LogFields(oplog.Error(err))
 		return
 	}

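insert now takes the per-segment context so each goroutine can attach its span to the right trace. A stripped-down sketch of that fan-out; the segment write is faked with a print and the names are illustrative:

package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/opentracing/opentracing-go"
)

// insertSegment mirrors iNode.insert: derive a span from the segment's
// context, do the write, finish the span with the goroutine.
func insertSegment(ctx context.Context, segmentID int64, wg *sync.WaitGroup) {
	defer wg.Done()
	span, _ := opentracing.StartSpanFromContext(ctx, "insert node insert function")
	defer span.Finish()
	fmt.Println("inserted segment", segmentID)
}

func main() {
	ctxs := map[int64]context.Context{1: context.Background(), 2: context.Background()}
	var wg sync.WaitGroup
	for segmentID, ctx := range ctxs {
		wg.Add(1)
		go insertSegment(ctx, segmentID, &wg)
	}
	wg.Wait()
}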
@@ -100,18 +100,8 @@ func (lis *loadIndexService) start() {
 				continue
 			}
 			// 1. use msg's index paths to get index bytes
-			fmt.Println("start load index")
-			var err error
-			ok, err = lis.checkIndexReady(indexMsg)
-			if err != nil {
-				log.Println(err)
-				continue
-			}
-			if ok {
-				continue
-			}
-
 			var indexBuffer [][]byte
+			var err error
 			fn := func() error {
 				indexBuffer, err = lis.loadIndex(indexMsg.IndexPaths)
 				if err != nil {
@@ -148,13 +138,6 @@ func (lis *loadIndexService) start() {
 	}
 }

-func (lis *loadIndexService) close() {
-	if lis.loadIndexMsgStream != nil {
-		lis.loadIndexMsgStream.Close()
-	}
-	lis.cancel()
-}
-
 func (lis *loadIndexService) printIndexParams(index []*commonpb.KeyValuePair) {
 	fmt.Println("=================================================")
 	for i := 0; i < len(index); i++ {
@@ -207,7 +190,6 @@ func (lis *loadIndexService) updateSegmentIndexStats(indexMsg *msgstream.LoadInd
 	fieldStatsKey := lis.fieldsStatsIDs2Key(targetSegment.collectionID, indexMsg.FieldID)
 	_, ok := lis.fieldIndexes[fieldStatsKey]
 	newIndexParams := indexMsg.IndexParams
-
 	// sort index params by key
 	sort.Slice(newIndexParams, func(i, j int) bool { return newIndexParams[i].Key < newIndexParams[j].Key })
 	if !ok {
@@ -233,7 +215,6 @@ func (lis *loadIndexService) updateSegmentIndexStats(indexMsg *msgstream.LoadInd
 		})
 	}
-	targetSegment.setIndexParam(indexMsg.FieldID, indexMsg.IndexParams)

 	return nil
 }
@@ -305,15 +286,3 @@ func (lis *loadIndexService) sendQueryNodeStats() error {
 	fmt.Println("sent field stats")
 	return nil
 }
-
-func (lis *loadIndexService) checkIndexReady(loadIndexMsg *msgstream.LoadIndexMsg) (bool, error) {
-	segment, err := lis.replica.getSegmentByID(loadIndexMsg.SegmentID)
-	if err != nil {
-		return false, err
-	}
-	if !segment.matchIndexParam(loadIndexMsg.FieldID, loadIndexMsg.IndexParams) {
-		return false, nil
-	}
-	return true, nil
-}
@ -22,29 +22,26 @@ import (
|
|||||||
"github.com/zilliztech/milvus-distributed/internal/querynode/client"
|
"github.com/zilliztech/milvus-distributed/internal/querynode/client"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestLoadIndexService_FloatVector(t *testing.T) {
|
func TestLoadIndexService(t *testing.T) {
|
||||||
node := newQueryNode()
|
node := newQueryNode()
|
||||||
collectionID := rand.Int63n(1000000)
|
collectionID := rand.Int63n(1000000)
|
||||||
segmentID := rand.Int63n(1000000)
|
segmentID := rand.Int63n(1000000)
|
||||||
initTestMeta(t, node, "collection0", collectionID, segmentID)
|
initTestMeta(t, node, "collection0", collectionID, segmentID)
|
||||||
|
|
||||||
// loadIndexService and statsService
|
// loadIndexService and statsService
|
||||||
suffix := "-test-search" + strconv.FormatInt(rand.Int63n(1000000), 10)
|
|
||||||
oldSearchChannelNames := Params.SearchChannelNames
|
oldSearchChannelNames := Params.SearchChannelNames
|
||||||
newSearchChannelNames := makeNewChannelNames(oldSearchChannelNames, suffix)
|
var newSearchChannelNames []string
|
||||||
|
for _, channel := range oldSearchChannelNames {
|
||||||
|
newSearchChannelNames = append(newSearchChannelNames, channel+"new")
|
||||||
|
}
|
||||||
Params.SearchChannelNames = newSearchChannelNames
|
Params.SearchChannelNames = newSearchChannelNames
|
||||||
|
|
||||||
oldSearchResultChannelNames := Params.SearchChannelNames
|
oldSearchResultChannelNames := Params.SearchChannelNames
|
||||||
newSearchResultChannelNames := makeNewChannelNames(oldSearchResultChannelNames, suffix)
|
var newSearchResultChannelNames []string
|
||||||
|
for _, channel := range oldSearchResultChannelNames {
|
||||||
|
newSearchResultChannelNames = append(newSearchResultChannelNames, channel+"new")
|
||||||
|
}
|
||||||
Params.SearchResultChannelNames = newSearchResultChannelNames
|
Params.SearchResultChannelNames = newSearchResultChannelNames
|
||||||
|
|
||||||
oldLoadIndexChannelNames := Params.LoadIndexChannelNames
|
|
||||||
newLoadIndexChannelNames := makeNewChannelNames(oldLoadIndexChannelNames, suffix)
|
|
||||||
Params.LoadIndexChannelNames = newLoadIndexChannelNames
|
|
||||||
|
|
||||||
oldStatsChannelName := Params.StatsChannelName
|
|
||||||
newStatsChannelNames := makeNewChannelNames([]string{oldStatsChannelName}, suffix)
|
|
||||||
Params.StatsChannelName = newStatsChannelNames[0]
|
|
||||||
go node.Start()
|
go node.Start()
|
||||||
|
|
||||||
//generate insert data
|
//generate insert data
|
||||||
@ -331,319 +328,9 @@ func TestLoadIndexService_FloatVector(t *testing.T) {
    }
    Params.SearchChannelNames = oldSearchChannelNames
    Params.SearchResultChannelNames = oldSearchResultChannelNames
    Params.LoadIndexChannelNames = oldLoadIndexChannelNames
    Params.StatsChannelName = oldStatsChannelName
    fmt.Println("loadIndex floatVector test Done!")

    defer assert.Equal(t, findFiledStats, true)
    <-node.queryNodeLoopCtx.Done()
    node.Close()
}

func TestLoadIndexService_BinaryVector(t *testing.T) {
    node := newQueryNode()
    collectionID := rand.Int63n(1000000)
    segmentID := rand.Int63n(1000000)
    initTestMeta(t, node, "collection0", collectionID, segmentID, true)

    // loadIndexService and statsService
    suffix := "-test-search-binary" + strconv.FormatInt(rand.Int63n(1000000), 10)
    oldSearchChannelNames := Params.SearchChannelNames
    newSearchChannelNames := makeNewChannelNames(oldSearchChannelNames, suffix)
    Params.SearchChannelNames = newSearchChannelNames

    oldSearchResultChannelNames := Params.SearchChannelNames
    newSearchResultChannelNames := makeNewChannelNames(oldSearchResultChannelNames, suffix)
    Params.SearchResultChannelNames = newSearchResultChannelNames

    oldLoadIndexChannelNames := Params.LoadIndexChannelNames
    newLoadIndexChannelNames := makeNewChannelNames(oldLoadIndexChannelNames, suffix)
    Params.LoadIndexChannelNames = newLoadIndexChannelNames

    oldStatsChannelName := Params.StatsChannelName
    newStatsChannelNames := makeNewChannelNames([]string{oldStatsChannelName}, suffix)
    Params.StatsChannelName = newStatsChannelNames[0]
    go node.Start()

    const msgLength = 1000
    const receiveBufSize = 1024
    const DIM = 128

    // generator index data
    var indexRowData []byte
    for n := 0; n < msgLength; n++ {
        for i := 0; i < DIM/8; i++ {
            indexRowData = append(indexRowData, byte(rand.Intn(8)))
        }
    }

    //generator insert data
    var insertRowBlob []*commonpb.Blob
    var timestamps []uint64
    var rowIDs []int64
    var hashValues []uint32
    offset := 0
    for n := 0; n < msgLength; n++ {
        rowData := make([]byte, 0)
        rowData = append(rowData, indexRowData[offset:offset+(DIM/8)]...)
        offset += DIM / 8
        age := make([]byte, 4)
        binary.LittleEndian.PutUint32(age, 1)
        rowData = append(rowData, age...)
        blob := &commonpb.Blob{
            Value: rowData,
        }
        insertRowBlob = append(insertRowBlob, blob)
        timestamps = append(timestamps, uint64(n))
        rowIDs = append(rowIDs, int64(n))
        hashValues = append(hashValues, uint32(n))
    }

    var insertMsg msgstream.TsMsg = &msgstream.InsertMsg{
        BaseMsg: msgstream.BaseMsg{
            HashValues: hashValues,
        },
        InsertRequest: internalpb.InsertRequest{
            MsgType:        internalpb.MsgType_kInsert,
            ReqID:          0,
            CollectionName: "collection0",
            PartitionTag:   "default",
            SegmentID:      segmentID,
            ChannelID:      int64(0),
            ProxyID:        int64(0),
            Timestamps:     timestamps,
            RowIDs:         rowIDs,
            RowData:        insertRowBlob,
        },
    }
    insertMsgPack := msgstream.MsgPack{
        BeginTs: 0,
        EndTs:   math.MaxUint64,
        Msgs:    []msgstream.TsMsg{insertMsg},
    }

    // generate timeTick
    timeTickMsg := &msgstream.TimeTickMsg{
        BaseMsg: msgstream.BaseMsg{
            BeginTimestamp: 0,
            EndTimestamp:   0,
            HashValues:     []uint32{0},
        },
        TimeTickMsg: internalpb.TimeTickMsg{
            MsgType:   internalpb.MsgType_kTimeTick,
            PeerID:    UniqueID(0),
            Timestamp: math.MaxUint64,
        },
    }
    timeTickMsgPack := &msgstream.MsgPack{
        Msgs: []msgstream.TsMsg{timeTickMsg},
    }

    // pulsar produce
    insertChannels := Params.InsertChannelNames
    ddChannels := Params.DDChannelNames

    insertStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
    insertStream.SetPulsarClient(Params.PulsarAddress)
    insertStream.CreatePulsarProducers(insertChannels)
    ddStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
    ddStream.SetPulsarClient(Params.PulsarAddress)
    ddStream.CreatePulsarProducers(ddChannels)

    var insertMsgStream msgstream.MsgStream = insertStream
    insertMsgStream.Start()
    var ddMsgStream msgstream.MsgStream = ddStream
    ddMsgStream.Start()

    err := insertMsgStream.Produce(&insertMsgPack)
    assert.NoError(t, err)
    err = insertMsgStream.Broadcast(timeTickMsgPack)
    assert.NoError(t, err)
    err = ddMsgStream.Broadcast(timeTickMsgPack)
    assert.NoError(t, err)

    //generate search data and send search msg
    searchRowData := indexRowData[42*(DIM/8) : 43*(DIM/8)]
    dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"JACCARD\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
    placeholderValue := servicepb.PlaceholderValue{
        Tag:    "$0",
        Type:   servicepb.PlaceholderType_VECTOR_BINARY,
        Values: [][]byte{searchRowData},
    }
    placeholderGroup := servicepb.PlaceholderGroup{
        Placeholders: []*servicepb.PlaceholderValue{&placeholderValue},
    }
    placeGroupByte, err := proto.Marshal(&placeholderGroup)
    if err != nil {
        log.Print("marshal placeholderGroup failed")
    }
    query := servicepb.Query{
        CollectionName:   "collection0",
        PartitionTags:    []string{"default"},
        Dsl:              dslString,
        PlaceholderGroup: placeGroupByte,
    }
    queryByte, err := proto.Marshal(&query)
    if err != nil {
        log.Print("marshal query failed")
    }
    blob := commonpb.Blob{
        Value: queryByte,
    }
    fn := func(n int64) *msgstream.MsgPack {
        searchMsg := &msgstream.SearchMsg{
            BaseMsg: msgstream.BaseMsg{
                HashValues: []uint32{0},
            },
            SearchRequest: internalpb.SearchRequest{
                MsgType:         internalpb.MsgType_kSearch,
                ReqID:           n,
                ProxyID:         int64(1),
                Timestamp:       uint64(msgLength),
                ResultChannelID: int64(0),
                Query:           &blob,
            },
        }
        return &msgstream.MsgPack{
            Msgs: []msgstream.TsMsg{searchMsg},
        }
    }
    searchStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
    searchStream.SetPulsarClient(Params.PulsarAddress)
    searchStream.CreatePulsarProducers(newSearchChannelNames)
    searchStream.Start()
    err = searchStream.Produce(fn(1))
    assert.NoError(t, err)

    //get search result
    searchResultStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
    searchResultStream.SetPulsarClient(Params.PulsarAddress)
    unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
    searchResultStream.CreatePulsarConsumers(newSearchResultChannelNames, "loadIndexTestSubSearchResult2", unmarshalDispatcher, receiveBufSize)
    searchResultStream.Start()
    searchResult := searchResultStream.Consume()
    assert.NotNil(t, searchResult)
    unMarshaledHit := servicepb.Hits{}
    err = proto.Unmarshal(searchResult.Msgs[0].(*msgstream.SearchResultMsg).Hits[0], &unMarshaledHit)
    assert.Nil(t, err)

    // gen load index message pack
    indexParams := make(map[string]string)
    indexParams["index_type"] = "BIN_IVF_FLAT"
    indexParams["index_mode"] = "cpu"
    indexParams["dim"] = "128"
    indexParams["k"] = "10"
    indexParams["nlist"] = "100"
    indexParams["nprobe"] = "10"
    indexParams["m"] = "4"
    indexParams["nbits"] = "8"
    indexParams["metric_type"] = "JACCARD"
    indexParams["SLICE_SIZE"] = "4"

    var indexParamsKV []*commonpb.KeyValuePair
    for key, value := range indexParams {
        indexParamsKV = append(indexParamsKV, &commonpb.KeyValuePair{
            Key:   key,
            Value: value,
        })
    }

    // generator index
    typeParams := make(map[string]string)
    typeParams["dim"] = "128"
    index, err := indexbuilder.NewCIndex(typeParams, indexParams)
    assert.Nil(t, err)
    err = index.BuildBinaryVecIndexWithoutIds(indexRowData)
    assert.Equal(t, err, nil)

    option := &minioKV.Option{
        Address:           Params.MinioEndPoint,
        AccessKeyID:       Params.MinioAccessKeyID,
        SecretAccessKeyID: Params.MinioSecretAccessKey,
        UseSSL:            Params.MinioUseSSLStr,
        BucketName:        Params.MinioBucketName,
        CreateBucket:      true,
    }

    minioKV, err := minioKV.NewMinIOKV(node.queryNodeLoopCtx, option)
    assert.Equal(t, err, nil)
    //save index to minio
    binarySet, err := index.Serialize()
    assert.Equal(t, err, nil)
    indexPaths := make([]string, 0)
    for _, index := range binarySet {
        path := strconv.Itoa(int(segmentID)) + "/" + index.Key
        indexPaths = append(indexPaths, path)
        minioKV.Save(path, string(index.Value))
    }

    //test index search result
    indexResult, err := index.QueryOnBinaryVecIndexWithParam(searchRowData, indexParams)
    assert.Equal(t, err, nil)

    // create loadIndexClient
    fieldID := UniqueID(100)
    loadIndexChannelNames := Params.LoadIndexChannelNames
    client := client.NewLoadIndexClient(node.queryNodeLoopCtx, Params.PulsarAddress, loadIndexChannelNames)
    client.LoadIndex(indexPaths, segmentID, fieldID, "vec", indexParams)

    // init message stream consumer and do checks
    statsMs := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, Params.StatsReceiveBufSize)
    statsMs.SetPulsarClient(Params.PulsarAddress)
    statsMs.CreatePulsarConsumers([]string{Params.StatsChannelName}, Params.MsgChannelSubName, msgstream.NewUnmarshalDispatcher(), Params.StatsReceiveBufSize)
    statsMs.Start()

    findFiledStats := false
    for {
        receiveMsg := msgstream.MsgStream(statsMs).Consume()
        assert.NotNil(t, receiveMsg)
        assert.NotEqual(t, len(receiveMsg.Msgs), 0)

        for _, msg := range receiveMsg.Msgs {
            statsMsg, ok := msg.(*msgstream.QueryNodeStatsMsg)
            if statsMsg.FieldStats == nil || len(statsMsg.FieldStats) == 0 {
                continue
            }
            findFiledStats = true
            assert.Equal(t, ok, true)
            assert.Equal(t, len(statsMsg.FieldStats), 1)
            fieldStats0 := statsMsg.FieldStats[0]
            assert.Equal(t, fieldStats0.FieldID, fieldID)
            assert.Equal(t, fieldStats0.CollectionID, collectionID)
            assert.Equal(t, len(fieldStats0.IndexStats), 1)
            indexStats0 := fieldStats0.IndexStats[0]
            params := indexStats0.IndexParams
            // sort index params by key
            sort.Slice(indexParamsKV, func(i, j int) bool { return indexParamsKV[i].Key < indexParamsKV[j].Key })
            indexEqual := node.loadIndexService.indexParamsEqual(params, indexParamsKV)
            assert.Equal(t, indexEqual, true)
        }

        if findFiledStats {
            break
        }
    }

    err = searchStream.Produce(fn(2))
    assert.NoError(t, err)
    searchResult = searchResultStream.Consume()
    assert.NotNil(t, searchResult)
    err = proto.Unmarshal(searchResult.Msgs[0].(*msgstream.SearchResultMsg).Hits[0], &unMarshaledHit)
    assert.Nil(t, err)

    idsIndex := indexResult.IDs()
    idsSegment := unMarshaledHit.IDs
    assert.Equal(t, len(idsIndex), len(idsSegment))
    for i := 0; i < len(idsIndex); i++ {
        assert.Equal(t, idsIndex[i], idsSegment[i])
    }
    Params.SearchChannelNames = oldSearchChannelNames
    Params.SearchResultChannelNames = oldSearchResultChannelNames
    Params.LoadIndexChannelNames = oldLoadIndexChannelNames
    Params.StatsChannelName = oldStatsChannelName
    fmt.Println("loadIndex binaryVector test Done!")

    defer assert.Equal(t, findFiledStats, true)
    <-node.queryNodeLoopCtx.Done()
    node.Close()
}
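Note: the removed binary-vector test packs each row as DIM/8 bytes of vector data followed by a 4-byte little-endian "age" field, so a 128-dimensional binary vector costs 16 bytes and a full row 20. A standalone sketch of that arithmetic (illustrative names, not repo code):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // One row in the removed test: a DIM-bit binary vector stored as DIM/8
    // bytes, then a 4-byte little-endian age field.
    func main() {
        const DIM = 128
        vec := make([]byte, DIM/8) // one binary vector, 16 bytes

        age := make([]byte, 4)
        binary.LittleEndian.PutUint32(age, 1)

        row := append(append([]byte{}, vec...), age...)
        fmt.Printf("bytes per row = %d (vector %d + age %d)\n", len(row), DIM/8, len(age))
    }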
@ -14,6 +14,12 @@ import "C"

import (
    "context"
    "fmt"
    "io"

    "github.com/opentracing/opentracing-go"
    "github.com/uber/jaeger-client-go"
    "github.com/uber/jaeger-client-go/config"
)

type QueryNode struct {
@ -30,6 +36,10 @@ type QueryNode struct {
    searchService    *searchService
    loadIndexService *loadIndexService
    statsService     *statsService

    //opentracing
    tracer opentracing.Tracer
    closer io.Closer
}

func Init() {
@ -39,31 +49,47 @@ func Init() {
func NewQueryNode(ctx context.Context, queryNodeID uint64) *QueryNode {

    ctx1, cancel := context.WithCancel(ctx)
    q := &QueryNode{
    segmentsMap := make(map[int64]*Segment)
    collections := make([]*Collection, 0)

    tSafe := newTSafe()

    var replica collectionReplica = &collectionReplicaImpl{
        collections: collections,
        segments:    segmentsMap,

        tSafe: tSafe,
    }

    return &QueryNode{
        queryNodeLoopCtx:    ctx1,
        queryNodeLoopCancel: cancel,
        QueryNodeID:         queryNodeID,

        replica: replica,

        dataSyncService: nil,
        metaService:     nil,
        searchService:   nil,
        statsService:    nil,
    }

    var err error
    cfg := &config.Configuration{
        ServiceName: "query_node",
        Sampler: &config.SamplerConfig{
            Type:  "const",
            Param: 1,
        },
        Reporter: &config.ReporterConfig{
            LogSpans: true,
        },
    }
    q.tracer, q.closer, err = cfg.NewTracer(config.Logger(jaeger.StdLogger))
    if err != nil {
        panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
    }
    opentracing.SetGlobalTracer(q.tracer)

    segmentsMap := make(map[int64]*Segment)
    collections := make([]*Collection, 0)

    tSafe := newTSafe()

    q.replica = &collectionReplicaImpl{
        collections: collections,
        segments:    segmentsMap,

        tSafe: tSafe,
    }

    return q
}

func (node *QueryNode) Start() error {
@ -97,10 +123,11 @@ func (node *QueryNode) Close() {
    if node.searchService != nil {
        node.searchService.close()
    }
    if node.loadIndexService != nil {
        node.loadIndexService.close()
    }
    if node.statsService != nil {
        node.statsService.close()
    }
    if node.closer != nil {
        node.closer.Close()
    }

}
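Note: the Jaeger wiring added to NewQueryNode follows the stock jaeger-client-go pattern: a const sampler that keeps every span, a reporter that logs them, and an io.Closer that must be flushed on shutdown (mirrored by node.closer.Close() in Close above). A minimal self-contained sketch of the same lifecycle:

    package main

    import (
        "fmt"
        "io"

        "github.com/opentracing/opentracing-go"
        "github.com/uber/jaeger-client-go"
        "github.com/uber/jaeger-client-go/config"
    )

    // newTracer builds a tracer the same way the diff does: const sampler
    // (sample everything), reporter that logs spans, closer to flush on exit.
    func newTracer(service string) (opentracing.Tracer, io.Closer, error) {
        cfg := &config.Configuration{
            ServiceName: service,
            Sampler:     &config.SamplerConfig{Type: "const", Param: 1},
            Reporter:    &config.ReporterConfig{LogSpans: true},
        }
        return cfg.NewTracer(config.Logger(jaeger.StdLogger))
    }

    func main() {
        tracer, closer, err := newTracer("query_node")
        if err != nil {
            panic(fmt.Sprintf("cannot init Jaeger: %v", err))
        }
        defer closer.Close() // flush buffered spans on shutdown
        opentracing.SetGlobalTracer(tracer)

        span := tracer.StartSpan("demo")
        span.Finish()
    }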
@ -35,7 +35,7 @@ func genTestCollectionMeta(collectionName string, collectionID UniqueID, isBinar
        TypeParams: []*commonpb.KeyValuePair{
            {
                Key:   "dim",
                Value: "128",
                Value: "16",
            },
        },
        IndexParams: []*commonpb.KeyValuePair{
@ -92,12 +92,8 @@ func genTestCollectionMeta(collectionName string, collectionID UniqueID, isBinar
    return &collectionMeta
}

func initTestMeta(t *testing.T, node *QueryNode, collectionName string, collectionID UniqueID, segmentID UniqueID, optional ...bool) {
func initTestMeta(t *testing.T, node *QueryNode, collectionName string, collectionID UniqueID, segmentID UniqueID) {
    isBinary := false
    collectionMeta := genTestCollectionMeta(collectionName, collectionID, false)
    if len(optional) > 0 {
        isBinary = optional[0]
    }
    collectionMeta := genTestCollectionMeta(collectionName, collectionID, isBinary)

    schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
    assert.NotEqual(t, "", schemaBlob)
@ -5,8 +5,9 @@ import (
    "context"
    "errors"
    "fmt"
    "github.com/opentracing/opentracing-go"
    oplog "github.com/opentracing/opentracing-go/log"
    "log"
    "regexp"
    "sync"

    "github.com/golang/protobuf/proto"
@ -134,22 +135,27 @@ func (ss *searchService) receiveSearchMsg() {
        }
        searchMsg := make([]msgstream.TsMsg, 0)
        serverTime := ss.getServiceableTime()
        for i := range msgPack.Msgs {
        for i, msg := range msgPack.Msgs {
            if msgPack.Msgs[i].BeginTs() > serverTime {
            if msg.BeginTs() > serverTime {
                ss.msgBuffer <- msgPack.Msgs[i]
                ss.msgBuffer <- msg
                continue
            }
            searchMsg = append(searchMsg, msgPack.Msgs[i])
        }
        for _, msg := range searchMsg {
            span, ctx := opentracing.StartSpanFromContext(msg.GetMsgContext(), "receive search msg")
            msg.SetMsgContext(ctx)
            err := ss.search(msg)
            if err != nil {
                log.Println(err)
                span.LogFields(oplog.Error(err))
                err2 := ss.publishFailedSearchResult(msg, err.Error())
                if err2 != nil {
                    span.LogFields(oplog.Error(err2))
                    log.Println("publish FailedSearchResult failed, error message: ", err2)
                }
            }
            span.Finish()
        }
        log.Println("ReceiveSearchMsg, do search done, num of searchMsg = ", len(searchMsg))
    }
@ -211,8 +217,12 @@ func (ss *searchService) doUnsolvedMsgSearch() {
// TODO:: cache map[dsl]plan
// TODO: reBatched search requests
func (ss *searchService) search(msg msgstream.TsMsg) error {
    span, ctx := opentracing.StartSpanFromContext(msg.GetMsgContext(), "do search")
    defer span.Finish()
    msg.SetMsgContext(ctx)
    searchMsg, ok := msg.(*msgstream.SearchMsg)
    if !ok {
        span.LogFields(oplog.Error(errors.New("invalid request type = " + string(msg.Type()))))
        return errors.New("invalid request type = " + string(msg.Type()))
    }

@ -221,23 +231,27 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
    query := servicepb.Query{}
    err := proto.Unmarshal(queryBlob, &query)
    if err != nil {
        span.LogFields(oplog.Error(err))
        return errors.New("unmarshal query failed")
    }
    collectionName := query.CollectionName
    partitionTagsInQuery := query.PartitionTags
    partitionTags := query.PartitionTags
    collection, err := ss.replica.getCollectionByName(collectionName)
    if err != nil {
        span.LogFields(oplog.Error(err))
        return err
    }
    collectionID := collection.ID()
    dsl := query.Dsl
    plan, err := createPlan(*collection, dsl)
    if err != nil {
        span.LogFields(oplog.Error(err))
        return err
    }
    placeHolderGroupBlob := query.PlaceholderGroup
    placeholderGroup, err := parserPlaceholderGroup(plan, placeHolderGroupBlob)
    if err != nil {
        span.LogFields(oplog.Error(err))
        return err
    }
    placeholderGroups := make([]*PlaceholderGroup, 0)
@ -246,28 +260,15 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
    searchResults := make([]*SearchResult, 0)
    matchedSegments := make([]*Segment, 0)

    fmt.Println("search msg's partitionTag = ", partitionTagsInQuery)
    var partitionTagsInCol []string
    for _, partition := range collection.partitions {
        partitionTag := partition.partitionTag
        partitionTagsInCol = append(partitionTagsInCol, partitionTag)
    }
    var searchPartitionTag []string
    if len(partitionTagsInQuery) == 0 {
        searchPartitionTag = partitionTagsInCol
    } else {
        for _, tag := range partitionTagsInCol {
            for _, toMatchTag := range partitionTagsInQuery {
                re := regexp.MustCompile("^" + toMatchTag + "$")
                if re.MatchString(tag) {
                    searchPartitionTag = append(searchPartitionTag, tag)
                }
            }
    for _, partitionTag := range partitionTags {
        hasPartition := ss.replica.hasPartition(collectionID, partitionTag)
        if !hasPartition {
            span.LogFields(oplog.Error(errors.New("search Failed, invalid partitionTag")))
            return errors.New("search Failed, invalid partitionTag")
        }
    }

    for _, partitionTag := range searchPartitionTag {
    for _, partitionTag := range partitionTags {
        partition, _ := ss.replica.getPartitionByTag(collectionID, partitionTag)
        for _, segment := range partition.segments {
            //fmt.Println("dsl = ", dsl)
@ -275,6 +276,7 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
            searchResult, err := segment.segmentSearch(plan, placeholderGroups, []Timestamp{searchTimestamp})

            if err != nil {
                span.LogFields(oplog.Error(err))
                return err
            }
            searchResults = append(searchResults, searchResult)
@ -283,17 +285,6 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
    }

    if len(searchResults) <= 0 {
        for _, group := range placeholderGroups {
            nq := group.getNumOfQuery()
            nilHits := make([][]byte, nq)
            hit := &servicepb.Hits{}
            for i := 0; i < int(nq); i++ {
                bs, err := proto.Marshal(hit)
                if err != nil {
                    return err
                }
                nilHits[i] = bs
            }
        var results = internalpb.SearchResult{
            MsgType: internalpb.MsgType_kSearchResult,
            Status:  &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS},
@ -302,36 +293,44 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
            QueryNodeID:     ss.queryNodeID,
            Timestamp:       searchTimestamp,
            ResultChannelID: searchMsg.ResultChannelID,
            Hits:            nilHits,
            Hits:            nil,
        }
        searchResultMsg := &msgstream.SearchResultMsg{
            BaseMsg: msgstream.BaseMsg{HashValues: []uint32{uint32(searchMsg.ResultChannelID)}},
            BaseMsg: msgstream.BaseMsg{
                MsgCtx:     searchMsg.MsgCtx,
                HashValues: []uint32{uint32(searchMsg.ResultChannelID)},
            },
            SearchResult: results,
        }
        err = ss.publishSearchResult(searchResultMsg)
        if err != nil {
            span.LogFields(oplog.Error(err))
            return err
        }
        span.LogFields(oplog.String("publish search research success", "publish search research success"))
        return nil
    }
    }

    inReduced := make([]bool, len(searchResults))
    numSegment := int64(len(searchResults))
    err2 := reduceSearchResults(searchResults, numSegment, inReduced)
    if err2 != nil {
        span.LogFields(oplog.Error(err2))
        return err2
    }
    err = fillTargetEntry(plan, searchResults, matchedSegments, inReduced)
    if err != nil {
        span.LogFields(oplog.Error(err))
        return err
    }
    marshaledHits, err := reorganizeQueryResults(plan, placeholderGroups, searchResults, numSegment, inReduced)
    if err != nil {
        span.LogFields(oplog.Error(err))
        return err
    }
    hitsBlob, err := marshaledHits.getHitsBlob()
    if err != nil {
        span.LogFields(oplog.Error(err))
        return err
    }

@ -366,11 +365,14 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
        MetricType: plan.getMetricType(),
    }
    searchResultMsg := &msgstream.SearchResultMsg{
        BaseMsg: msgstream.BaseMsg{HashValues: []uint32{uint32(searchMsg.ResultChannelID)}},
        BaseMsg: msgstream.BaseMsg{
            MsgCtx:     searchMsg.MsgCtx,
            HashValues: []uint32{uint32(searchMsg.ResultChannelID)}},
        SearchResult: results,
    }
    err = ss.publishSearchResult(searchResultMsg)
    if err != nil {
        span.LogFields(oplog.Error(err))
        return err
    }
}
@ -383,6 +385,9 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
}

func (ss *searchService) publishSearchResult(msg msgstream.TsMsg) error {
    span, ctx := opentracing.StartSpanFromContext(msg.GetMsgContext(), "publish search result")
    defer span.Finish()
    msg.SetMsgContext(ctx)
    fmt.Println("Public SearchResult", msg.HashKeys())
    msgPack := msgstream.MsgPack{}
    msgPack.Msgs = append(msgPack.Msgs, msg)
@ -391,7 +396,9 @@ func (ss *searchService) publishSearchResult(msg msgstream.TsMsg) error {
}

func (ss *searchService) publishFailedSearchResult(msg msgstream.TsMsg, errMsg string) error {
    fmt.Println("Public fail SearchResult!")
    span, ctx := opentracing.StartSpanFromContext(msg.GetMsgContext(), "receive search msg")
    defer span.Finish()
    msg.SetMsgContext(ctx)
    msgPack := msgstream.MsgPack{}
    searchMsg, ok := msg.(*msgstream.SearchMsg)
    if !ok {
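Note: the search path above now threads a span through each message context and records failures on the span before it finishes. The pattern in isolation, as a sketch (doWork stands in for ss.search; with no tracer installed the calls fall back to the no-op tracer):

    package main

    import (
        "context"
        "errors"

        "github.com/opentracing/opentracing-go"
        oplog "github.com/opentracing/opentracing-go/log"
    )

    // process mirrors the diff's pattern: start a span from the message
    // context, record any error on the span, and always finish it.
    func process(ctx context.Context) error {
        span, ctx := opentracing.StartSpanFromContext(ctx, "do search")
        defer span.Finish()

        if err := doWork(ctx); err != nil {
            span.LogFields(oplog.Error(err)) // error travels with the trace
            return err
        }
        return nil
    }

    func doWork(ctx context.Context) error { return errors.New("example failure") }

    func main() {
        _ = process(context.Background())
    }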
@ -13,7 +13,6 @@ package querynode
import "C"
import (
    "strconv"
    "sync"
    "unsafe"

    "github.com/stretchr/testify/assert"
@ -22,8 +21,6 @@ import (
    "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
)

type indexParam = map[string]string

type Segment struct {
    segmentPtr C.CSegmentBase
    segmentID  UniqueID
@ -31,42 +28,25 @@ type Segment struct {
    collectionID     UniqueID
    lastMemSize      int64
    lastRowCount     int64
    mu               sync.Mutex
    recentlyModified bool
    indexParam       map[int64]indexParam
    paramMutex       sync.RWMutex
}

func (s *Segment) ID() UniqueID {
    return s.segmentID
}

func (s *Segment) SetRecentlyModified(modify bool) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.recentlyModified = modify
}

func (s *Segment) GetRecentlyModified() bool {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.recentlyModified
}

//-------------------------------------------------------------------------------------- constructor and destructor
func newSegment(collection *Collection, segmentID int64, partitionTag string, collectionID UniqueID) *Segment {
    /*
        CSegmentBase
        newSegment(CPartition partition, unsigned long segment_id);
    */
    initIndexParam := make(map[int64]indexParam)
    segmentPtr := C.NewSegment(collection.collectionPtr, C.ulong(segmentID))
    var newSegment = &Segment{
        segmentPtr:   segmentPtr,
        segmentID:    segmentID,
        partitionTag: partitionTag,
        collectionID: collectionID,
        indexParam:   initIndexParam,
    }

    return newSegment
@ -181,7 +161,7 @@ func (s *Segment) segmentInsert(offset int64, entityIDs *[]UniqueID, timestamps
        return errors.New("Insert failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
    }

    s.SetRecentlyModified(true)
    s.recentlyModified = true
    return nil
}

@ -276,39 +256,3 @@ func (s *Segment) updateSegmentIndex(loadIndexInfo *LoadIndexInfo) error {

    return nil
}

func (s *Segment) setIndexParam(fieldID int64, indexParamKv []*commonpb.KeyValuePair) error {
    s.paramMutex.Lock()
    defer s.paramMutex.Unlock()
    indexParamMap := make(indexParam)
    if indexParamKv == nil {
        return errors.New("loadIndexMsg's indexParam empty")
    }
    for _, param := range indexParamKv {
        indexParamMap[param.Key] = param.Value
    }
    s.indexParam[fieldID] = indexParamMap
    return nil
}

func (s *Segment) matchIndexParam(fieldID int64, indexParamKv []*commonpb.KeyValuePair) bool {
    s.paramMutex.RLock()
    defer s.paramMutex.RUnlock()
    fieldIndexParam := s.indexParam[fieldID]
    if fieldIndexParam == nil {
        return false
    }
    paramSize := len(s.indexParam)
    matchCount := 0
    for _, param := range indexParamKv {
        value, ok := fieldIndexParam[param.Key]
        if !ok {
            return false
        }
        if param.Value != value {
            return false
        }
        matchCount++
    }
    return paramSize == matchCount
}
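Note: the deleted matchIndexParam required every incoming key to match the stored per-field value and then sized the final comparison with len(s.indexParam); reading the surrounding code, len(fieldIndexParam) appears to be what was meant. A sketch of the exact-match semantics using the per-field count (local types, not repo code):

    package main

    import "fmt"

    type keyValuePair struct{ Key, Value string }

    // matchParams sketches the removed matchIndexParam: every incoming key
    // must exist in the stored map with the same value, and the match count
    // must cover the whole stored map, so the two sets agree exactly.
    func matchParams(stored map[string]string, incoming []keyValuePair) bool {
        if stored == nil {
            return false
        }
        matched := 0
        for _, kv := range incoming {
            v, ok := stored[kv.Key]
            if !ok || v != kv.Value {
                return false
            }
            matched++
        }
        return matched == len(stored)
    }

    func main() {
        stored := map[string]string{"index_type": "IVF_FLAT", "nlist": "100"}
        fmt.Println(matchParams(stored, []keyValuePair{{"index_type", "IVF_FLAT"}, {"nlist", "100"}})) // true
        fmt.Println(matchParams(stored, []keyValuePair{{"index_type", "IVF_FLAT"}}))                   // false
    }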
@ -44,8 +44,6 @@ func newTSafe() tSafe {
}

func (ts *tSafeImpl) registerTSafeWatcher(t *tSafeWatcher) {
    ts.tSafeMu.Lock()
    defer ts.tSafeMu.Unlock()
    ts.watcherList = append(ts.watcherList, t)
}

@ -57,9 +55,8 @@ func (ts *tSafeImpl) get() Timestamp {

func (ts *tSafeImpl) set(t Timestamp) {
    ts.tSafeMu.Lock()
    defer ts.tSafeMu.Unlock()

    ts.tSafe = t
    ts.tSafeMu.Unlock()
    for _, watcher := range ts.watcherList {
        watcher.notify()
    }
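Note: tSafeImpl.set now releases the mutex before notifying watchers, so a slow notify no longer holds the lock; the watcher list itself is read outside the lock here, just as in the diff. A minimal sketch of that shape:

    package main

    import (
        "fmt"
        "sync"
    )

    // tSafe sketches the locking change: the mutex guards only the timestamp
    // write, and notification happens after Unlock, so a blocking watcher
    // cannot stall other users of the lock.
    type tSafe struct {
        mu       sync.Mutex
        ts       uint64
        watchers []chan struct{}
    }

    func (t *tSafe) set(v uint64) {
        t.mu.Lock()
        t.ts = v
        t.mu.Unlock() // release before notifying

        for _, w := range t.watchers {
            select {
            case w <- struct{}{}:
            default: // don't block on a full watcher channel
            }
        }
    }

    func main() {
        w := make(chan struct{}, 1)
        ts := &tSafe{watchers: []chan struct{}{w}}
        ts.set(42)
        <-w
        fmt.Println("watcher notified, tSafe =", ts.ts)
    }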
@ -1,8 +1,12 @@
package flowgraph

import (
    "fmt"
    "log"

    "github.com/opentracing/opentracing-go"
    "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"

    "github.com/zilliztech/milvus-distributed/internal/msgstream"
)

@ -25,11 +29,33 @@ func (inNode *InputNode) InStream() *msgstream.MsgStream {
}

// empty input and return one *Msg
func (inNode *InputNode) Operate(in []*Msg) []*Msg {
func (inNode *InputNode) Operate([]*Msg) []*Msg {
    //fmt.Println("Do InputNode operation")

    msgPack := (*inNode.inStream).Consume()

    var childs []opentracing.Span
    tracer := opentracing.GlobalTracer()
    if tracer != nil && msgPack != nil {
        for _, msg := range msgPack.Msgs {
            if msg.Type() == internalpb.MsgType_kInsert {
                var child opentracing.Span
                ctx := msg.GetMsgContext()
                if parent := opentracing.SpanFromContext(ctx); parent != nil {
                    child = tracer.StartSpan(fmt.Sprintf("through msg input node, start time = %d", msg.BeginTs()),
                        opentracing.FollowsFrom(parent.Context()))
                } else {
                    child = tracer.StartSpan(fmt.Sprintf("through msg input node, start time = %d", msg.BeginTs()))
                }
                child.SetTag("hash keys", msg.HashKeys())
                child.SetTag("start time", msg.BeginTs())
                child.SetTag("end time", msg.EndTs())
                msg.SetMsgContext(opentracing.ContextWithSpan(ctx, child))
                childs = append(childs, child)
            }
        }
    }

    // TODO: add status
    if msgPack == nil {
        log.Println("null msg pack")
@ -42,6 +68,10 @@ func (inNode *InputNode) Operate(in []*Msg) []*Msg {
        timestampMax: msgPack.EndTs,
    }

    for _, child := range childs {
        child.Finish()
    }

    return []*Msg{&msgStreamMsg}
}
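Note: the InputNode links its per-message span with FollowsFrom when the consumed message already carries one, and starts a root span otherwise; FollowsFrom marks the parent as causally earlier without implying the parent waits on the child, which fits a fire-and-forget message pipeline. A sketch of that decision in isolation:

    package main

    import (
        "context"
        "fmt"

        "github.com/opentracing/opentracing-go"
    )

    // spanForMessage mirrors the branch above: FollowsFrom an existing span
    // in the message context if there is one, otherwise start a root span.
    func spanForMessage(tracer opentracing.Tracer, ctx context.Context, name string) (opentracing.Span, context.Context) {
        var span opentracing.Span
        if parent := opentracing.SpanFromContext(ctx); parent != nil {
            span = tracer.StartSpan(name, opentracing.FollowsFrom(parent.Context()))
        } else {
            span = tracer.StartSpan(name)
        }
        return span, opentracing.ContextWithSpan(ctx, span)
    }

    func main() {
        tracer := opentracing.GlobalTracer() // no-op tracer unless one was installed
        span, ctx := spanForMessage(tracer, context.Background(), "through msg input node")
        defer span.Finish()
        fmt.Println("span attached:", opentracing.SpanFromContext(ctx) != nil)
    }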
@ -103,6 +103,8 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
        return tsMessages[i].BeginTs() < tsMessages[j].BeginTs()
    })

    var flush bool = false
    var flushSegID UniqueID
    // do dd tasks
    for _, msg := range tsMessages {
        switch msg.Type() {
@ -116,36 +118,16 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
            ddNode.dropPartition(msg.(*msgstream.DropPartitionMsg))
        case internalPb.MsgType_kFlush:
            fMsg := msg.(*msgstream.FlushMsg)
            flushSegID := fMsg.SegmentID
            flush = true
            flushSegID = fMsg.SegmentID
            ddMsg.flushMessages = append(ddMsg.flushMessages, fMsg)
            ddNode.flush()

            log.Println(".. manual flush completed ...")
            ddlFlushMsg := &ddlFlushSyncMsg{
                flushCompleted: true,
                ddlBinlogPathMsg: ddlBinlogPathMsg{
                    segID: flushSegID,
                },
            }

            ddNode.outCh <- ddlFlushMsg

        default:
            log.Println("Non supporting message type:", msg.Type())
        }
    }

    // generate binlog
    if ddNode.ddBuffer.full() {
    if ddNode.ddBuffer.full() || flush {
        ddNode.flush()
    }

    var res Msg = ddNode.ddMsg
    return []*Msg{&res}
}

func (ddNode *ddNode) flush() {
    // generate binlog
    log.Println(". dd buffer full or receive Flush msg ...")
    ddCodec := &storage.DataDefinitionCodec{}
    for collectionID, data := range ddNode.ddBuffer.ddData {
@ -210,6 +192,24 @@ func (ddNode *ddNode) flush() {
        }
        // clear buffer
        ddNode.ddBuffer.ddData = make(map[UniqueID]*ddData)
    }

    if flush {

        log.Println(".. manual flush completed ...")
        ddlFlushMsg := &ddlFlushSyncMsg{
            flushCompleted: true,
            ddlBinlogPathMsg: ddlBinlogPathMsg{
                segID: flushSegID,
            },
        }

        ddNode.outCh <- ddlFlushMsg

    }

    var res Msg = ddNode.ddMsg
    return []*Msg{&res}
}

func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
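Note: the kFlush case now only records that a flush was requested, and a single flush path after the message loop fires on either a full buffer or the flag, reporting completion downstream once the buffer is written. A control-flow sketch of that consolidation (strings stand in for message types, not repo code):

    package main

    import "fmt"

    // operate sketches the reworked ddNode.Operate flow: note the flush
    // request inside the loop, do the actual work once after it.
    func operate(msgs []string, bufferFull bool) {
        flush := false
        for _, m := range msgs {
            if m == "flush" {
                flush = true // defer the actual work until after the loop
            }
        }
        if bufferFull || flush {
            fmt.Println("flushing dd buffer once")
        }
        if flush {
            fmt.Println("reporting manual flush completion downstream")
        }
    }

    func main() {
        operate([]string{"createCollection", "flush"}, false)
    }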
@ -1,9 +1,12 @@
package writenode

import (
    "context"
    "log"
    "math"

    "github.com/opentracing/opentracing-go"

    "github.com/zilliztech/milvus-distributed/internal/msgstream"
    "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
    internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
@ -32,11 +35,34 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
        // TODO: add error handling
    }

    var childs []opentracing.Span
    tracer := opentracing.GlobalTracer()
    if tracer != nil {
        for _, msg := range msgStreamMsg.TsMessages() {
            if msg.Type() == internalPb.MsgType_kInsert {
                var child opentracing.Span
                ctx := msg.GetMsgContext()
                if parent := opentracing.SpanFromContext(ctx); parent != nil {
                    child = tracer.StartSpan("pass filter node",
                        opentracing.FollowsFrom(parent.Context()))
                } else {
                    child = tracer.StartSpan("pass filter node")
                }
                child.SetTag("hash keys", msg.HashKeys())
                child.SetTag("start time", msg.BeginTs())
                child.SetTag("end time", msg.EndTs())
                msg.SetMsgContext(opentracing.ContextWithSpan(ctx, child))
                childs = append(childs, child)
            }
        }
    }

    ddMsg, ok := (*in[1]).(*ddMsg)
    if !ok {
        log.Println("type assertion failed for ddMsg")
        // TODO: add error handling
    }

    fdmNode.ddMsg = ddMsg

    var iMsg = insertMsg{
@ -57,11 +83,20 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
        }
    }

    for _, msg := range msgStreamMsg.TsMessages() {
    for key, msg := range msgStreamMsg.TsMessages() {
        switch msg.Type() {
        case internalPb.MsgType_kInsert:
            var ctx2 context.Context
            if childs != nil {
                if childs[key] != nil {
                    ctx2 = opentracing.ContextWithSpan(msg.GetMsgContext(), childs[key])
                } else {
                    ctx2 = context.Background()
                }
            }
            resMsg := fdmNode.filterInvalidInsertMessage(msg.(*msgstream.InsertMsg))
            if resMsg != nil {
                resMsg.SetMsgContext(ctx2)
                iMsg.insertMessages = append(iMsg.insertMessages, resMsg)
            }
        // case internalPb.MsgType_kDelete:
@ -73,6 +108,9 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {

    iMsg.gcRecord = ddMsg.gcRecord
    var res Msg = &iMsg
    for _, child := range childs {
        child.Finish()
    }
    return []*Msg{&res}
}
@ -4,11 +4,15 @@ import (
    "bytes"
    "context"
    "encoding/binary"
    "fmt"
    "log"
    "path"
    "strconv"
    "unsafe"

    "github.com/opentracing/opentracing-go"
    oplog "github.com/opentracing/opentracing-go/log"

    "github.com/zilliztech/milvus-distributed/internal/allocator"
    "github.com/zilliztech/milvus-distributed/internal/kv"
    miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
@ -96,12 +100,23 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
    // iMsg is insertMsg
    // 1. iMsg -> buffer
    for _, msg := range iMsg.insertMessages {
        ctx := msg.GetMsgContext()
        var span opentracing.Span
        if ctx != nil {
            span, _ = opentracing.StartSpanFromContext(ctx, fmt.Sprintf("insert buffer node, start time = %d", msg.BeginTs()))
        } else {
            span = opentracing.StartSpan(fmt.Sprintf("insert buffer node, start time = %d", msg.BeginTs()))
        }
        span.SetTag("hash keys", msg.HashKeys())
        span.SetTag("start time", msg.BeginTs())
        span.SetTag("end time", msg.EndTs())
        if len(msg.RowIDs) != len(msg.Timestamps) || len(msg.RowIDs) != len(msg.RowData) {
            log.Println("Error: misaligned messages detected")
            continue
        }
        currentSegID := msg.GetSegmentID()
        collectionName := msg.GetCollectionName()
        span.LogFields(oplog.Int("segment id", int(currentSegID)))

        idata, ok := ibNode.insertBuffer.insertData[currentSegID]
        if !ok {
@ -110,6 +125,21 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
            }
        }

        // Timestamps
        _, ok = idata.Data[1].(*storage.Int64FieldData)
        if !ok {
            idata.Data[1] = &storage.Int64FieldData{
                Data:    []int64{},
                NumRows: 0,
            }
        }
        tsData := idata.Data[1].(*storage.Int64FieldData)
        for _, ts := range msg.Timestamps {
            tsData.Data = append(tsData.Data, int64(ts))
        }
        tsData.NumRows += len(msg.Timestamps)
        span.LogFields(oplog.Int("tsData numRows", tsData.NumRows))

        // 1.1 Get CollectionMeta from etcd
        collection, err := ibNode.replica.getCollectionByName(collectionName)
        //collSchema, err := ibNode.getCollectionSchemaByName(collectionName)
@ -358,9 +388,11 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {

        // 1.3 store in buffer
        ibNode.insertBuffer.insertData[currentSegID] = idata
        span.LogFields(oplog.String("store in buffer", "store in buffer"))

        // 1.4 if full
        // 1.4.1 generate binlogs
        span.LogFields(oplog.String("generate binlogs", "generate binlogs"))
        if ibNode.insertBuffer.full(currentSegID) {
            log.Printf(". Insert Buffer full, auto flushing (%v) rows of data...", ibNode.insertBuffer.size(currentSegID))
            // partitionTag -> partitionID
@ -429,6 +461,7 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
                ibNode.outCh <- inBinlogMsg
            }
        }
        span.Finish()
    }

    if len(iMsg.insertMessages) > 0 {
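Note: the new block above buffers each message's timestamps into field 1 of the insert data as a storage.Int64FieldData, creating the field on first use and tracking NumRows alongside. A sketch with a local stand-in for the storage type (not the real storage package):

    package main

    import "fmt"

    // Int64FieldData is a local stand-in for the storage type used above:
    // a growable int64 column plus a row counter.
    type Int64FieldData struct {
        Data    []int64
        NumRows int
    }

    // appendTimestamps mirrors the diff: lazily create field 1, then append
    // every message timestamp and bump the row count.
    func appendTimestamps(idata map[int64]interface{}, timestamps []uint64) {
        if _, ok := idata[1].(*Int64FieldData); !ok {
            idata[1] = &Int64FieldData{Data: []int64{}, NumRows: 0}
        }
        tsData := idata[1].(*Int64FieldData)
        for _, ts := range timestamps {
            tsData.Data = append(tsData.Data, int64(ts))
        }
        tsData.NumRows += len(timestamps)
    }

    func main() {
        idata := map[int64]interface{}{}
        appendTimestamps(idata, []uint64{10, 11, 12})
        fmt.Println(idata[1].(*Int64FieldData).NumRows) // 3
    }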
@ -112,7 +112,6 @@ func (fService *flushSyncService) start() {
            fService.completeInsertFlush(insertFlushMsg.segID)

            if fService.FlushCompleted(insertFlushMsg.segID) {
                log.Printf("Seg(%d) flush completed.", insertFlushMsg.segID)
                fService.metaTable.CompleteFlush(insertFlushMsg.ts, insertFlushMsg.segID)
            }
        }
@ -90,7 +90,7 @@ func TestFlushSyncService_Start(t *testing.T) {
    }

    for {
        if len(ddChan) == 0 && len(insertChan) == 0 && fService.FlushCompleted(SegID) {
        if len(ddChan) == 0 && len(insertChan) == 0 {
            break
        }
    }
@ -117,6 +117,10 @@ func TestFlushSyncService_Start(t *testing.T) {
        assert.NoError(t, err)
        assert.Equal(t, true, cp)

        cp, err = fService.metaTable.checkFlushComplete(SegID)
        assert.NoError(t, err)
        assert.Equal(t, true, cp)

    })
}
@ -171,8 +171,8 @@ func (mt *metaTable) addSegmentFlush(segmentID UniqueID, timestamp Timestamp) er
}

func (mt *metaTable) getFlushCloseTime(segmentID UniqueID) (Timestamp, error) {
    mt.lock.RLock()
    mt.lock.Lock()
    defer mt.lock.RUnlock()
    defer mt.lock.Unlock()
    meta, ok := mt.segID2FlushMeta[segmentID]
    if !ok {
        return typeutil.ZeroTimestamp, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
@ -181,8 +181,8 @@ func (mt *metaTable) getFlushCloseTime(segmentID UniqueID) (Timestamp, error) {
}

func (mt *metaTable) getFlushOpenTime(segmentID UniqueID) (Timestamp, error) {
    mt.lock.RLock()
    mt.lock.Lock()
    defer mt.lock.RUnlock()
    defer mt.lock.Unlock()
    meta, ok := mt.segID2FlushMeta[segmentID]
    if !ok {
        return typeutil.ZeroTimestamp, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
@ -191,8 +191,8 @@ func (mt *metaTable) getFlushOpenTime(segmentID UniqueID) (Timestamp, error) {
}

func (mt *metaTable) checkFlushComplete(segmentID UniqueID) (bool, error) {
    mt.lock.RLock()
    mt.lock.Lock()
    defer mt.lock.RUnlock()
    defer mt.lock.Unlock()
    meta, ok := mt.segID2FlushMeta[segmentID]
    if !ok {
        return false, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
@ -201,8 +201,9 @@ func (mt *metaTable) checkFlushComplete(segmentID UniqueID) (bool, error) {
}

func (mt *metaTable) getSegBinlogPaths(segmentID UniqueID) (map[int64][]string, error) {
    mt.lock.RLock()
    mt.lock.Lock()
    defer mt.lock.RUnlock()
    defer mt.lock.Unlock()

    meta, ok := mt.segID2FlushMeta[segmentID]
    if !ok {
        return nil, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
@ -215,8 +216,9 @@ func (mt *metaTable) getSegBinlogPaths(segmentID UniqueID) (map[int64][]string,
}

func (mt *metaTable) getDDLBinlogPaths(collID UniqueID) (map[UniqueID][]string, error) {
    mt.lock.RLock()
    mt.lock.Lock()
    defer mt.lock.RUnlock()
    defer mt.lock.Unlock()

    meta, ok := mt.collID2DdlMeta[collID]
    if !ok {
        return nil, errors.Errorf("collection not exists with ID = " + strconv.FormatInt(collID, 10))
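Note: these metaTable getters move from RLock/RUnlock to the exclusive Lock/Unlock. With sync.RWMutex, read locks may be held by several goroutines at once, while the write lock serializes them even for read-only access. A small contrast of the two (the sketch uses RLock so the readers can overlap):

    package main

    import (
        "fmt"
        "sync"
    )

    // Two readers under RLock may hold the lock concurrently; replacing
    // RLock/RUnlock with Lock/Unlock would force them to run one at a time.
    func main() {
        var mu sync.RWMutex
        data := map[string]string{"seg1": "path/a"}

        var wg sync.WaitGroup
        for i := 0; i < 2; i++ {
            wg.Add(1)
            go func(id int) {
                defer wg.Done()
                mu.RLock() // shared: both goroutines may hold this at once
                defer mu.RUnlock()
                fmt.Println("reader", id, "sees", data["seg1"])
            }(i)
        }
        wg.Wait()
    }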
@ -2,6 +2,12 @@ package writenode

 import (
 	"context"
+	"fmt"
+	"io"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-client-go"
+	"github.com/uber/jaeger-client-go/config"
 )

 type WriteNode struct {
@ -11,6 +17,8 @@ type WriteNode struct {
 	flushSyncService *flushSyncService
 	metaService      *metaService
 	replica          collectionReplica
+	tracer           opentracing.Tracer
+	closer           io.Closer
 }

 func NewWriteNode(ctx context.Context, writeNodeID uint64) *WriteNode {
@ -38,6 +46,22 @@ func Init() {
 }

 func (node *WriteNode) Start() error {
+	cfg := &config.Configuration{
+		ServiceName: "write_node",
+		Sampler: &config.SamplerConfig{
+			Type:  "const",
+			Param: 1,
+		},
+		Reporter: &config.ReporterConfig{
+			LogSpans: true,
+		},
+	}
+	var err error
+	node.tracer, node.closer, err = cfg.NewTracer(config.Logger(jaeger.StdLogger))
+	if err != nil {
+		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
+	}
+	opentracing.SetGlobalTracer(node.tracer)

 	// TODO GOOSE Init Size??
 	chanSize := 100
@ -61,4 +85,9 @@ func (node *WriteNode) Close() {
 	if node.dataSyncService != nil {
 		(*node.dataSyncService).close()
 	}
+
+	if node.closer != nil {
+		node.closer.Close()
+	}
+
 }
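The Start/Close hunks above thread a Jaeger tracer through the WriteNode lifecycle. Below is a standalone sketch of that same lifecycle using the jaeger-client-go APIs the diff imports; the service name and sampler settings are copied from the diff, the rest is illustrative:

    package main

    import (
        "fmt"
        "io"

        "github.com/opentracing/opentracing-go"
        "github.com/uber/jaeger-client-go"
        "github.com/uber/jaeger-client-go/config"
    )

    func initTracer(service string) (opentracing.Tracer, io.Closer, error) {
        cfg := &config.Configuration{
            ServiceName: service,
            // A "const" sampler with Param 1 records every span — fine while
            // debugging, usually too chatty for production.
            Sampler:  &config.SamplerConfig{Type: "const", Param: 1},
            Reporter: &config.ReporterConfig{LogSpans: true},
        }
        return cfg.NewTracer(config.Logger(jaeger.StdLogger))
    }

    func main() {
        tracer, closer, err := initTracer("write_node")
        if err != nil {
            panic(fmt.Sprintf("cannot init Jaeger: %v", err))
        }
        // Close() flushes buffered spans on shutdown, which is what the
        // node.closer.Close() call added to WriteNode.Close() is for.
        defer closer.Close()
        opentracing.SetGlobalTracer(tracer)

        span := tracer.StartSpan("demo-op")
        span.Finish()
    }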
scripts/cwrapper_rocksdb_build.sh (new file, 54 lines)
@ -0,0 +1,54 @@
+#!/bin/bash
+
+SOURCE=${BASH_SOURCE[0]}
+while [ -h $SOURCE ]; do # resolve $SOURCE until the file is no longer a symlink
+  DIR=$( cd -P $( dirname $SOURCE ) && pwd )
+  SOURCE=$(readlink $SOURCE)
+  [[ $SOURCE != /* ]] && SOURCE=$DIR/$SOURCE # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
+done
+DIR=$( cd -P $( dirname $SOURCE ) && pwd )
+# echo $DIR
+
+SRC_DIR=${DIR}/../internal/kv/rocksdb/cwrapper
+CGO_CFLAGS="-I$(SRC_DIR)/output/include"
+CGO_LDFLAGS="-L$(SRC_DIR)/output/lib -l:librocksdb.a -lstdc++ -lm -lz"
+
+OUTPUT_LIB=${SRC_DIR}/output
+
+if [ -d ${OUTPUT_LIB} ];then
+    rm -rf ${OUTPUT_LIB}
+fi
+mkdir ${OUTPUT_LIB}
+
+BUILD_TYPE="Debug"
+
+while getopts "t:h:" arg; do
+  case $arg in
+  t)
+    BUILD_TYPE=$OPTARG # BUILD_TYPE
+    ;;
+  h) # help
+    echo "-t: build type(default: Debug)
+-h: help
+"
+    exit 0
+    ;;
+  ?)
+    echo "ERROR! unknown argument"
+    exit 1
+    ;;
+  esac
+done
+echo "BUILD_TYPE: " $BUILD_TYPE
+
+pushd ${OUTPUT_LIB}
+CMAKE_CMD="cmake \
+    -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${SRC_DIR}"
+
+${CMAKE_CMD}
+echo ${CMAKE_CMD}
+
+if [[ ! ${jobs+1} ]]; then
+    jobs=$(nproc)
+fi
+make -j ${jobs}
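The new script stages RocksDB headers and a static librocksdb under internal/kv/rocksdb/cwrapper/output and spells out the matching CGO flags (note the CGO_CFLAGS/CGO_LDFLAGS assignments use $(SRC_DIR) command substitution where the ${SRC_DIR} expansion appears intended). A hypothetical cgo smoke test against that layout could look like the following; the output paths and the use of RocksDB's C API are assumptions for illustration, not code from the repository:

    // Build with the two variables the script computes, e.g. (assumed layout):
    //   export CGO_CFLAGS="-I<repo>/internal/kv/rocksdb/cwrapper/output/include"
    //   export CGO_LDFLAGS="-L<repo>/internal/kv/rocksdb/cwrapper/output/lib -l:librocksdb.a -lstdc++ -lm -lz"
    //   go run rocksdb_smoke.go
    package main

    /*
    #include <stdlib.h>
    #include "rocksdb/c.h"
    */
    import "C"

    import "unsafe"

    func main() {
        opts := C.rocksdb_options_create()
        defer C.rocksdb_options_destroy(opts)
        C.rocksdb_options_set_create_if_missing(opts, C.uchar(1))

        path := C.CString("/tmp/rocksdb_cwrapper_smoke")
        defer C.free(unsafe.Pointer(path))

        // Open (creating if missing) and immediately close: enough to prove
        // the headers and the static library were staged correctly.
        var cerr *C.char
        db := C.rocksdb_open(opts, path, &cerr)
        if cerr != nil {
            panic(C.GoString(cerr))
        }
        C.rocksdb_close(db)
    }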
@ -8,15 +8,6 @@ while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symli
 done
 ROOT_DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"

-unameOut="$(uname -s)"
-case "${unameOut}" in
-    Linux*)     machine=Linux;;
-    Darwin*)    machine=Mac;;
-    CYGWIN*)    machine=Cygwin;;
-    MINGW*)     machine=MinGw;;
-    *)          machine="UNKNOWN:${unameOut}"
-esac
-
 # Attempt to run in the container with the same UID/GID as we have on the host,
 # as this results in the correct permissions on files created in the shared
 # volumes. This isn't always possible, however, as IDs less than 100 are
@ -30,14 +21,8 @@ gid=$(id -g)
 [ "$uid" -lt 500 ] && uid=501
 [ "$gid" -lt 500 ] && gid=$uid

-awk 'c&&c--{sub(/^/,"#")} /# Build devcontainer/{c=5} 1' $ROOT_DIR/docker-compose.yml > $ROOT_DIR/docker-compose-vscode.yml.tmp
+awk 'c&&c--{sub(/^/,"#")} /# Build devcontainer/{c=5} 1' $ROOT_DIR/docker-compose.yml > $ROOT_DIR/docker-compose-vscode.yml.bak

-awk 'c&&c--{sub(/^/,"#")} /# Command/{c=3} 1' $ROOT_DIR/docker-compose-vscode.yml.tmp > $ROOT_DIR/docker-compose-vscode.yml
+awk 'c&&c--{sub(/^/,"#")} /# Command/{c=3} 1' $ROOT_DIR/docker-compose-vscode.yml.bak > $ROOT_DIR/docker-compose-vscode.yml

-rm $ROOT_DIR/docker-compose-vscode.yml.tmp
-
-if [ "${machine}" == "Mac" ];then
-    sed -i '' "s/# user: {{ CURRENT_ID }}/user: \"$uid:$gid\"/g" $ROOT_DIR/docker-compose-vscode.yml
-else
-    sed -i "s/# user: {{ CURRENT_ID }}/user: \"$uid:$gid\"/g" $ROOT_DIR/docker-compose-vscode.yml
-fi
+sed -i '.bak' "s/# user: {{ CURRENT_ID }}/user: \"$uid:$gid\"/g" $ROOT_DIR/docker-compose-vscode.yml
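The awk one-liners kept above implement "comment out the next N lines after a marker": /marker/{c=N} arms a countdown, c&&c--{sub(/^/,"#")} prefixes "#" while it runs, and the trailing 1 prints every line. A literal Go port of that logic, for illustration only:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    func main() {
        c := 0
        sc := bufio.NewScanner(os.Stdin)
        for sc.Scan() {
            line := sc.Text()
            if c > 0 { // c&&c--{sub(/^/,"#")}: comment out while the countdown runs
                line = "#" + line
                c--
            }
            if strings.Contains(line, "# Build devcontainer") { // /# Build devcontainer/{c=5}
                c = 5
            }
            fmt.Println(line) // the trailing "1" in awk: print every line
        }
    }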
@ -8,15 +8,13 @@ while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symli
 SOURCE="$(readlink "$SOURCE")"
 [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
 done
-ROOT_DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
+SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

 # ignore Minio,S3 unittes
-MILVUS_DIR="${ROOT_DIR}/internal/"
+MILVUS_DIR="${SCRIPTS_DIR}/../internal/"
 echo $MILVUS_DIR

-go test -cover "${MILVUS_DIR}/kv/..." -failfast
-go test -cover "${MILVUS_DIR}/proxy/..." -failfast
-go test -cover "${MILVUS_DIR}/writenode/..." -failfast
-go test -cover "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/master/..." "${MILVUS_DIR}/querynode/..." "${MILVUS_DIR}/storage" "${MILVUS_DIR}/util/..." -failfast
-#go test -cover "${MILVUS_DIR}/kv/..." "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/master/..." "${MILVUS_DIR}/querynode/..." -failfast
+go test -race -cover "${MILVUS_DIR}/kv/..." -failfast
+go test -race -cover "${MILVUS_DIR}/proxy/..." -failfast
+go test -race -cover "${MILVUS_DIR}/writenode/..." -failfast
+go test -race -cover "${MILVUS_DIR}/master/..." -failfast
+go test -cover "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/querynode/..." "${MILVUS_DIR}/storage" "${MILVUS_DIR}/util/..." -failfast
+#go test -race -cover "${MILVUS_DIR}/kv/..." "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/master/..." "${MILVUS_DIR}/querynode/..." -failfast
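The unit-test script now runs most packages under Go's race detector. As a reminder of what -race adds, here is a minimal, deliberately racy test (illustrative, not from the repository): plain go test passes silently, while go test -race reports the unsynchronized writes.

    package kv

    import (
        "sync"
        "testing"
    )

    // Two goroutines increment n without synchronization. The logic "works"
    // often enough to pass, but `go test -race` flags the data race reliably.
    func TestRacyCounter(t *testing.T) {
        var n int
        var wg sync.WaitGroup
        for i := 0; i < 2; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                n++ // unsynchronized read-modify-write
            }()
        }
        wg.Wait()
        _ = n
    }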
@ -4,5 +4,5 @@ numpy==1.18.1
 pytest==5.3.4
 pytest-cov==2.8.1
 pytest-timeout==1.3.4
-pymilvus-distributed==0.0.14
+pymilvus-distributed==0.0.10
 sklearn==0.0
@ -101,6 +101,7 @@ class TestInsertBase:
         connect.flush([collection])
         connect.drop_collection(collection)

+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_create_index(self, connect, collection, get_simple_index):
         '''
@ -118,6 +119,7 @@ class TestInsertBase:
         if field["name"] == field_name:
             assert field["indexes"][0] == get_simple_index

+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_create_index_new(self, connect, collection, get_simple_index):
         '''
@ -135,6 +137,7 @@ class TestInsertBase:
         if field["name"] == field_name:
             assert field["indexes"][0] == get_simple_index

+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_after_create_index(self, connect, collection, get_simple_index):
         '''
@ -151,6 +154,7 @@ class TestInsertBase:
         if field["name"] == field_name:
             assert field["indexes"][0] == get_simple_index

+    # @pytest.mark.skip(" later ")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_search(self, connect, collection):
         '''
@ -641,6 +645,7 @@ class TestInsertBinary:
         connect.flush([binary_collection])
         assert connect.count_entities(binary_collection) == default_nb

+    @pytest.mark.skip("create index")
     def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
         '''
         target: test insert binary entities after build index
@ -657,6 +662,7 @@ class TestInsertBinary:
         if field["name"] == binary_field_name:
             assert field["indexes"][0] == get_binary_index

+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
         '''
@ -857,6 +863,7 @@ class TestInsertMultiCollections:
         connect.flush([collection_name])
         assert len(ids) == 1

+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_create_index_insert_vector_another(self, connect, collection, get_simple_index):
         '''
@ -870,7 +877,7 @@ class TestInsertMultiCollections:
         ids = connect.bulk_insert(collection, default_entity)
         connect.drop_collection(collection_name)

-    @pytest.mark.skip("count entities")
+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_vector_create_index_another(self, connect, collection, get_simple_index):
         '''
@ -885,7 +892,7 @@ class TestInsertMultiCollections:
         count = connect.count_entities(collection_name)
         assert count == 0

-    @pytest.mark.skip("count entities")
+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_vector_sleep_create_index_another(self, connect, collection, get_simple_index):
         '''
@ -17,19 +17,19 @@ query, query_vecs = gen_query_vectors(field_name, default_entities, default_top_
 default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}


-# @pytest.mark.skip("wait for debugging...")
+@pytest.mark.skip("wait for debugging...")
 class TestIndexBase:
     @pytest.fixture(
         scope="function",
         params=gen_simple_index()
     )
     def get_simple_index(self, request, connect):
-        import copy
         logging.getLogger().info(request.param)
-        if str(connect._cmd("mode")) == "CPU":
+        # TODO: Determine the service mode
+        # if str(connect._cmd("mode")) == "CPU":
         if request.param["index_type"] in index_cpu_not_support():
             pytest.skip("sq8h not support in CPU mode")
-        return copy.deepcopy(request.param)
+        return request.param

     @pytest.fixture(
         scope="function",
@ -132,7 +132,7 @@ class TestIndexBase:
         '''
         ids = connect.bulk_insert(collection, default_entities)
         connect.create_index(collection, field_name, get_simple_index)
-        # logging.getLogger().info(connect.get_collection_stats(collection))
+        logging.getLogger().info(connect.get_collection_stats(collection))
         nq = get_nq
         index_type = get_simple_index["index_type"]
         search_param = get_search_param(index_type)
@ -140,7 +140,6 @@ class TestIndexBase:
         res = connect.search(collection, query)
         assert len(res) == nq

-    @pytest.mark.skip("can't_pass_ci")
     @pytest.mark.timeout(BUILD_TIMEOUT)
     @pytest.mark.level(2)
     def test_create_index_multithread(self, connect, collection, args):
@ -176,7 +175,6 @@ class TestIndexBase:
         with pytest.raises(Exception) as e:
             connect.create_index(collection_name, field_name, default_index)

-    @pytest.mark.skip("count_entries")
     @pytest.mark.level(2)
     @pytest.mark.timeout(BUILD_TIMEOUT)
     def test_create_index_insert_flush(self, connect, collection, get_simple_index):
@ -203,7 +201,6 @@ class TestIndexBase:
         connect.create_index(collection, field_name, get_simple_index)

     # TODO:
-    @pytest.mark.skip("get_collection_stats")
     @pytest.mark.level(2)
     @pytest.mark.timeout(BUILD_TIMEOUT)
     def test_create_different_index_repeatedly(self, connect, collection):
@ -278,7 +275,7 @@ class TestIndexBase:
         ids = connect.bulk_insert(collection, default_entities)
         get_simple_index["metric_type"] = metric_type
         connect.create_index(collection, field_name, get_simple_index)
-        # logging.getLogger().info(connect.get_collection_stats(collection))
+        logging.getLogger().info(connect.get_collection_stats(collection))
         nq = get_nq
         index_type = get_simple_index["index_type"]
         search_param = get_search_param(index_type)
@ -323,7 +320,6 @@ class TestIndexBase:
         with pytest.raises(Exception) as e:
             connect.create_index(collection_name, field_name, default_index)

-    @pytest.mark.skip("count_entries")
     @pytest.mark.timeout(BUILD_TIMEOUT)
     def test_create_index_no_vectors_insert_ip(self, connect, collection, get_simple_index):
         '''
@ -351,8 +347,6 @@ class TestIndexBase:
         connect.create_index(collection, field_name, get_simple_index)

     # TODO:
-
-    @pytest.mark.skip("get_collection_stats")
     @pytest.mark.level(2)
     @pytest.mark.timeout(BUILD_TIMEOUT)
     def test_create_different_index_repeatedly_ip(self, connect, collection):
@ -375,7 +369,6 @@ class TestIndexBase:
     ******************************************************************
     """

-    @pytest.mark.skip("drop_index")
     def test_drop_index(self, connect, collection, get_simple_index):
         '''
         target: test drop index interface
@ -389,7 +382,6 @@ class TestIndexBase:
         # assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
         assert not stats["partitions"][0]["segments"]

-    @pytest.mark.skip("drop_index")
     @pytest.mark.level(2)
     def test_drop_index_repeatly(self, connect, collection, get_simple_index):
         '''
@ -406,7 +398,6 @@ class TestIndexBase:
         # assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
         assert not stats["partitions"][0]["segments"]

-    @pytest.mark.skip("drop_index")
     @pytest.mark.level(2)
     def test_drop_index_without_connect(self, dis_connect, collection):
         '''
@ -417,7 +408,6 @@ class TestIndexBase:
         with pytest.raises(Exception) as e:
             dis_connect.drop_index(collection, field_name)

-    @pytest.mark.skip("drop_index")
     def test_drop_index_collection_not_existed(self, connect):
         '''
         target: test drop index interface when collection name not existed
@ -429,7 +419,6 @@ class TestIndexBase:
         with pytest.raises(Exception) as e:
             connect.drop_index(collection_name, field_name)

-    @pytest.mark.skip("drop_index")
     def test_drop_index_collection_not_create(self, connect, collection):
         '''
         target: test drop index interface when index not created
@ -440,7 +429,6 @@ class TestIndexBase:
         # no create index
         connect.drop_index(collection, field_name)

-    @pytest.mark.skip("drop_index")
     @pytest.mark.level(2)
     def test_create_drop_index_repeatly(self, connect, collection, get_simple_index):
         '''
@ -452,7 +440,6 @@ class TestIndexBase:
         connect.create_index(collection, field_name, get_simple_index)
         connect.drop_index(collection, field_name)

-    @pytest.mark.skip("drop_index")
     def test_drop_index_ip(self, connect, collection, get_simple_index):
         '''
         target: test drop index interface
@ -467,7 +454,6 @@ class TestIndexBase:
         # assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
         assert not stats["partitions"][0]["segments"]

-    @pytest.mark.skip("drop_index")
     @pytest.mark.level(2)
     def test_drop_index_repeatly_ip(self, connect, collection, get_simple_index):
         '''
@ -485,7 +471,6 @@ class TestIndexBase:
         # assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
         assert not stats["partitions"][0]["segments"]

-    @pytest.mark.skip("drop_index")
     @pytest.mark.level(2)
     def test_drop_index_without_connect_ip(self, dis_connect, collection):
         '''
@ -496,7 +481,6 @@ class TestIndexBase:
         with pytest.raises(Exception) as e:
             dis_connect.drop_index(collection, field_name)

-    @pytest.mark.skip("drop_index")
     def test_drop_index_collection_not_create_ip(self, connect, collection):
         '''
         target: test drop index interface when index not created
@ -507,7 +491,6 @@ class TestIndexBase:
         # no create index
         connect.drop_index(collection, field_name)

-    @pytest.mark.skip("drop_index")
     @pytest.mark.level(2)
     def test_create_drop_index_repeatly_ip(self, connect, collection, get_simple_index):
         '''
@ -521,6 +504,7 @@ class TestIndexBase:
         connect.drop_index(collection, field_name)


+@pytest.mark.skip("binary")
 class TestIndexBinary:
     @pytest.fixture(
         scope="function",
@ -606,7 +590,6 @@ class TestIndexBinary:
         res = connect.search(binary_collection, query, search_params=search_param)
         assert len(res) == nq

-    @pytest.mark.skip("get status for build index failed")
     @pytest.mark.timeout(BUILD_TIMEOUT)
     def test_create_index_invalid_metric_type_binary(self, connect, binary_collection, get_l2_index):
         '''
@ -630,7 +613,6 @@ class TestIndexBinary:
     ******************************************************************
     """

-    @pytest.mark.skip("get_collection_stats does not impl")
     def test_get_index_info(self, connect, binary_collection, get_jaccard_index):
         '''
         target: test describe index interface
@ -650,7 +632,6 @@ class TestIndexBinary:
         if "index_type" in file:
             assert file["index_type"] == get_jaccard_index["index_type"]

-    @pytest.mark.skip("get_collection_stats does not impl")
     def test_get_index_info_partition(self, connect, binary_collection, get_jaccard_index):
         '''
         target: test describe index interface
@ -679,7 +660,6 @@ class TestIndexBinary:
     ******************************************************************
     """

-    @pytest.mark.skip("get_collection_stats and drop_index do not impl")
     def test_drop_index(self, connect, binary_collection, get_jaccard_index):
         '''
         target: test drop index interface
@ -694,7 +674,6 @@ class TestIndexBinary:
         # assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
         assert not stats["partitions"][0]["segments"]

-    @pytest.mark.skip("get_collection_stats does not impl")
     def test_drop_index_partition(self, connect, binary_collection, get_jaccard_index):
         '''
         target: test drop index interface
@ -720,6 +699,7 @@ class TestIndexBinary:
         assert False


+@pytest.mark.skip("wait for debugging...")
 class TestIndexInvalid(object):
     """
     Test create / describe / drop index interfaces with invalid collection names
@ -758,6 +738,7 @@ class TestIndexInvalid(object):
         connect.create_index(collection, field_name, get_simple_index)


+@pytest.mark.skip("wait for debugging...")
 class TestIndexAsync:
     @pytest.fixture(scope="function", autouse=True)
     def skip_http_check(self, args):
@ -101,6 +101,7 @@ class TestInsertBase:
         connect.flush([collection])
         connect.drop_collection(collection)

+    @pytest.mark.skip("create_index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_create_index(self, connect, collection, get_simple_index):
         '''
@ -118,6 +119,7 @@ class TestInsertBase:
         if field["name"] == field_name:
             assert field["indexes"][0] == get_simple_index

+    @pytest.mark.skip("create_index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_after_create_index(self, connect, collection, get_simple_index):
         '''
@ -134,6 +136,7 @@ class TestInsertBase:
         if field["name"] == field_name:
             assert field["indexes"][0] == get_simple_index

+    @pytest.mark.skip(" todo fix search")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_search(self, connect, collection):
         '''
@ -310,6 +313,7 @@ class TestInsertBinary:
         connect.flush([binary_collection])
         assert connect.count_entities(binary_collection) == default_nb

+    @pytest.mark.skip("create index")
     def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
         '''
         target: test insert binary entities after build index
@ -326,6 +330,7 @@ class TestInsertBinary:
         if field["name"] == binary_field_name:
             assert field["indexes"][0] == get_binary_index

+    @pytest.mark.skip("create index")
     @pytest.mark.timeout(ADD_TIMEOUT)
     def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
         '''
@ -89,11 +89,10 @@ class TestSearchBase:
         params=gen_simple_index()
     )
     def get_simple_index(self, request, connect):
-        import copy
         if str(connect._cmd("mode")) == "CPU":
             if request.param["index_type"] in index_cpu_not_support():
                 pytest.skip("sq8h not support in CPU mode")
-        return copy.deepcopy(request.param)
+        return request.param

     @pytest.fixture(
         scope="function",
@ -256,7 +255,7 @@ class TestSearchBase:
         assert res2[0][0].id == res[0][1].id
         assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64")

-    # Pass
+    @pytest.mark.skip("search_after_index")
     @pytest.mark.level(2)
     def test_search_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
         '''
@ -303,7 +302,7 @@ class TestSearchBase:
         assert len(res) == nq
         assert len(res[0]) == default_top_k

-    # pass
+    @pytest.mark.skip("search_index_partition")
     @pytest.mark.level(2)
     def test_search_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
         '''
@ -334,7 +333,7 @@ class TestSearchBase:
         res = connect.search(collection, query, partition_tags=[default_tag])
         assert len(res) == nq

-    # PASS
+    @pytest.mark.skip("search_index_partition_B")
     @pytest.mark.level(2)
     def test_search_index_partition_B(self, connect, collection, get_simple_index, get_top_k, get_nq):
         '''
@ -384,7 +383,7 @@ class TestSearchBase:
         assert len(res) == nq
         assert len(res[0]) == 0

-    # PASS
+    @pytest.mark.skip("search_index_partitions")
     @pytest.mark.level(2)
     def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k):
         '''
@ -418,7 +417,7 @@ class TestSearchBase:
         assert res[0]._distances[0] > epsilon
         assert res[1]._distances[0] > epsilon

-    # Pass
+    @pytest.mark.skip("search_index_partitions_B")
     @pytest.mark.level(2)
     def test_search_index_partitions_B(self, connect, collection, get_simple_index, get_top_k):
         '''
@ -452,7 +451,7 @@ class TestSearchBase:
         assert res[0]._distances[0] < epsilon
         assert res[1]._distances[0] < epsilon

-    # pass
+    #
     # test for ip metric
     #
     # TODO: reopen after we supporting ip flat
@ -478,7 +477,7 @@ class TestSearchBase:
         with pytest.raises(Exception) as e:
             res = connect.search(collection, query)

-    # PASS
+    @pytest.mark.skip("search_ip_after_index")
     @pytest.mark.level(2)
     def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
         '''
@ -507,6 +506,7 @@ class TestSearchBase:
         assert check_id_result(res[0], ids[0])
         assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])

+    @pytest.mark.skip("search_ip_index_partition")
     @pytest.mark.level(2)
     def test_search_ip_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
         '''
@ -539,7 +539,7 @@ class TestSearchBase:
         res = connect.search(collection, query, partition_tags=[default_tag])
         assert len(res) == nq

-    # PASS
+    @pytest.mark.skip("search_ip_index_partitions")
     @pytest.mark.level(2)
     def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k):
         '''
@ -618,7 +618,7 @@ class TestSearchBase:
         res = connect.search(collection, query)
         assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])

-    # Pass
+    @pytest.mark.skip("search_distance_l2_after_index")
     def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
         '''
         target: search collection, and check the result: distance
@ -672,7 +672,7 @@ class TestSearchBase:
         res = connect.search(collection, query)
         assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon

-    # Pass
+    @pytest.mark.skip("search_distance_ip_after_index")
     def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
         '''
         target: search collection, and check the result: distance
@ -942,7 +942,7 @@ class TestSearchBase:
         assert res[i]._distances[0] < epsilon
         assert res[i]._distances[1] > epsilon

-    @pytest.mark.skip("test_query_entities_with_field_less_than_top_k")
+    @pytest.mark.skip("query_entities_with_field_less_than_top_k")
     def test_query_entities_with_field_less_than_top_k(self, connect, id_collection):
         """
         target: test search with field, and let return entities less than topk
@ -1741,7 +1741,8 @@ class TestSearchInvalid(object):
     def get_search_params(self, request):
         yield request.param

-    # Pass
+    # TODO: reopen after we supporting create index
+    @pytest.mark.skip("search_with_invalid_params")
     @pytest.mark.level(2)
     def test_search_with_invalid_params(self, connect, collection, get_simple_index, get_search_params):
         '''
@ -1762,7 +1763,8 @@ class TestSearchInvalid(object):
         with pytest.raises(Exception) as e:
             res = connect.search(collection, query)

-    # pass
+    # TODO: reopen after we supporting binary type
+    @pytest.mark.skip("search_with_invalid_params_binary")
     @pytest.mark.level(2)
     def test_search_with_invalid_params_binary(self, connect, binary_collection):
         '''
@ -1781,7 +1783,7 @@ class TestSearchInvalid(object):
         with pytest.raises(Exception) as e:
             res = connect.search(binary_collection, query)

-    # Pass
+    @pytest.mark.skip("search_with_empty_params")
     @pytest.mark.level(2)
     def test_search_with_empty_params(self, connect, collection, args, get_simple_index):
         '''
@ -55,7 +55,7 @@ default_index_params = [
     {"nlist": 128},
     {"nlist": 128},
     {"nlist": 128},
-    {"nlist": 128, "m": 16, "nbits": 8},
+    {"nlist": 128, "m": 16},
     {"M": 48, "efConstruction": 500},
     # {"search_length": 50, "out_degree": 40, "candidate_pool_size": 100, "knng": 50},
     {"n_trees": 50},
@ -281,9 +281,9 @@ def gen_entities(nb, is_normal=False):
 def gen_entities_new(nb, is_normal=False):
     vectors = gen_vectors(nb, default_dim, is_normal)
     entities = [
-        {"name": "int64", "type": DataType.INT64, "values": [i for i in range(nb)]},
-        {"name": "float", "type": DataType.FLOAT, "values": [float(i) for i in range(nb)]},
-        {"name": default_float_vec_field_name, "type": DataType.FLOAT_VECTOR, "values": vectors}
+        {"name": "int64", "values": [i for i in range(nb)]},
+        {"name": "float", "values": [float(i) for i in range(nb)]},
+        {"name": default_float_vec_field_name, "values": vectors}
     ]
     return entities