milvus/core/unittest/db/test_db.cpp
Jin Hai dab74700b2
Delete and WAL feature branch merge (#1436)
* add read/write lock

* change compact to ddl queue

* add api to get vector data

* add flush / merge / compact lock

* add api to get vector data

* add data size for table info

* add db recovery test

* add data_size check

* change file name to uppercase

Signed-off-by: jinhai <hai.jin@zilliz.com>

* update wal flush_merge_compact_mutex_

* update wal flush_merge_compact_mutex_

* change requirement

* change requirement

* upd requirement

* add logging

* add logging

* add logging

* add logging

* add logging

* add logging

* add logging

* add logging

* add logging

* delete part

* add all size checks

* fix bug

* update faiss get_vector_by_id

* add get_vector case

* update get vector by id

* update server

* fix DBImpl

* attempting to fix #1268

* lint

* update unit test

* fix #1259

* issue 1271 fix wal config

* update

* fix cases

Signed-off-by: del.zhenwu <zhenxiang.li@zilliz.com>

* update read / write error message

* update read / write error message

* [skip ci] get vectors by id from raw files instead of faiss

* [skip ci] update FilesByType meta

* update

* fix ci error

* update

* lint

* Hide partition_name parameter

* Remove douban pip source

Signed-off-by: zhenwu <zw@zilliz.com>

* Update epsilon value in test cases

Signed-off-by: zhenwu <zw@zilliz.com>

* Add default partition

* Caiyd crud (#1313)

* fix clang format

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* fix unittest build error

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* add faiss_bitset_test

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* avoid user directly operate partition table

* fix has table bug

* Caiyd crud (#1323)

* fix clang format

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* fix unittest build error

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* use compile option -O3

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* update faiss_bitset_test.cpp

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* change open flags

* change OngoingFileChecker to static instance

* mark ongoing files when applying deletes

* update clean up with ttl

* fix centos ci

* update

* lint

* update partition

Signed-off-by: zhenwu <zw@zilliz.com>

* update delete and flush to include partitions

* update

* Update cases

Signed-off-by: zhenwu <zw@zilliz.com>

* Fix test cases crud (#1350)

* fix order

* add wal case

Signed-off-by: sahuang <xiaohaix@student.unimelb.edu.au>

* fix wal case

Signed-off-by: sahuang <xiaohaix@student.unimelb.edu.au>

* fix wal case

Signed-off-by: sahuang <xiaohaix@student.unimelb.edu.au>

* fix wal case

Signed-off-by: sahuang <xiaohaix@student.unimelb.edu.au>

* fix invalid operation issue

Signed-off-by: sahuang <xiaohaix@student.unimelb.edu.au>

* fix invalid operation issue

Signed-off-by: sahuang <xiaohaix@student.unimelb.edu.au>

* fix bug

Signed-off-by: sahuang <xiaohaix@student.unimelb.edu.au>

* fix bug

Signed-off-by: sahuang <xiaohaix@student.unimelb.edu.au>

* crud fix

Signed-off-by: sahuang <xiaohaix@student.unimelb.edu.au>

* crud fix

Signed-off-by: sahuang <xiaohaix@student.unimelb.edu.au>

* add table info test cases

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix cases

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix cases

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix cases

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix cases

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix cases

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>
Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* merge cases

Signed-off-by: zhenwu <zw@zilliz.com>

* Shengjun (#1349)

* Add GPU sharing solution on native Kubernetes  (#1102)

* run hadolint with reviewdog

* add LICENSE in Dockerfile

* run hadolint with reviewdog

* Reporter of reviewdog command is "github-pr-check"

* format Dockerfile

* ignore DL3007 in hadolint

* clean up old docker images

* Add GPU sharing solution on native Kubernetes

* nightly test mailer

* Fix http server bug (#1096)

* refactoring(create_table done)

* refactoring

* refactor server delivery (insert done)

* refactoring server module (count_table done)

* server refactor done

* cmake pass

* refactor server module done.

* set grpc response status correctly

* format done.

* fix redefine ErrorMap()

* optimize insert reducing ids data copy

* optimize grpc request with reducing data copy

* clang format

* [skip ci] Refactor server module done. update changelog. prepare for PR

* remove explicit and change int32_t to int64_t

* add web server

* [skip ci] add license in web module

* modify header include & comment oatpp environment config

* add port configure & create table in handler

* modify web url

* simple url compilation done & add swagger

* make sure web url

* web functionality done. debugging

* add web unittest

* web test pass

* add web server port

* add web server port in template

* update unittest cmake file

* change web server default port to 19121

* rename method in web module & unittest pass

* add search case in unittest for web module

* rename some variables

* fix bug

* unittest pass

* web prepare

* fix cmd bug(check server status)

* update changelog

* add web port validate & default set

* clang-format pass

* add web port test in unittest

* add CORS & redirect root to swagger ui

* add web status

* web table method func cascade test pass

* add config url in web module

* modify thirdparty cmake to avoid building oatpp test

* clang format

* update changelog

* add constants in web module

* reserve Config.cpp

* fix constants reference bug

* replace web server with async module

* modify component to support async

* format

* developing controller & add test client into unittest

* add web port into demo/server_config

* modify thirdparty cmake to allow build test

* remove  unnecessary comment

* add endpoint info in controller

* finish web test(bug here)

* clang format

* add web test cpp to lint exclusions

* check null field in GetConfig

* add macro RETURN STATUS DTo

* fix cmake conflict

* fix crash when exit server

* remove surplus comments & add http param check

* add uri /docs to direct swagger

* format

* change cmd to system

* add default value & unittest in web module

* add macros to judge if GPU supported

* add macros in unit & add default in index dto & print error message when bind http port fail

* format (fix #788)

* fix cors bug (not completed)

* comment cors

* change web framework to simple api

* comments optimize

* change to simple API

* remove comments in controller.hpp

* remove EP_COMMON_CMAKE_ARGS in oatpp and oatpp-swagger

* add ep cmake args to sqlite

* clang-format

* change a format

* test pass

* change name to

* fix compiler issue(oatpp-swagger depend on oatpp)

* add & in start_server.h

* specify lib location with oatpp and oatpp-swagger

* add comments

* add swagger definition

* [skip ci] change http method options status code

* remove oatpp swagger(fix #970)

* remove comments

* check Start web behavior

* add default to cpu_cache_capacity

* remove swagger component.hpp & /docs url

* remove /docs info

* remove /docs in unittest

* remove space in test rpc

* remove repeated info in CHANGELOG

* change cache_insert_data default value as a constant

* [skip ci] Fix some broken links (#960)

* [skip ci] Fix broken link

* [skip ci] Fix broken link

* [skip ci] Fix broken link

* [skip ci] Fix broken links

* fix issue 373 (#964)

* fix issue 373

* Adjustment format

* Adjustment format

* Adjustment format

* change readme

* #966 update NOTICE.md (#967)

* adjust web port config place

* rename web_port variable

* change gpu resources invoke way to cmd()

* set advanced config name add DEFAULT

* change config setting to cmd

* modify ..

* optimize code

* assign TableDto' count default value 0 (fix #995)

* check if table exists when show partitions (fix #1028)

* check table exists when drop partition (fix #1029)

* check if partition name is legal (fix #1022)

* modify status code when partition tag is illegal

* update changelog

* add info to /system url

* add binary index and add bin uri & handler method(not completed)

* optimize http insert and search time(fix #1066) | add binary vectors support(fix #1067)

* fix test partition bug

* fix test bug when check insert records

* add binary vectors test

* add default for offset and page_size

* fix unittest bug

* [skip ci] remove comments

* optimize web code for PR comments

* add new folder named utils

* check offset and pagesize (fix #1082)

* improve error message if offset or page_size is not legal (fix #1075)

* add log into web module

* update changelog

* check gpu sources setting when assign repeated value (fix #990)

* update changelog

* clang-format pass

* add default handler in http handler

* [skip ci] improve error msg when check gpu resources

* change check offset way

* remove func IsIntStr

* add case

* change int32 to int64 when check number str

* add log in web module (doing)

* update test case

* add log in web controller

Co-authored-by: jielinxu <52057195+jielinxu@users.noreply.github.com>
Co-authored-by: JackLCL <53512883+JackLCL@users.noreply.github.com>
Co-authored-by: Cai Yudong <yudong.cai@zilliz.com>

* Filtering for specific paths in Jenkins CI  (#1107)

* Fix Filtering for specific paths in Jenkins CI bug (#1109)

* Fix Filtering for specific paths in Jenkins CI bug (#1110)

* Don't skip ci when triggered by a time (#1113)

* Set default sending to Milvus Dev mail group  (#1121)

* Support hnsw (#1131)

* add hnsw

* add config

* format...

* format..

* Remove test.template (#1129)

* Update framework

* remove files

* Remove files

* Remove ann-acc cases && Update java-sdk cases

* change cn to en

* [skip ci] remove doc test

* [skip ci] change cn to en

* Case stability

* Add mail notification when test failed

* Add main notification

* Add main notification

* gen milvus instance from utils

* Distable case with multiprocess

* Add mail notification when nightly test failed

* add milvus handler param

* add http handler

* Remove test.template

Co-authored-by: quicksilver <zhifeng.zhang@zilliz.com>

* Add doc for the RESTful API / Update contributor number in Milvus readme (#1100)

* [skip ci] Update contributor number.

* [skip ci] Add RESTful API doc.

* [skip ci] Some updates.

* [skip ci] Change port to 19121.

* [skip ci] Update README.md.

Update the descriptions for OPTIONS.

* Update README.md

Fix a typo.

* #1105 update error message when creating IVFSQ8H index without GPU resources (#1117)

* [skip ci] Update README (#1104)

* remove Nvidia owned files from faiss (#1136)

* #1135 remove Nvidia owned files from faiss

* Revert "#1135 remove Nvidia owned files from faiss"

This reverts commit 3bc007c28c8df5861fdd0452fd64c0e2e719eda2.

* #1135 remove Nvidia API implementation

* #1135 remove Nvidia owned files from faiss

* Update CODE_OF_CONDUCT.md (#1163)

* Improve codecov (#1095)

* Optimize config test. Dir src/config 99% lines covered

* add unittest coverage

* optimize cache&config unittest

* code format

* format

* format code

* fix merge conflict

* cover src/utils unittest

* #831 fix exe_path judge error

* #831 fix exe_path judge error

* add some unittest coverage

* add some unittest coverage

* improve coverage of src/wrapper

* improve src/wrapper coverage

* *test optimize db/meta unittest

* fix bug

* *test optimize mysqlMetaImpl unittest

* *style: format code

* import server& scheduler unittest coverage

* handover next work

* *test: add some test_meta test case

* *format code

* *fix: fix typo

* feat(codecov): improve code coverage for src/db(#872)

* feat(codecov): improve code coverage for src/db/engine(#872)

* feat(codecov): improve code coverage(#872)

* fix config unittest bug

* feat(codecov): improve code coverage core/db/engine(#872)

* feat(codecov): improve code coverage core/knowhere

* feat(codecov): improve code coverage core/knowhere

* feat(codecov): improve code coverage

* feat(codecov): fix cpu test some error

* feat(codecov): improve code coverage

* feat(codecov): rename some fiu

* fix(db/meta): fix switch/case default action

* feat(codecov): improve code coverage(#872)
* fix error caused by merge code
* format code

* feat(codecov): improve code coverage & format code(#872)

* feat(codecov): fix test error(#872)

* feat(codecov): fix unittest test_mem(#872)

* feat(codecov): fix unittest(#872)

* feat(codecov): fix unittest for resource manager(#872)

* feat(codecov): code format (#872)

* feat(codecov): trigger ci(#872)

* fix(RequestScheduler): remove a wrong sleep statement

* test(test_rpc): fix rpc test

* Fix format issue

* Remove unused comments

* Fix unit test error

Co-authored-by: ABNER-1 <ABNER-1@users.noreply.github.com>
Co-authored-by: Jin Hai <hai.jin@zilliz.com>

* Support run dev test with http handler in python SDK (#1116)

* remove surplus dot

* add preload into /system/

* change get_milvus() to get_milvus(args['handler'])

* support load table into memory with http server (fix #1115)

* [skip ci] comment surplus dto in VectorDto

Co-authored-by: jielinxu <52057195+jielinxu@users.noreply.github.com>
Co-authored-by: JackLCL <53512883+JackLCL@users.noreply.github.com>
Co-authored-by: Cai Yudong <yudong.cai@zilliz.com>

* Fix #1140 (#1162)

* fix

Signed-off-by: Nicky <nicky.xj.lin@gmail.com>

* update...

Signed-off-by: Nicky <nicky.xj.lin@gmail.com>

* fix2

Signed-off-by: Nicky <nicky.xj.lin@gmail.com>

* fix3

Signed-off-by: Nicky <nicky.xj.lin@gmail.com>

* update changelog

Signed-off-by: Nicky <nicky.xj.lin@gmail.com>

* Update INSTALL.md (#1175)

* Update INSTALL.md

1. Change image tag and Milvus source code to latest.
2. Fix a typo

Signed-off-by: Lu Wang <yamasite@qq.com>

* Update INSTALL.md

Signed-off-by: lu.wang <yamasite@qq.com>

* add Tanimoto ground truth (#1138)

* add milvus ground truth

* add milvus groundtruth

* [skip ci] add milvus ground truth

* [skip ci]add tanimoto ground truth

* fix mix case bug (#1208)

* fix mix case bug

Signed-off-by: del.zhenwu <zhenxiang.li@zilliz.com>

* Remove case.md

Signed-off-by: del.zhenwu <zhenxiang.li@zilliz.com>

* Update README.md (#1206)

Add LFAI mailing lists.

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Add design.md to store links to design docs (#1219)

* Update README.md

Add link to Milvus design docs

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Create design.md

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Update design.md

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Add troubleshooting info about libmysqlpp.so.3 error (#1225)

* Update INSTALL.md

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Update INSTALL.md

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Update README.md (#1233)

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* #1240 Update license declaration of each file (#1241)

* #1240 Update license declaration of each files

Signed-off-by: jinhai <hai.jin@zilliz.com>

* #1240 Update CHANGELOG

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Update README.md (#1258)

Add Jenkins master badge.

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Update INSTALL.md (#1265)

Fix indentation.

* support CPU profiling (#1251)

* #1250 support CPU profiling

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* #1250 fix code coverage

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* Fix HNSW crash (#1262)

* fix

Signed-off-by: xiaojun.lin <xiaojun.lin@zilliz.com>

* update.

Signed-off-by: xiaojun.lin <xiaojun.lin@zilliz.com>

* Add troubleshooting information for INSTALL.md and enhance readability (#1274)

* Update INSTALL.md

1. Add new troubleshooting message;
2. Enhance readability.

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Update INSTALL.md

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Update INSTALL.md

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Update INSTALL.md

Add CentOS link.

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* Create COMMUNITY.md (#1292)

Signed-off-by: Lutkin Wang <yamasite@qq.com>

* fix gtest

* add copyright

* fix gtest

* MERGE_NOT_YET

* fix lint

Co-authored-by: quicksilver <zhifeng.zhang@zilliz.com>
Co-authored-by: BossZou <40255591+BossZou@users.noreply.github.com>
Co-authored-by: jielinxu <52057195+jielinxu@users.noreply.github.com>
Co-authored-by: JackLCL <53512883+JackLCL@users.noreply.github.com>
Co-authored-by: Cai Yudong <yudong.cai@zilliz.com>
Co-authored-by: Tinkerrr <linxiaojun.cn@outlook.com>
Co-authored-by: del-zhenwu <56623710+del-zhenwu@users.noreply.github.com>
Co-authored-by: Lutkin Wang <yamasite@qq.com>
Co-authored-by: shengjh <46514371+shengjh@users.noreply.github.com>
Co-authored-by: ABNER-1 <ABNER-1@users.noreply.github.com>
Co-authored-by: Jin Hai <hai.jin@zilliz.com>
Co-authored-by: shiyu22 <cshiyu22@gmail.com>

* #1302 Get all record IDs in a segment by given a segment id

* Remove query time ranges

Signed-off-by: zhenwu <zw@zilliz.com>

* #1295 let wal enable by default

* fix cases

Signed-off-by: zhenwu <zw@zilliz.com>

* fix partition cases

Signed-off-by: zhenwu <zw@zilliz.com>

* [skip ci] update test_db

* update

* fix case bug

Signed-off-by: zhenwu <zw@zilliz.com>

* lint

* fix test case failures

* remove some code

* Caiyd crud 1 (#1377)

* fix clang format

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* fix unittest build error

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* fix build issue when enable profiling

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* fix hastable bug

* update bloom filter

* update

* benchmark

* update benchmark

* update

* update

* remove wal record size

Signed-off-by: shengjun.li <shengjun.li@zilliz.com>

* remove wal record size config

Signed-off-by: shengjun.li <shengjun.li@zilliz.com>

* update apply deletes: switch to binary search

* update sdk_simple

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* update apply deletes: switch to binary search

* add test_search_by_id

Signed-off-by: zhenwu <zw@zilliz.com>

* add more log

* flush error with multi same ids

Signed-off-by: zhenwu <zw@zilliz.com>

* modify wal config

Signed-off-by: shengjun.li <shengjun.li@zilliz.com>

* update

* add binary search_by_id

* fix case bug

Signed-off-by: zhenwu <zw@zilliz.com>

* update cases

Signed-off-by: zhenwu <zw@zilliz.com>

* fix unit test #1395

* improve merge performance

* add uids_ for VectorIndex to improve search performance

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* fix error

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* update

* fix search

* fix record num

Signed-off-by: shengjun.li <shengjun.li@zilliz.com>

* refine code

* refine code

* Add get_vector_ids test cases (#1407)

* add to compact case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* add to compact case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* add to compact case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* add case and debug compact

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* test pdb

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* test pdb

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* test pdb

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix cases

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update table_info case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update table_info case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update table_info case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update get vector ids case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update get vector ids case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update get vector ids case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update get vector ids case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* update case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* pdb test

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* pdb test

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* add tests for get_vector_ids

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix case

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* add binary and ip

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix binary index

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* fix pdb

Signed-off-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>

* #1408 fix incorrect search result after DeleteById

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* add one case

* delete failed segment

* update serialize

* update serialize

* fix case

Signed-off-by: zhenwu <zw@zilliz.com>

* update

* update case assertion

Signed-off-by: zhenwu <zw@zilliz.com>

* [skip ci] update config

* change bloom filter msync flag to async

* #1319 add more timing debug info

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* update

* update

* add normalize

Signed-off-by: zhenwu <zw@zilliz.com>

* add normalize

Signed-off-by: zhenwu <zw@zilliz.com>

* add normalize

Signed-off-by: zhenwu <zw@zilliz.com>

* Fix compiling error

Signed-off-by: jinhai <hai.jin@zilliz.com>

* support ip (#1383)

* support ip

Signed-off-by: xiaojun.lin <xiaojun.lin@zilliz.com>

* IP result distance sort by descend

Signed-off-by: Nicky <nicky.xj.lin@gmail.com>

* update

Signed-off-by: Nicky <nicky.xj.lin@gmail.com>

* format

Signed-off-by: xiaojun.lin <xiaojun.lin@zilliz.com>

* get table lsn

* Remove unused third party

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Refine code

Signed-off-by: jinhai <hai.jin@zilliz.com>

* #1319 fix clang format

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* fix wal applied lsn

Signed-off-by: shengjun.li <shengjun.li@zilliz.com>

* validate partition tag

* #1319 improve search performance

Signed-off-by: yudong.cai <yudong.cai@zilliz.com>

* build error

Co-authored-by: Zhiru Zhu <youny626@hotmail.com>
Co-authored-by: groot <yihua.mo@zilliz.com>
Co-authored-by: Xiaohai Xu <xiaohaix@student.unimelb.edu.au>
Co-authored-by: shengjh <46514371+shengjh@users.noreply.github.com>
Co-authored-by: del-zhenwu <56623710+del-zhenwu@users.noreply.github.com>
Co-authored-by: shengjun.li <49774184+shengjun1985@users.noreply.github.com>
Co-authored-by: Cai Yudong <yudong.cai@zilliz.com>
Co-authored-by: quicksilver <zhifeng.zhang@zilliz.com>
Co-authored-by: BossZou <40255591+BossZou@users.noreply.github.com>
Co-authored-by: jielinxu <52057195+jielinxu@users.noreply.github.com>
Co-authored-by: JackLCL <53512883+JackLCL@users.noreply.github.com>
Co-authored-by: Tinkerrr <linxiaojun.cn@outlook.com>
Co-authored-by: Lutkin Wang <yamasite@qq.com>
Co-authored-by: ABNER-1 <ABNER-1@users.noreply.github.com>
Co-authored-by: shiyu22 <cshiyu22@gmail.com>
2020-02-29 16:11:31 +08:00


// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include <fiu-control.h>
#include <fiu-local.h>
#include <gtest/gtest.h>
#include <boost/filesystem.hpp>
#include <random>
#include <thread>
#include "cache/CpuCacheMgr.h"
#include "db/Constants.h"
#include "db/DB.h"
#include "db/DBFactory.h"
#include "db/DBImpl.h"
#include "db/IDGenerator.h"
#include "db/meta/MetaConsts.h"
#include "db/utils.h"
#include "server/Config.h"
#include "utils/CommonUtil.h"

namespace {

static const char* TABLE_NAME = "test_group";
static constexpr int64_t TABLE_DIM = 256;
static constexpr int64_t VECTOR_COUNT = 25000;
static constexpr int64_t INSERT_LOOP = 1000;
static constexpr int64_t SECONDS_EACH_HOUR = 3600;
static constexpr int64_t DAY_SECONDS = 24 * 60 * 60;

milvus::engine::meta::TableSchema
BuildTableSchema() {
    milvus::engine::meta::TableSchema table_info;
    table_info.dimension_ = TABLE_DIM;
    table_info.table_id_ = TABLE_NAME;
    return table_info;
}

void
BuildVectors(uint64_t n, uint64_t batch_index, milvus::engine::VectorsData& vectors) {
    vectors.vector_count_ = n;
    vectors.float_data_.clear();
    vectors.float_data_.resize(n * TABLE_DIM);
    float* data = vectors.float_data_.data();
    for (uint64_t i = 0; i < n; i++) {
        for (int64_t j = 0; j < TABLE_DIM; j++) data[TABLE_DIM * i + j] = drand48();
        data[TABLE_DIM * i] += i / 2000.;
        vectors.id_array_.push_back(n * batch_index + i);
    }
    // milvus::engine::SimpleIDGenerator id_gen;
    // id_gen.GetNextIDNumbers(n, vectors.id_array_);
}

std::string
CurrentTmDate(int64_t offset_day = 0) {
    time_t tt;
    time(&tt);
    tt = tt + 8 * SECONDS_EACH_HOUR;
    tt = tt + 24 * SECONDS_EACH_HOUR * offset_day;

    tm t;
    gmtime_r(&tt, &t);

    std::string str =
        std::to_string(t.tm_year + 1900) + "-" + std::to_string(t.tm_mon + 1) + "-" + std::to_string(t.tm_mday);
    return str;
}

void
ConvertTimeRangeToDBDates(const std::string& start_value, const std::string& end_value,
                          std::vector<milvus::engine::meta::DateT>& dates) {
    dates.clear();

    time_t tt_start, tt_end;
    tm tm_start, tm_end;
    if (!milvus::server::CommonUtil::TimeStrToTime(start_value, tt_start, tm_start)) {
        return;
    }
    if (!milvus::server::CommonUtil::TimeStrToTime(end_value, tt_end, tm_end)) {
        return;
    }

    int64_t days = (tt_end > tt_start) ? (tt_end - tt_start) / DAY_SECONDS : (tt_start - tt_end) / DAY_SECONDS;
    if (days == 0) {
        return;
    }

    for (int64_t i = 0; i < days; i++) {
        time_t tt_day = tt_start + DAY_SECONDS * i;
        tm tm_day;
        milvus::server::CommonUtil::ConvertTime(tt_day, tm_day);

        int64_t date = tm_day.tm_year * 10000 + tm_day.tm_mon * 100 + tm_day.tm_mday;  // according to db logic
        dates.push_back(date);
    }
}

}  // namespace
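
// The DBTest fixture (presumably defined in db/utils.h, included above) is
// assumed to supply the db_ instance, dummy_context_, and GetOptions() that
// all of the tests below rely on.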

TEST_F(DBTest, CONFIG_TEST) {
    {
        ASSERT_ANY_THROW(milvus::engine::ArchiveConf conf("wrong"));
        /* EXPECT_DEATH(engine::ArchiveConf conf("wrong"), ""); */
    }
    {
        milvus::engine::ArchiveConf conf("delete");
        ASSERT_EQ(conf.GetType(), "delete");
        auto criterias = conf.GetCriterias();
        ASSERT_EQ(criterias.size(), 0);
    }
    {
        milvus::engine::ArchiveConf conf("swap");
        ASSERT_EQ(conf.GetType(), "swap");
        auto criterias = conf.GetCriterias();
        ASSERT_EQ(criterias.size(), 0);
    }
    {
        fiu_init(0);
        fiu_enable("ArchiveConf.ParseCritirias.OptionsParseCritiriasOutOfRange", 1, NULL, 0);
        ASSERT_ANY_THROW(milvus::engine::ArchiveConf conf("swap", "disk:"));
        fiu_disable("ArchiveConf.ParseCritirias.OptionsParseCritiriasOutOfRange");
    }
    {
        fiu_enable("ArchiveConf.ParseCritirias.empty_tokens", 1, NULL, 0);
        milvus::engine::ArchiveConf conf("swap", "");
        ASSERT_TRUE(conf.GetCriterias().empty());
        fiu_disable("ArchiveConf.ParseCritirias.empty_tokens");
    }
    {
        ASSERT_ANY_THROW(milvus::engine::ArchiveConf conf1("swap", "disk:"));
        ASSERT_ANY_THROW(milvus::engine::ArchiveConf conf2("swap", "disk:a"));
        milvus::engine::ArchiveConf conf("swap", "disk:1024");
        auto criterias = conf.GetCriterias();
        ASSERT_EQ(criterias.size(), 1);
        ASSERT_EQ(criterias["disk"], 1024);
    }
    {
        ASSERT_ANY_THROW(milvus::engine::ArchiveConf conf1("swap", "days:"));
        ASSERT_ANY_THROW(milvus::engine::ArchiveConf conf2("swap", "days:a"));
        milvus::engine::ArchiveConf conf("swap", "days:100");
        auto criterias = conf.GetCriterias();
        ASSERT_EQ(criterias.size(), 1);
        ASSERT_EQ(criterias["days"], 100);
    }
    {
        ASSERT_ANY_THROW(milvus::engine::ArchiveConf conf1("swap", "days:"));
        ASSERT_ANY_THROW(milvus::engine::ArchiveConf conf2("swap", "days:a"));
        milvus::engine::ArchiveConf conf("swap", "days:100;disk:200");
        auto criterias = conf.GetCriterias();
        ASSERT_EQ(criterias.size(), 2);
        ASSERT_EQ(criterias["days"], 100);
        ASSERT_EQ(criterias["disk"], 200);
    }
}

TEST_F(DBTest, DB_TEST) {
    milvus::engine::meta::TableSchema table_info = BuildTableSchema();
    auto stat = db_->CreateTable(table_info);

    milvus::engine::meta::TableSchema table_info_get;
    table_info_get.table_id_ = TABLE_NAME;
    stat = db_->DescribeTable(table_info_get);
    ASSERT_TRUE(stat.ok());
    ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);

    uint64_t qb = 5;
    milvus::engine::VectorsData qxb;
    BuildVectors(qb, 0, qxb);

    std::thread search([&]() {
        milvus::engine::ResultIds result_ids;
        milvus::engine::ResultDistances result_distances;
        int k = 10;
        std::this_thread::sleep_for(std::chrono::seconds(2));

        INIT_TIMER;
        std::stringstream ss;
        uint64_t count = 0;
        uint64_t prev_count = 0;

        for (auto j = 0; j < 10; ++j) {
            ss.str("");
            db_->Size(count);
            prev_count = count;

            START_TIMER;

            std::vector<std::string> tags;
            stat = db_->Query(dummy_context_, TABLE_NAME, tags, k, 10, qxb, result_ids, result_distances);
            ss << "Search " << j << " With Size " << count / milvus::engine::M << " M";
            STOP_TIMER(ss.str());

            ASSERT_TRUE(stat.ok());
            ASSERT_EQ(result_ids.size(), qb * k);
            for (auto i = 0; i < qb; ++i) {
                ss.str("");
                ss << "Result [" << i << "]:";
                for (auto t = 0; t < k; t++) {
                    ss << result_ids[i * k + t] << " ";
                }
                /* LOG(DEBUG) << ss.str(); */
            }
            ASSERT_TRUE(count >= prev_count);
            std::this_thread::sleep_for(std::chrono::seconds(1));
        }
    });

    int loop = INSERT_LOOP;
    for (auto i = 0; i < loop; ++i) {
        if (i == 40) {
            db_->InsertVectors(TABLE_NAME, "", qxb);
            ASSERT_EQ(qxb.id_array_.size(), qb);
        } else {
            uint64_t nb = 50;
            milvus::engine::VectorsData xb;
            BuildVectors(nb, i, xb);
            db_->InsertVectors(TABLE_NAME, "", xb);
            ASSERT_EQ(xb.id_array_.size(), nb);
        }
        stat = db_->Flush();
        ASSERT_TRUE(stat.ok());
        std::this_thread::sleep_for(std::chrono::microseconds(1));
    }

    search.join();

    uint64_t count;
    stat = db_->GetTableRowCount(TABLE_NAME, count);
    ASSERT_TRUE(stat.ok());
    ASSERT_GT(count, 0);

    // test building the db with invalid backend options
    {
        auto options = GetOptions();
        options.meta_.backend_uri_ = "dummy";
        ASSERT_ANY_THROW(milvus::engine::DBFactory::Build(options));
        options.meta_.backend_uri_ = "mysql://root:123456@127.0.0.1:3306/test";
        ASSERT_ANY_THROW(milvus::engine::DBFactory::Build(options));
        options.meta_.backend_uri_ = "dummy://root:123456@127.0.0.1:3306/test";
        ASSERT_ANY_THROW(milvus::engine::DBFactory::Build(options));
    }
}

TEST_F(DBTest, SEARCH_TEST) {
    milvus::scheduler::OptimizerInst::GetInstance()->Init();
    std::string config_path(CONFIG_PATH);
    config_path += CONFIG_FILE;
    milvus::server::Config& config = milvus::server::Config::GetInstance();
    milvus::Status s = config.LoadConfigFile(config_path);

    milvus::engine::meta::TableSchema table_info = BuildTableSchema();
    auto stat = db_->CreateTable(table_info);

    milvus::engine::meta::TableSchema table_info_get;
    table_info_get.table_id_ = TABLE_NAME;
    stat = db_->DescribeTable(table_info_get);
    ASSERT_TRUE(stat.ok());
    ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);

    // prepare raw data
    size_t nb = VECTOR_COUNT;
    size_t nq = 10;
    size_t k = 5;
    milvus::engine::VectorsData xb, xq;
    xb.vector_count_ = nb;
    xb.float_data_.resize(nb * TABLE_DIM);
    xq.vector_count_ = nq;
    xq.float_data_.resize(nq * TABLE_DIM);
    xb.id_array_.resize(nb);

    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<> dis_xt(-1.0, 1.0);
    for (size_t i = 0; i < nb * TABLE_DIM; i++) {
        xb.float_data_[i] = dis_xt(gen);
        if (i < nb) {
            xb.id_array_[i] = i;
        }
    }
    for (size_t i = 0; i < nq * TABLE_DIM; i++) {
        xq.float_data_[i] = dis_xt(gen);
    }

    // result data
    // std::vector<long> nns_gt(k*nq);
    std::vector<int64_t> nns(k * nq);  // nns = nearest neighbor search results
    // std::vector<float> dis_gt(k*nq);
    std::vector<float> dis(k * nq);

    // insert data
    stat = db_->InsertVectors(TABLE_NAME, "", xb);
    ASSERT_TRUE(stat.ok());

    milvus::engine::TableIndex index;
    index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP;
    db_->CreateIndex(TABLE_NAME, index);  // wait until the index build finishes
    {
        std::vector<std::string> tags;
        milvus::engine::ResultIds result_ids;
        milvus::engine::ResultDistances result_distances;
        stat = db_->Query(dummy_context_, TABLE_NAME, tags, k, 10, xq, result_ids, result_distances);
        ASSERT_TRUE(stat.ok());
    }

    index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
    db_->CreateIndex(TABLE_NAME, index);  // wait until the index build finishes
    {
        std::vector<std::string> tags;
        milvus::engine::ResultIds result_ids;
        milvus::engine::ResultDistances result_distances;
        stat = db_->Query(dummy_context_, TABLE_NAME, tags, k, 10, xq, result_ids, result_distances);
        ASSERT_TRUE(stat.ok());
    }

    index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
    db_->CreateIndex(TABLE_NAME, index);  // wait until the index build finishes
    {
        std::vector<std::string> tags;
        milvus::engine::ResultIds result_ids;
        milvus::engine::ResultDistances result_distances;
        stat = db_->Query(dummy_context_, TABLE_NAME, tags, k, 10, xq, result_ids, result_distances);
        ASSERT_TRUE(stat.ok());
    }

#ifdef CUSTOMIZATION
#ifdef MILVUS_GPU_VERSION
    index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8H;
    db_->CreateIndex(TABLE_NAME, index);  // wait until the index build finishes
    {
        std::vector<std::string> tags;
        milvus::engine::ResultIds result_ids;
        milvus::engine::ResultDistances result_distances;
        stat = db_->Query(dummy_context_, TABLE_NAME, tags, k, 10, xq, result_ids, result_distances);
        ASSERT_TRUE(stat.ok());
    }
#endif
#endif

    {  // search by specifying index files
        std::vector<std::string> file_ids;
        // Sometimes this case runs fast enough that files are merged and indexed and the
        // old files are deleted immediately, so QueryByFileID cannot find any file to
        // search. Pass 100 file ids to avoid random failures of this case.
        for (int i = 0; i < 100; i++) {
            file_ids.push_back(std::to_string(i));
        }
        milvus::engine::ResultIds result_ids;
        milvus::engine::ResultDistances result_distances;
        stat = db_->QueryByFileID(dummy_context_, TABLE_NAME, file_ids, k, 10, xq, result_ids, result_distances);
        ASSERT_TRUE(stat.ok());

        FIU_ENABLE_FIU("SqliteMetaImpl.FilesToSearch.throw_exception");
        stat = db_->QueryByFileID(dummy_context_, TABLE_NAME, file_ids, k, 10, xq, result_ids, result_distances);
        ASSERT_FALSE(stat.ok());
        fiu_disable("SqliteMetaImpl.FilesToSearch.throw_exception");

        FIU_ENABLE_FIU("DBImpl.QueryByFileID.empty_files_array");
        stat = db_->QueryByFileID(dummy_context_, TABLE_NAME, file_ids, k, 10, xq, result_ids, result_distances);
        ASSERT_FALSE(stat.ok());
        fiu_disable("DBImpl.QueryByFileID.empty_files_array");
    }

    // TODO(zhiru): PQ build takes forever
#if 0
    {
        std::vector<std::string> tags;
        milvus::engine::ResultIds result_ids;
        milvus::engine::ResultDistances result_distances;
        stat = db_->Query(dummy_context_, TABLE_NAME, tags, k, 10, xq, result_ids, result_distances);
        ASSERT_TRUE(stat.ok());
        stat = db_->Query(dummy_context_, TABLE_NAME, tags, k, 10, xq, result_ids, result_distances);
        ASSERT_TRUE(stat.ok());
        FIU_ENABLE_FIU("SqliteMetaImpl.FilesToSearch.throw_exception");
        stat = db_->Query(dummy_context_, TABLE_NAME, tags, k, 10, xq, result_ids, result_distances);
        ASSERT_FALSE(stat.ok());
        fiu_disable("SqliteMetaImpl.FilesToSearch.throw_exception");
    }
#endif

#ifdef CUSTOMIZATION
#ifdef MILVUS_GPU_VERSION
    // test FAISS_IVFSQ8H optimizer
    index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8H;
    db_->CreateIndex(TABLE_NAME, index);  // wait until the index build finishes
    std::vector<std::string> partition_tag;
    milvus::engine::ResultIds result_ids;
    milvus::engine::ResultDistances result_dists;

    {
        result_ids.clear();
        result_dists.clear();
        stat = db_->Query(dummy_context_, TABLE_NAME, partition_tag, k, 10, xq, result_ids, result_dists);
        ASSERT_TRUE(stat.ok());
    }

    {  // search by specifying index files
        std::vector<std::string> file_ids;
        // Sometimes this case runs fast enough that files are merged and indexed and the
        // old files are deleted immediately, so QueryByFileID cannot find any file to
        // search. Pass 100 file ids to avoid random failures of this case.
        for (int i = 0; i < 100; i++) {
            file_ids.push_back(std::to_string(i));
        }
        result_ids.clear();
        result_dists.clear();
        stat = db_->QueryByFileID(dummy_context_, TABLE_NAME, file_ids, k, 10, xq, result_ids, result_dists);
        ASSERT_TRUE(stat.ok());
    }
#endif
#endif
}

TEST_F(DBTest, PRELOADTABLE_TEST) {
    fiu_init(0);

    milvus::engine::meta::TableSchema table_info = BuildTableSchema();
    auto stat = db_->CreateTable(table_info);

    milvus::engine::meta::TableSchema table_info_get;
    table_info_get.table_id_ = TABLE_NAME;
    stat = db_->DescribeTable(table_info_get);
    ASSERT_TRUE(stat.ok());
    ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);

    int loop = 5;
    for (auto i = 0; i < loop; ++i) {
        uint64_t nb = VECTOR_COUNT;
        milvus::engine::VectorsData xb;
        BuildVectors(nb, i, xb);
        db_->InsertVectors(TABLE_NAME, "", xb);
        ASSERT_EQ(xb.id_array_.size(), nb);
    }

    milvus::engine::TableIndex index;
    index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP;
    db_->CreateIndex(TABLE_NAME, index);  // wait until the index build finishes

    int64_t prev_cache_usage = milvus::cache::CpuCacheMgr::GetInstance()->CacheUsage();
    stat = db_->PreloadTable(TABLE_NAME);
    ASSERT_TRUE(stat.ok());
    int64_t cur_cache_usage = milvus::cache::CpuCacheMgr::GetInstance()->CacheUsage();
    ASSERT_TRUE(prev_cache_usage < cur_cache_usage);

    FIU_ENABLE_FIU("SqliteMetaImpl.FilesToSearch.throw_exception");
    stat = db_->PreloadTable(TABLE_NAME);
    ASSERT_FALSE(stat.ok());
    fiu_disable("SqliteMetaImpl.FilesToSearch.throw_exception");

    // create a partition
    stat = db_->CreatePartition(TABLE_NAME, "part0", "0");
    ASSERT_TRUE(stat.ok());
    stat = db_->PreloadTable(TABLE_NAME);
    ASSERT_TRUE(stat.ok());

    FIU_ENABLE_FIU("DBImpl.PreloadTable.null_engine");
    stat = db_->PreloadTable(TABLE_NAME);
    ASSERT_FALSE(stat.ok());
    fiu_disable("DBImpl.PreloadTable.null_engine");

    FIU_ENABLE_FIU("DBImpl.PreloadTable.exceed_cache");
    stat = db_->PreloadTable(TABLE_NAME);
    ASSERT_FALSE(stat.ok());
    fiu_disable("DBImpl.PreloadTable.exceed_cache");

    FIU_ENABLE_FIU("DBImpl.PreloadTable.engine_throw_exception");
    stat = db_->PreloadTable(TABLE_NAME);
    ASSERT_FALSE(stat.ok());
    fiu_disable("DBImpl.PreloadTable.engine_throw_exception");
}

TEST_F(DBTest, SHUTDOWN_TEST) {
    db_->Stop();

    milvus::engine::meta::TableSchema table_info = BuildTableSchema();
    auto stat = db_->CreateTable(table_info);
    ASSERT_FALSE(stat.ok());
    stat = db_->DescribeTable(table_info);
    ASSERT_FALSE(stat.ok());
    stat = db_->UpdateTableFlag(TABLE_NAME, 0);
    ASSERT_FALSE(stat.ok());

    stat = db_->CreatePartition(TABLE_NAME, "part0", "0");
    ASSERT_FALSE(stat.ok());
    stat = db_->DropPartition("part0");
    ASSERT_FALSE(stat.ok());
    stat = db_->DropPartitionByTag(TABLE_NAME, "0");
    ASSERT_FALSE(stat.ok());

    std::vector<milvus::engine::meta::TableSchema> partition_schema_array;
    stat = db_->ShowPartitions(TABLE_NAME, partition_schema_array);
    ASSERT_FALSE(stat.ok());

    std::vector<milvus::engine::meta::TableSchema> table_infos;
    stat = db_->AllTables(table_infos);
    ASSERT_EQ(stat.code(), milvus::DB_ERROR);

    bool has_table = false;
    stat = db_->HasTable(table_info.table_id_, has_table);
    ASSERT_FALSE(stat.ok());

    milvus::engine::VectorsData xb;
    stat = db_->InsertVectors(table_info.table_id_, "", xb);
    ASSERT_FALSE(stat.ok());
    stat = db_->Flush();
    ASSERT_FALSE(stat.ok());
    stat = db_->DeleteVector(table_info.table_id_, 0);
    ASSERT_FALSE(stat.ok());

    milvus::engine::IDNumbers ids_to_delete{0};
    stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete);
    ASSERT_FALSE(stat.ok());
    stat = db_->Compact(table_info.table_id_);
    ASSERT_FALSE(stat.ok());

    milvus::engine::VectorsData vector;
    stat = db_->GetVectorByID(table_info.table_id_, 0, vector);
    ASSERT_FALSE(stat.ok());
    stat = db_->PreloadTable(table_info.table_id_);
    ASSERT_FALSE(stat.ok());

    uint64_t row_count = 0;
    stat = db_->GetTableRowCount(table_info.table_id_, row_count);
    ASSERT_FALSE(stat.ok());

    milvus::engine::TableIndex index;
    stat = db_->CreateIndex(table_info.table_id_, index);
    ASSERT_FALSE(stat.ok());
    stat = db_->DescribeIndex(table_info.table_id_, index);
    ASSERT_FALSE(stat.ok());
    stat = db_->DropIndex(TABLE_NAME);
    ASSERT_FALSE(stat.ok());

    std::vector<std::string> tags;
    milvus::engine::ResultIds result_ids;
    milvus::engine::ResultDistances result_distances;
    stat = db_->Query(dummy_context_, table_info.table_id_, tags, 1, 1, xb, result_ids, result_distances);
    ASSERT_FALSE(stat.ok());

    std::vector<std::string> file_ids;
    stat = db_->QueryByFileID(dummy_context_, table_info.table_id_, file_ids, 1, 1, xb, result_ids, result_distances);
    ASSERT_FALSE(stat.ok());

    stat = db_->Query(dummy_context_, table_info.table_id_, tags, 1, 1, milvus::engine::VectorsData(), result_ids,
                      result_distances);
    ASSERT_FALSE(stat.ok());

    stat = db_->DropTable(table_info.table_id_);
    ASSERT_FALSE(stat.ok());
}

TEST_F(DBTest, BACK_TIMER_THREAD_1) {
    fiu_init(0);
    milvus::engine::meta::TableSchema table_info = BuildTableSchema();
    milvus::Status stat;

    // test the background timer thread
    {
        FIU_ENABLE_FIU("DBImpl.StartMetricTask.InvalidTotalCache");
        FIU_ENABLE_FIU("SqliteMetaImpl.FilesToMerge.throw_exception");
        stat = db_->CreateTable(table_info);
        ASSERT_TRUE(stat.ok());

        // insert some vectors to create some table files
        int loop = 10;
        for (auto i = 0; i < loop; ++i) {
            int64_t nb = VECTOR_COUNT;
            milvus::engine::VectorsData xb;
            BuildVectors(nb, i, xb);
            db_->InsertVectors(TABLE_NAME, "", xb);
            ASSERT_EQ(xb.id_array_.size(), nb);
        }

        std::this_thread::sleep_for(std::chrono::seconds(2));
        db_->Stop();
        fiu_disable("DBImpl.StartMetricTask.InvalidTotalCache");
        fiu_disable("SqliteMetaImpl.FilesToMerge.throw_exception");
    }

    FIU_ENABLE_FIU("DBImpl.StartMetricTask.InvalidTotalCache");
    db_->Start();
    std::this_thread::sleep_for(std::chrono::seconds(2));
    db_->Stop();
    fiu_disable("DBImpl.StartMetricTask.InvalidTotalCache");
}

TEST_F(DBTest, BACK_TIMER_THREAD_2) {
    fiu_init(0);
    milvus::Status stat;
    milvus::engine::meta::TableSchema table_info = BuildTableSchema();
    stat = db_->CreateTable(table_info);
    ASSERT_TRUE(stat.ok());

    // insert some vectors to create some table files
    int loop = 10;
    for (auto i = 0; i < loop; ++i) {
        int64_t nb = VECTOR_COUNT;
        milvus::engine::VectorsData xb;
        BuildVectors(nb, i, xb);
        db_->InsertVectors(TABLE_NAME, "", xb);
        ASSERT_EQ(xb.id_array_.size(), nb);
    }

    FIU_ENABLE_FIU("SqliteMetaImpl.CreateTableFile.throw_exception");
    std::this_thread::sleep_for(std::chrono::seconds(2));
    db_->Stop();
    fiu_disable("SqliteMetaImpl.CreateTableFile.throw_exception");
}
TEST_F(DBTest, BACK_TIMER_THREAD_3) {
fiu_init(0);
milvus::Status stat;
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
stat = db_->CreateTable(table_info);
ASSERT_TRUE(stat.ok());
// insert some vector to create some tablefiles
int loop = 10;
for (auto i = 0; i < loop; ++i) {
int64_t nb = VECTOR_COUNT;
milvus::engine::VectorsData xb;
BuildVectors(nb, i, xb);
db_->InsertVectors(TABLE_NAME, "", xb);
ASSERT_EQ(xb.id_array_.size(), nb);
}
FIU_ENABLE_FIU("DBImpl.MergeFiles.Serialize_ThrowException");
db_->Start();
std::this_thread::sleep_for(std::chrono::seconds(2));
db_->Stop();
fiu_disable("DBImpl.MergeFiles.Serialize_ThrowException");
}
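// BACK_TIMER_THREAD_4: the background merge path returns an error status
// (rather than throwing) during serialization.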
TEST_F(DBTest, BACK_TIMER_THREAD_4) {
fiu_init(0);
milvus::Status stat;
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
stat = db_->CreateTable(table_info);
ASSERT_TRUE(stat.ok());
// insert some vectors to create several table files
int loop = 10;
for (auto i = 0; i < loop; ++i) {
int64_t nb = VECTOR_COUNT;
milvus::engine::VectorsData xb;
BuildVectors(nb, i, xb);
db_->InsertVectors(TABLE_NAME, "", xb);
ASSERT_EQ(xb.id_array_.size(), nb);
}
FIU_ENABLE_FIU("DBImpl.MergeFiles.Serialize_ErrorStatus");
db_->Start();
std::this_thread::sleep_for(std::chrono::seconds(2));
db_->Stop();
fiu_disable("DBImpl.MergeFiles.Serialize_ErrorStatus");
}
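// INDEX_TEST: builds IVFSQ8 and IVFFLAT indexes (plus IVFSQ8H in GPU builds),
// verifies DescribeIndex round-trips the parameters, and uses fault injection
// to check that CreateIndex propagates meta and index-update failures.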
TEST_F(DBTest, INDEX_TEST) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
uint64_t nb = VECTOR_COUNT;
milvus::engine::VectorsData xb;
BuildVectors(nb, 0, xb);
db_->InsertVectors(TABLE_NAME, "", xb);
ASSERT_EQ(xb.id_array_.size(), nb);
milvus::engine::TableIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
index.metric_type_ = (int)milvus::engine::MetricType::IP;
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_TRUE(stat.ok());
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_TRUE(stat.ok());
fiu_init(0);
FIU_ENABLE_FIU("SqliteMetaImpl.DescribeTableIndex.throw_exception");
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_FALSE(stat.ok());
fiu_disable("SqliteMetaImpl.DescribeTableIndex.throw_exception");
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_PQ;
FIU_ENABLE_FIU("DBImpl.UpdateTableIndexRecursively.fail_update_table_index");
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.UpdateTableIndexRecursively.fail_update_table_index");
#ifdef CUSTOMIZATION
#ifdef MILVUS_GPU_VERSION
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8H;
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_TRUE(stat.ok());
#endif
#endif
milvus::engine::TableIndex index_out;
stat = db_->DescribeIndex(table_info.table_id_, index_out);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(index.engine_type_, index_out.engine_type_);
ASSERT_EQ(index.nlist_, index_out.nlist_);
ASSERT_EQ(table_info.metric_type_, index_out.metric_type_);
stat = db_->DropIndex(table_info.table_id_);
ASSERT_TRUE(stat.ok());
}
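// PARTITION_TEST: covers the partition lifecycle end to end - creation
// (including the nested and duplicate error paths), per-partition inserts,
// recursive index build/drop, row counting, and tag / regex based search.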
TEST_F(DBTest, PARTITION_TEST) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
ASSERT_TRUE(stat.ok());
// create partition and insert data
const int64_t PARTITION_COUNT = 5;
const int64_t INSERT_BATCH = 2000;
std::string table_name = TABLE_NAME;
for (int64_t i = 0; i < PARTITION_COUNT; i++) {
std::string partition_tag = std::to_string(i);
std::string partition_name = table_name + "_" + partition_tag;
stat = db_->CreatePartition(table_name, partition_name, partition_tag);
ASSERT_TRUE(stat.ok());
// nested partitions are not allowed
stat = db_->CreatePartition(partition_name, "dummy", "dummy");
ASSERT_FALSE(stat.ok());
// duplicate partitions are not allowed
stat = db_->CreatePartition(table_name, partition_name, partition_tag);
ASSERT_FALSE(stat.ok());
milvus::engine::VectorsData xb;
BuildVectors(INSERT_BATCH, i, xb);
milvus::engine::IDNumbers vector_ids;
vector_ids.resize(INSERT_BATCH);
for (int64_t k = 0; k < INSERT_BATCH; k++) {
vector_ids[k] = i * INSERT_BATCH + k;
}
xb.id_array_ = vector_ids; // insert with explicitly assigned ids
db_->InsertVectors(table_name, partition_tag, xb);
ASSERT_EQ(xb.id_array_.size(), INSERT_BATCH);
// inserting into a non-existent partition must fail
stat = db_->InsertVectors(TABLE_NAME, "notexist", xb);
ASSERT_FALSE(stat.ok());
}
// a duplicate partition tag is not allowed, even with an empty partition name
stat = db_->CreatePartition(table_name, "", "0");
ASSERT_FALSE(stat.ok());
std::vector<milvus::engine::meta::TableSchema> partition_schema_array;
stat = db_->ShowPartitions(table_name, partition_schema_array);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(partition_schema_array.size(), PARTITION_COUNT);
for (int64_t i = 0; i < PARTITION_COUNT; i++) {
ASSERT_EQ(partition_schema_array[i].table_id_, table_name + "_" + std::to_string(i));
}
// check table existence
std::string special_part = "special";
stat = db_->CreatePartition(table_name, special_part, special_part);
ASSERT_TRUE(stat.ok());
bool has_table = false;
stat = db_->HasNativeTable(special_part, has_table);
ASSERT_FALSE(has_table);
stat = db_->HasTable(special_part, has_table);
ASSERT_TRUE(has_table);
{ // build index
milvus::engine::TableIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
index.metric_type_ = (int)milvus::engine::MetricType::L2;
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_TRUE(stat.ok());
fiu_init(0);
FIU_ENABLE_FIU("DBImpl.BuildTableIndexRecursively.fail_build_table_Index_for_partition");
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.BuildTableIndexRecursively.fail_build_table_Index_for_partition");
FIU_ENABLE_FIU("DBImpl.BuildTableIndexRecursively.not_empty_err_msg");
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.BuildTableIndexRecursively.not_empty_err_msg");
uint64_t row_count = 0;
stat = db_->GetTableRowCount(TABLE_NAME, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, INSERT_BATCH * PARTITION_COUNT);
FIU_ENABLE_FIU("SqliteMetaImpl.Count.throw_exception");
stat = db_->GetTableRowCount(TABLE_NAME, row_count);
ASSERT_FALSE(stat.ok());
fiu_disable("SqliteMetaImpl.Count.throw_exception");
FIU_ENABLE_FIU("DBImpl.GetTableRowCountRecursively.fail_get_table_rowcount_for_partition");
stat = db_->GetTableRowCount(TABLE_NAME, row_count);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.GetTableRowCountRecursively.fail_get_table_rowcount_for_partition");
}
{ // search
const int64_t nq = 5;
const int64_t topk = 10;
const int64_t nprobe = 10;
milvus::engine::VectorsData xq;
BuildVectors(nq, 0, xq);
// specify partition tags
std::vector<std::string> tags = {"0", std::to_string(PARTITION_COUNT - 1)};
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(dummy_context_, TABLE_NAME, tags, topk, nprobe, xq, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size() / topk, nq);
// search the whole table
tags.clear();
result_ids.clear();
result_distances.clear();
stat = db_->Query(dummy_context_, TABLE_NAME, tags, topk, nprobe, xq, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size() / topk, nq);
// search in all partitions (tag regex match)
tags.push_back("\\d");
result_ids.clear();
result_distances.clear();
stat = db_->Query(dummy_context_, TABLE_NAME, tags, topk, nprobe, xq, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size() / topk, nq);
}
stat = db_->DropPartition(table_name + "_0");
ASSERT_TRUE(stat.ok());
stat = db_->DropPartitionByTag(table_name, "1");
ASSERT_TRUE(stat.ok());
FIU_ENABLE_FIU("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition");
stat = db_->DropIndex(table_info.table_id_);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition");
FIU_ENABLE_FIU("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition");
stat = db_->DropIndex(table_info.table_id_);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition");
stat = db_->DropIndex(table_name);
ASSERT_TRUE(stat.ok());
stat = db_->DropTable(table_name);
ASSERT_TRUE(stat.ok());
}
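// ARCHIVE_DISK_CHECK: after many small inserts, the total size reported by
// Size() must stay within 1 GB; the archive/disk-quota policy is presumably
// configured by the DBTest2 fixture options.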
TEST_F(DBTest2, ARCHIVE_DISK_CHECK) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
std::vector<milvus::engine::meta::TableSchema> table_schema_array;
stat = db_->AllTables(table_schema_array);
ASSERT_TRUE(stat.ok());
bool bfound = false;
for (auto& schema : table_schema_array) {
if (schema.table_id_ == TABLE_NAME) {
bfound = true;
break;
}
}
ASSERT_TRUE(bfound);
milvus::engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
uint64_t size;
db_->Size(size);
int loop = INSERT_LOOP;
for (auto i = 0; i < loop; ++i) {
uint64_t nb = 10;
milvus::engine::VectorsData xb;
BuildVectors(nb, i, xb);
db_->InsertVectors(TABLE_NAME, "", xb);
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
std::this_thread::sleep_for(std::chrono::seconds(1));
db_->Size(size);
LOG(DEBUG) << "size=" << size;
ASSERT_LE(size, 1 * milvus::engine::G);
}
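// DELETE_TEST: dropping a table must remove its partitions recursively, and an
// injected DropTableRecursively failure must surface as an error first.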
TEST_F(DBTest2, DELETE_TEST) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
ASSERT_TRUE(stat.ok());
bool has_table = false;
db_->HasTable(TABLE_NAME, has_table);
ASSERT_TRUE(has_table);
uint64_t size;
db_->Size(size);
uint64_t nb = VECTOR_COUNT;
milvus::engine::VectorsData xb;
BuildVectors(nb, 0, xb);
milvus::engine::IDNumbers vector_ids;
stat = db_->InsertVectors(TABLE_NAME, "", xb);
milvus::engine::TableIndex index;
stat = db_->CreateIndex(TABLE_NAME, index);
// create a partition; dropping the table must drop its partitions recursively
stat = db_->CreatePartition(TABLE_NAME, "part0", "0");
ASSERT_TRUE(stat.ok());
// injected failure: DropTable must report an error
fiu_init(0);
FIU_ENABLE_FIU("DBImpl.DropTableRecursively.failed");
stat = db_->DropTable(TABLE_NAME);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.DropTableRecursively.failed");
stat = db_->DropTable(TABLE_NAME);
std::this_thread::sleep_for(std::chrono::seconds(2));
ASSERT_TRUE(stat.ok());
db_->HasTable(TABLE_NAME, has_table);
ASSERT_FALSE(has_table);
}
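// SHOW_TABLE_INFO_TEST: after a flush, GetTableInfo must report per-segment
// statistics whose row counts add up to what was inserted into the default
// partition and into each named partition.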
TEST_F(DBTest2, SHOW_TABLE_INFO_TEST) {
std::string table_name = TABLE_NAME;
milvus::engine::meta::TableSchema table_schema = BuildTableSchema();
auto stat = db_->CreateTable(table_schema);
uint64_t nb = VECTOR_COUNT;
milvus::engine::VectorsData xb;
BuildVectors(nb, 0, xb);
milvus::engine::IDNumbers vector_ids;
stat = db_->InsertVectors(table_name, "", xb);
// create partition and insert data
const int64_t PARTITION_COUNT = 2;
const int64_t INSERT_BATCH = 2000;
for (int64_t i = 0; i < PARTITION_COUNT; i++) {
std::string partition_tag = std::to_string(i);
std::string partition_name = table_name + "_" + partition_tag;
stat = db_->CreatePartition(table_name, partition_name, partition_tag);
ASSERT_TRUE(stat.ok());
milvus::engine::VectorsData xb;
BuildVectors(INSERT_BATCH, i, xb);
db_->InsertVectors(table_name, partition_tag, xb);
}
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
{
milvus::engine::TableInfo table_info;
stat = db_->GetTableInfo(table_name, table_info);
ASSERT_TRUE(stat.ok());
int64_t row_count = 0;
for (auto& part : table_info.partitions_stat_) {
row_count = 0;
for (auto& stat : part.segments_stat_) {
row_count += stat.row_count_;
ASSERT_EQ(stat.index_name_, "IDMAP");
ASSERT_GT(stat.data_size_, 0);
}
if (part.tag_ == milvus::engine::DEFAULT_PARTITON_TAG) {
ASSERT_EQ(row_count, VECTOR_COUNT);
} else {
ASSERT_EQ(row_count, INSERT_BATCH);
}
}
}
}
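// DB_INSERT_TEST (WAL enabled): inserts into the default partition, a named
// partition, and a non-existent partition tag; only the last must fail.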
TEST_F(DBTestWAL, DB_INSERT_TEST) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
ASSERT_TRUE(stat.ok());
uint64_t qb = 100;
milvus::engine::VectorsData qxb;
BuildVectors(qb, 0, qxb);
std::string partition_name = "part_name";
std::string partition_tag = "part_tag";
stat = db_->CreatePartition(table_info.table_id_, partition_name, partition_tag);
ASSERT_TRUE(stat.ok());
stat = db_->InsertVectors(table_info.table_id_, partition_tag, qxb);
ASSERT_TRUE(stat.ok());
stat = db_->InsertVectors(table_info.table_id_, "", qxb);
ASSERT_TRUE(stat.ok());
stat = db_->InsertVectors(table_info.table_id_, "not exist", qxb);
ASSERT_FALSE(stat.ok());
db_->Flush(table_info.table_id_);
stat = db_->DropTable(table_info.table_id_);
ASSERT_TRUE(stat.ok());
}
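// DB_STOP_TEST (WAL enabled): vectors inserted before Stop() must be queryable
// after Start(), whether they were flushed on shutdown or replayed from the WAL.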
TEST_F(DBTestWAL, DB_STOP_TEST) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
ASSERT_TRUE(stat.ok());
uint64_t qb = 100;
for (int i = 0; i < 5; i++) {
milvus::engine::VectorsData qxb;
BuildVectors(qb, i, qxb);
stat = db_->InsertVectors(table_info.table_id_, "", qxb);
ASSERT_TRUE(stat.ok());
}
db_->Stop();
db_->Start();
const int64_t topk = 10;
const int64_t nprobe = 10;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
milvus::engine::VectorsData qxb;
BuildVectors(qb, 0, qxb);
stat = db_->Query(dummy_context_, table_info.table_id_, {}, topk, nprobe, qxb, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size() / topk, qb);
stat = db_->DropTable(table_info.table_id_);
ASSERT_TRUE(stat.ok());
}
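// RECOVERY_WITH_NO_ERROR: the fiu point DBImpl.ExexWalRecord.return keeps WAL
// records from being applied while the db is torn down, so the vectors survive
// only in the log; after the db is rebuilt they stay invisible to Query()
// until Flush() applies the recovered records.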
TEST_F(DBTestWALRecovery, RECOVERY_WITH_NO_ERROR) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
ASSERT_TRUE(stat.ok());
uint64_t qb = 100;
for (int i = 0; i < 5; i++) {
milvus::engine::VectorsData qxb;
BuildVectors(qb, i, qxb);
stat = db_->InsertVectors(table_info.table_id_, "", qxb);
ASSERT_TRUE(stat.ok());
}
const int64_t topk = 10;
const int64_t nprobe = 10;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
milvus::engine::VectorsData qxb;
BuildVectors(qb, 0, qxb);
stat = db_->Query(dummy_context_, table_info.table_id_, {}, topk, nprobe, qxb, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_NE(result_ids.size() / topk, qb);
fiu_init(0);
fiu_enable("DBImpl.ExexWalRecord.return", 1, nullptr, 0);
db_ = nullptr;
fiu_disable("DBImpl.ExexWalRecord.return");
auto options = GetOptions();
db_ = milvus::engine::DBFactory::Build(options);
result_ids.clear();
result_distances.clear();
stat = db_->Query(dummy_context_, table_info.table_id_, {}, topk, nprobe, qxb, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size(), 0);
db_->Flush();
result_ids.clear();
result_distances.clear();
stat = db_->Query(dummy_context_, table_info.table_id_, {}, topk, nprobe, qxb, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size() / topk, qb);
}
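// RECOVERY_WITH_INVALID_LOG_FILE: with unapplied records still in the WAL,
// removing the log file must make the next DBFactory::Build call throw
// instead of silently losing data.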
TEST_F(DBTestWALRecovery_Error, RECOVERY_WITH_INVALID_LOG_FILE) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
ASSERT_TRUE(stat.ok());
uint64_t qb = 100;
milvus::engine::VectorsData qxb;
BuildVectors(qb, 0, qxb);
stat = db_->InsertVectors(table_info.table_id_, "", qxb);
ASSERT_TRUE(stat.ok());
fiu_init(0);
fiu_enable("DBImpl.ExexWalRecord.return", 1, nullptr, 0);
db_ = nullptr;
fiu_disable("DBImpl.ExexWalRecord.return");
auto options = GetOptions();
// delete the wal log file so that recovery fails the next time the db starts
boost::filesystem::remove(options.mxlog_path_ + "0.wal");
ASSERT_ANY_THROW(db_ = milvus::engine::DBFactory::Build(options));
}
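// Two quick negative checks: Flush() and GetVectorByID() on a table that was
// never created must both fail.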
TEST_F(DBTest2, FLUSH_NON_EXISTING_TABLE) {
auto status = db_->Flush("non_existing_table");
ASSERT_FALSE(status.ok());
}
TEST_F(DBTest2, GET_VECTOR_NON_EXISTING_TABLE) {
milvus::engine::VectorsData vector;
auto status = db_->GetVectorByID("non_existing_table", 0, vector);
ASSERT_FALSE(status.ok());
}
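// GET_VECTOR_BY_ID_TEST: a vector inserted into a partition must be
// retrievable by id through the parent table, with its float payload
// round-tripped exactly.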
TEST_F(DBTest2, GET_VECTOR_BY_ID_TEST) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
ASSERT_TRUE(stat.ok());
uint64_t qb = 1000;
milvus::engine::VectorsData qxb;
BuildVectors(qb, 0, qxb);
std::string partition_name = "part_name";
std::string partition_tag = "part_tag";
stat = db_->CreatePartition(table_info.table_id_, partition_name, partition_tag);
ASSERT_TRUE(stat.ok());
stat = db_->InsertVectors(table_info.table_id_, partition_tag, qxb);
ASSERT_TRUE(stat.ok());
db_->Flush(table_info.table_id_);
milvus::engine::VectorsData vector_data;
stat = db_->GetVectorByID(TABLE_NAME, qxb.id_array_[0], vector_data);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(vector_data.vector_count_, 1);
ASSERT_EQ(vector_data.float_data_.size(), TABLE_DIM);
for (int64_t i = 0; i < TABLE_DIM; i++) {
ASSERT_FLOAT_EQ(vector_data.float_data_[i], qxb.float_data_[i]);
}
}
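// GET_VECTOR_IDS_TEST: GetVectorIDs lists the ids stored in one segment; after
// DeleteVectors + Flush, the ids 0/100/999 disappear from the default-partition
// segment. The matching check for the partition segment is left disabled below.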
TEST_F(DBTest2, GET_VECTOR_IDS_TEST) {
milvus::engine::meta::TableSchema table_schema = BuildTableSchema();
auto stat = db_->CreateTable(table_schema);
ASSERT_TRUE(stat.ok());
uint64_t BATCH_COUNT = 1000;
milvus::engine::VectorsData vector_1;
BuildVectors(BATCH_COUNT, 0, vector_1);
stat = db_->InsertVectors(TABLE_NAME, "", vector_1);
ASSERT_TRUE(stat.ok());
std::string partition_tag = "part_tag";
stat = db_->CreatePartition(TABLE_NAME, "", partition_tag);
ASSERT_TRUE(stat.ok());
milvus::engine::VectorsData vector_2;
BuildVectors(BATCH_COUNT, 1, vector_2);
stat = db_->InsertVectors(TABLE_NAME, partition_tag, vector_2);
ASSERT_TRUE(stat.ok());
db_->Flush();
milvus::engine::TableInfo table_info;
stat = db_->GetTableInfo(TABLE_NAME, table_info);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info.partitions_stat_.size(), 2UL);
std::string default_segment = table_info.partitions_stat_[0].segments_stat_[0].name_;
std::string partition_segment = table_info.partitions_stat_[1].segments_stat_[0].name_;
milvus::engine::IDNumbers vector_ids;
stat = db_->GetVectorIDs(TABLE_NAME, default_segment, vector_ids);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(vector_ids.size(), BATCH_COUNT);
stat = db_->GetVectorIDs(TABLE_NAME, partition_segment, vector_ids);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(vector_ids.size(), BATCH_COUNT);
milvus::engine::IDNumbers ids_to_delete{0, 100, 999, 1000, 1500, 1888, 1999};
stat = db_->DeleteVectors(TABLE_NAME, ids_to_delete);
ASSERT_TRUE(stat.ok());
db_->Flush();
stat = db_->GetVectorIDs(TABLE_NAME, default_segment, vector_ids);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(vector_ids.size(), BATCH_COUNT - 3);
stat = db_->GetVectorIDs(TABLE_NAME, partition_segment, vector_ids);
ASSERT_TRUE(stat.ok());
// ASSERT_EQ(vector_ids.size(), BATCH_COUNT - 4);
}
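// INSERT_DUPLICATE_ID: with WAL disabled, a batch whose user-supplied ids are
// all 0 is still accepted and flushed; whether duplicates are deduplicated is
// not asserted here.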
TEST_F(DBTest2, INSERT_DUPLICATE_ID) {
auto options = GetOptions();
options.wal_enable_ = false;
db_ = milvus::engine::DBFactory::Build(options);
milvus::engine::meta::TableSchema table_schema = BuildTableSchema();
auto stat = db_->CreateTable(table_schema);
ASSERT_TRUE(stat.ok());
uint64_t size = 20;
milvus::engine::VectorsData vector;
BuildVectors(size, 0, vector);
vector.id_array_.clear();
for (uint64_t i = 0; i < size; ++i) {
vector.id_array_.emplace_back(0);
}
stat = db_->InsertVectors(TABLE_NAME, "", vector);
ASSERT_TRUE(stat.ok());
stat = db_->Flush(TABLE_NAME);
ASSERT_TRUE(stat.ok());
}
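// The QueryByID scenario below is kept for reference but currently disabled.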
/*
TEST_F(DBTest2, SEARCH_WITH_DIFFERENT_INDEX) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
// table_info.index_file_size_ = 1 * milvus::engine::M;
auto stat = db_->CreateTable(table_info);
int loop = 10;
uint64_t nb = 100000;
for (auto i = 0; i < loop; ++i) {
milvus::engine::VectorsData xb;
BuildVectors(nb, i, xb);
db_->InsertVectors(TABLE_NAME, "", xb);
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
}
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<int64_t> dis(0, nb * loop - 1);
int64_t num_query = 10;
std::vector<int64_t> ids_to_search;
for (int64_t i = 0; i < num_query; ++i) {
int64_t index = dis(gen);
ids_to_search.emplace_back(index);
}
milvus::engine::TableIndex index;
// index.metric_type_ = (int)milvus::engine::MetricType::IP;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_TRUE(stat.ok());
stat = db_->PreloadTable(table_info.table_id_);
ASSERT_TRUE(stat.ok());
int topk = 10, nprobe = 10;
for (auto id : ids_to_search) {
std::vector<std::string> tags;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, nprobe, id, result_ids,
result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids[0], id);
ASSERT_LT(result_distances[0], 1e-4);
}
db_->DropIndex(table_info.table_id_);
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_TRUE(stat.ok());
stat = db_->PreloadTable(table_info.table_id_);
ASSERT_TRUE(stat.ok());
for (auto id : ids_to_search) {
std::vector<std::string> tags;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, nprobe, id, result_ids,
result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids[0], id);
ASSERT_LT(result_distances[0], 1e-4);
}
}
*/