"""
|
|
For testing index operations, including `create_index`, `describe_index` and `drop_index` interfaces
|
|
"""
|
|
import logging
|
|
import pytest
|
|
import time
|
|
import pdb
|
|
import threading
|
|
from multiprocessing import Pool, Process
|
|
import numpy
|
|
import sklearn.preprocessing
|
|
from milvus import IndexType, MetricType
|
|
from utils import *
|
|
|
|
nb = 6000
|
|
dim = 128
|
|
index_file_size = 10
|
|
vectors = gen_vectors(nb, dim)
|
|
vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')
|
|
vectors = vectors.tolist()
|
|
BUILD_TIMEOUT = 300
|
|
nprobe = 1
|
|
tag = "1970-01-01"
|
|
NLIST = 16384
|
|
|
|
|
|
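

# A minimal, illustrative sketch (not a test case, so it is not collected by pytest) of the index
# lifecycle exercised by the classes below: create a table, add vectors, build an index, inspect
# it, then drop it. It simply chains the same client calls used throughout this file
# (`create_table`, `add_vectors`, `create_index`, `describe_index`, `drop_index`); `client` is
# assumed to be an already-connected Milvus handle such as the `connect` fixture, and
# `table_name` a fresh table name.
def _index_lifecycle_example(client, table_name):
    param = {'table_name': table_name,
             'dimension': dim,
             'index_file_size': index_file_size,
             'metric_type': MetricType.L2}
    client.create_table(param)
    status, ids = client.add_vectors(table_name, vectors)
    index_param = {"index_type": IndexType.IVF_SQ8, "nlist": NLIST}
    status = client.create_index(table_name, index_param)
    status, index_info = client.describe_index(table_name)
    logging.getLogger().info(index_info)
    status = client.drop_index(table_name)
    return status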


class TestIndexBase:
    @pytest.fixture(
        scope="function",
        params=gen_index_params()
    )
    def get_index_params(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in CPU mode")
        if request.param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip PQ Temporary")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_simple_index_params()
    )
    def get_simple_index_params(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in CPU mode")
        if request.param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip PQ Temporary")
        return request.param

    """
    ******************************************************************
      The following cases are used to test `create_index` function
    ******************************************************************
    """

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index(self, connect, table, get_simple_index_params):
        '''
        target: test create index interface
        method: create table and add vectors in it, create index
        expected: return code equals to 0, and search success
        '''
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        if index_params["index_type"] == IndexType.IVF_SQ8:
            status, ids = connect.add_vectors(table, vectors)
            status = connect.create_index(table, index_params)
            assert status.OK()
        else:
            pytest.skip("skip other index types")

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_no_vectors(self, connect, table, get_simple_index_params):
        '''
        target: test create index interface
        method: create table, then create index without adding vectors first
        expected: return code equals to 0
        '''
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        status = connect.create_index(table, index_params)
        assert status.OK()

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_partition(self, connect, table, get_simple_index_params):
        '''
        target: test create index interface
        method: create table, create partition, and add vectors in it, create index
        expected: return code equals to 0, and search success
        '''
        index_params = get_simple_index_params
        if index_params["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        logging.getLogger().info(index_params)
        status = connect.create_partition(table, tag)
        status, ids = connect.add_vectors(table, vectors, partition_tag=tag)
        status = connect.create_index(table, index_params)
        assert status.OK()

    @pytest.mark.level(2)
    def test_create_index_without_connect(self, dis_connect, table):
        '''
        target: test create index without connection
        method: create index while the client is disconnected
        expected: raise exception
        '''
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        with pytest.raises(Exception) as e:
            status = dis_connect.create_index(table, index_param)

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_search_with_query_vectors(self, connect, table, get_simple_index_params):
        '''
        target: test create index interface, search with more query vectors
        method: create table and add vectors in it, create index
        expected: return code equals to 0, and search success
        '''
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        status, ids = connect.add_vectors(table, vectors)
        status = connect.create_index(table, index_params)
        logging.getLogger().info(connect.describe_index(table))
        query_vecs = [vectors[0], vectors[1], vectors[2]]
        top_k = 5
        status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)
        assert status.OK()
        assert len(result) == len(query_vecs)
        logging.getLogger().info(result)

    # TODO: enable
    @pytest.mark.timeout(BUILD_TIMEOUT)
    @pytest.mark.level(2)
    def _test_create_index_multiprocessing(self, connect, table, args):
        '''
        target: test create index interface with multiprocess
        method: create table and add vectors in it, create index
        expected: return code equals to 0, and search success
        '''
        status, ids = connect.add_vectors(table, vectors)

        def build(connect):
            status = connect.create_index(table)
            assert status.OK()

        process_num = 8
        processes = []
        uri = "tcp://%s:%s" % (args["ip"], args["port"])

        for i in range(process_num):
            m = get_milvus(args["handler"])
            m.connect(uri=uri)
            p = Process(target=build, args=(m,))
            processes.append(p)
            p.start()
            time.sleep(0.2)
        for p in processes:
            p.join()

        query_vec = [vectors[0]]
        top_k = 1
        status, result = connect.search_vectors(table, top_k, nprobe, query_vec)
        assert len(result) == 1
        assert len(result[0]) == top_k
        assert result[0][0].distance == 0.0

    # TODO: enable
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def _test_create_index_multiprocessing_multitable(self, connect, args):
        '''
        target: test create index interface with multiprocess
        method: create table and add vectors in it, create index
        expected: return code equals to 0, and search success
        '''
        process_num = 8
        loop_num = 8
        processes = []

        table = []
        j = 0
        while j < (process_num * loop_num):
            table_name = gen_unique_str("test_create_index_multiprocessing")
            table.append(table_name)
            param = {'table_name': table_name,
                     'dimension': dim,
                     'index_type': IndexType.FLAT,
                     'store_raw_vector': False}
            connect.create_table(param)
            j = j + 1

        def create_index(connect, ids):
            i = 0
            while i < loop_num:
                # assert connect.has_table(table[ids*process_num+i])
                status, _ = connect.add_vectors(table[ids*process_num+i], vectors)

                status = connect.create_index(table[ids*process_num+i])
                assert status.OK()
                query_vec = [vectors[0]]
                top_k = 1
                status, result = connect.search_vectors(table[ids*process_num+i], top_k, nprobe, query_vec)
                assert len(result) == 1
                assert len(result[0]) == top_k
                assert result[0][0].distance == 0.0
                i = i + 1

        uri = "tcp://%s:%s" % (args["ip"], args["port"])

        for i in range(process_num):
            m = get_milvus(args["handler"])
            m.connect(uri=uri)
            ids = i
            p = Process(target=create_index, args=(m, ids))
            processes.append(p)
            p.start()
            time.sleep(0.2)
        for p in processes:
            p.join()

    def test_create_index_table_not_existed(self, connect):
        '''
        target: test create index interface when table name not existed
        method: create index with a table name that does not exist
        expected: return code not equals to 0, create index failed
        '''
        table_name = gen_unique_str(self.__class__.__name__)
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        status = connect.create_index(table_name, index_param)
        assert not status.OK()

    def test_create_index_table_None(self, connect):
        '''
        target: test create index interface when table name is None
        method: create index with table_name: None
        expected: return code not equals to 0, create index failed
        '''
        table_name = None
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        with pytest.raises(Exception) as e:
            status = connect.create_index(table_name, index_param)

    def test_create_index_no_vectors(self, connect, table):
        '''
        target: test create index interface when there is no vectors in table
        method: create table and add no vectors in it, and then create index
        expected: return code equals to 0
        '''
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        status = connect.create_index(table, index_param)
        assert status.OK()

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_no_vectors_then_add_vectors(self, connect, table, get_simple_index_params):
        '''
        target: test create index interface when there is no vectors in table, and does not affect the subsequent process
        method: create table and add no vectors in it, and then create index, add vectors in it
        expected: return code equals to 0
        '''
        index_param = get_simple_index_params
        if index_param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        status = connect.create_index(table, index_param)
        status, ids = connect.add_vectors(table, vectors)
        assert status.OK()

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_same_index_repeatedly(self, connect, table, get_simple_index_params):
        '''
        target: check if index can be created repeatedly, with the same create_index params
        method: create index after index has been built
        expected: return code success, and search ok
        '''
        status, ids = connect.add_vectors(table, vectors)
        index_param = get_simple_index_params
        if index_param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        status = connect.create_index(table, index_param)
        status = connect.create_index(table, index_param)
        assert status.OK()
        query_vec = [vectors[0]]
        top_k = 1
        status, result = connect.search_vectors(table, top_k, nprobe, query_vec)
        assert len(result) == 1
        assert len(result[0]) == top_k

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_different_index_repeatedly(self, connect, table):
        '''
        target: check if index can be created repeatedly, with different create_index params
        method: create another index with different index_params after index has been built
        expected: return code 0, and describe index result equals with the second index params
        '''
        nlist = NLIST
        status, ids = connect.add_vectors(table, vectors)
        index_type_1 = IndexType.IVF_SQ8
        index_type_2 = IndexType.IVFLAT
        index_params = [{"index_type": index_type_1, "nlist": nlist}, {"index_type": index_type_2, "nlist": nlist}]
        logging.getLogger().info(index_params)
        for index_param in index_params:
            status = connect.create_index(table, index_param)
            assert status.OK()
        status, result = connect.describe_index(table)
        assert result._nlist == nlist
        assert result._table_name == table
        assert result._index_type == index_type_2

    """
    ******************************************************************
      The following cases are used to test `describe_index` function
    ******************************************************************
    """

    def test_describe_index(self, connect, table, get_index_params):
        '''
        target: test describe index interface
        method: create table and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
        '''
        index_params = get_index_params
        logging.getLogger().info(index_params)
        # status, ids = connect.add_vectors(table, vectors)
        status = connect.create_index(table, index_params)
        status, result = connect.describe_index(table)
        logging.getLogger().info(result)
        assert result._nlist == index_params["nlist"]
        assert result._table_name == table
        assert result._index_type == index_params["index_type"]

    def test_describe_and_drop_index_multi_tables(self, connect, get_simple_index_params):
        '''
        target: test create, describe and drop index interface with multiple tables of L2
        method: create tables and add vectors in them, create index, call describe index
        expected: return code 0, and index structure
        '''
        nq = 100
        vectors = gen_vectors(nq, dim)
        table_list = []
        for i in range(10):
            table_name = gen_unique_str()
            table_list.append(table_name)
            param = {'table_name': table_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.L2}
            connect.create_table(param)
            index_params = get_simple_index_params
            if index_params["index_type"] == IndexType.IVF_PQ:
                pytest.skip("Skip some PQ cases")
            logging.getLogger().info(index_params)
            status, ids = connect.add_vectors(table_name=table_name, records=vectors)
            status = connect.create_index(table_name, index_params)
            assert status.OK()

        for i in range(10):
            status, result = connect.describe_index(table_list[i])
            logging.getLogger().info(result)
            assert result._nlist == index_params["nlist"]
            assert result._table_name == table_list[i]
            assert result._index_type == index_params["index_type"]

        for i in range(10):
            status = connect.drop_index(table_list[i])
            assert status.OK()
            status, result = connect.describe_index(table_list[i])
            logging.getLogger().info(result)
            assert result._nlist == NLIST
            assert result._table_name == table_list[i]
            assert result._index_type == IndexType.FLAT

    @pytest.mark.level(2)
    def test_describe_index_without_connect(self, dis_connect, table):
        '''
        target: test describe index without connection
        method: describe index while the client is disconnected
        expected: raise exception
        '''
        with pytest.raises(Exception) as e:
            status = dis_connect.describe_index(table)

    def test_describe_index_table_not_existed(self, connect):
        '''
        target: test describe index interface when table name not existed
        method: describe index with a table name that does not exist
        expected: return code not equals to 0, describe index failed
        '''
        table_name = gen_unique_str(self.__class__.__name__)
        status, result = connect.describe_index(table_name)
        assert not status.OK()

    def test_describe_index_table_None(self, connect):
        '''
        target: test describe index interface when table name is None
        method: describe index with table_name: None
        expected: return code not equals to 0, describe index failed
        '''
        table_name = None
        with pytest.raises(Exception) as e:
            status = connect.describe_index(table_name)

    def test_describe_index_not_create(self, connect, table):
        '''
        target: test describe index interface when index not created
        method: create table and add vectors in it, then describe index without creating it
        expected: return code 0, describe index succeeds
        '''
        status, ids = connect.add_vectors(table, vectors)
        status, result = connect.describe_index(table)
        logging.getLogger().info(result)
        assert status.OK()
        # assert result._nlist == index_params["nlist"]
        # assert result._table_name == table
        # assert result._index_type == index_params["index_type"]
"""
|
|
******************************************************************
|
|
The following cases are used to test `drop_index` function
|
|
******************************************************************
|
|
"""

    def test_drop_index(self, connect, table, get_simple_index_params):
        '''
        target: test drop index interface
        method: create table and add vectors in it, create index, call drop index
        expected: return code 0, and default index param
        '''
        index_param = get_simple_index_params
        # status, ids = connect.add_vectors(table, vectors)
        status = connect.create_index(table, index_param)
        assert status.OK()
        status, result = connect.describe_index(table)
        logging.getLogger().info(result)
        status = connect.drop_index(table)
        assert status.OK()
        status, result = connect.describe_index(table)
        logging.getLogger().info(result)
        assert result._nlist == NLIST
        assert result._table_name == table
        assert result._index_type == IndexType.FLAT

    def test_drop_index_repeatly(self, connect, table, get_simple_index_params):
        '''
        target: test drop index repeatedly
        method: create index, call drop index, and drop again
        expected: return code 0
        '''
        index_param = get_simple_index_params
        # status, ids = connect.add_vectors(table, vectors)
        status = connect.create_index(table, index_param)
        assert status.OK()
        status, result = connect.describe_index(table)
        logging.getLogger().info(result)
        status = connect.drop_index(table)
        assert status.OK()
        status = connect.drop_index(table)
        assert status.OK()
        status, result = connect.describe_index(table)
        logging.getLogger().info(result)
        assert result._nlist == NLIST
        assert result._table_name == table
        assert result._index_type == IndexType.FLAT

    @pytest.mark.level(2)
    def test_drop_index_without_connect(self, dis_connect, table):
        '''
        target: test drop index without connection
        method: drop index while the client is disconnected
        expected: raise exception
        '''
        with pytest.raises(Exception) as e:
            status = dis_connect.drop_index(table)

    def test_drop_index_table_not_existed(self, connect):
        '''
        target: test drop index interface when table name not existed
        method: drop index with a table name that does not exist
        expected: return code not equals to 0, drop index failed
        '''
        table_name = gen_unique_str(self.__class__.__name__)
        status = connect.drop_index(table_name)
        assert not status.OK()

    def test_drop_index_table_None(self, connect):
        '''
        target: test drop index interface when table name is None
        method: drop index with table_name: None
        expected: return code not equals to 0, drop index failed
        '''
        table_name = None
        with pytest.raises(Exception) as e:
            status = connect.drop_index(table_name)

    def test_drop_index_table_not_create(self, connect, table):
        '''
        target: test drop index interface when index not created
        method: create table and add vectors in it, then drop index without creating it
        expected: return code 0, drop index succeeds
        '''
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        status, ids = connect.add_vectors(table, vectors)
        status, result = connect.describe_index(table)
        logging.getLogger().info(result)
        # no create index
        status = connect.drop_index(table)
        logging.getLogger().info(status)
        assert status.OK()

    def test_create_drop_index_repeatly(self, connect, table, get_simple_index_params):
        '''
        target: test create / drop index repeatedly, use the same index params
        method: create index then drop index, repeat twice
        expected: return code 0
        '''
        index_params = get_simple_index_params
        # status, ids = connect.add_vectors(table, vectors)
        for i in range(2):
            status = connect.create_index(table, index_params)
            assert status.OK()
            status, result = connect.describe_index(table)
            logging.getLogger().info(result)
            status = connect.drop_index(table)
            assert status.OK()
            status, result = connect.describe_index(table)
            logging.getLogger().info(result)
            assert result._nlist == NLIST
            assert result._table_name == table
            assert result._index_type == IndexType.FLAT

    def test_create_drop_index_repeatly_different_index_params(self, connect, table):
        '''
        target: test create / drop index repeatedly, use different index params
        method: create index then drop index, repeat twice, each time use different index_params to create index
        expected: return code 0
        '''
        nlist = NLIST
        index_params = [{"index_type": IndexType.IVFLAT, "nlist": nlist}, {"index_type": IndexType.IVF_SQ8, "nlist": nlist}]
        # status, ids = connect.add_vectors(table, vectors)
        for i in range(2):
            status = connect.create_index(table, index_params[i])
            assert status.OK()
            status, result = connect.describe_index(table)
            logging.getLogger().info(result)
            status = connect.drop_index(table)
            assert status.OK()
            status, result = connect.describe_index(table)
            logging.getLogger().info(result)
            assert result._nlist == NLIST
            assert result._table_name == table
            assert result._index_type == IndexType.FLAT
class TestIndexIP:
    @pytest.fixture(
        scope="function",
        params=gen_index_params()
    )
    def get_index_params(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not supported in CPU mode")
            if request.param["index_type"] == IndexType.IVF_PQ:
                pytest.skip("Skip PQ temporarily")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_simple_index_params()
    )
    def get_simple_index_params(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not supported in CPU mode")
            if request.param["index_type"] == IndexType.IVF_PQ:
                pytest.skip("Skip PQ temporarily")
        return request.param

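    # The parametrized fixtures above yield index-param dicts that are consumed as
    # index_params["index_type"] / index_params["nlist"] throughout this class,
    # e.g. (shape only, actual values come from the gen_*_index_params() helpers
    # defined elsewhere in this suite):
    #   {"index_type": IndexType.IVF_SQ8, "nlist": NLIST}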
"""
|
|
******************************************************************
|
|
The following cases are used to test `create_index` function
|
|
******************************************************************
|
|
"""
|
|
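    # A minimal sketch (not itself a test) of the flow the cases below exercise,
    # assuming the shared `connect` fixture, an existing `ip_table`, and the
    # module-level `vectors` / `nprobe` helpers used throughout this file:
    #
    #   status, ids = connect.add_vectors(ip_table, vectors)
    #   status = connect.create_index(ip_table, {"index_type": IndexType.IVF_SQ8, "nlist": NLIST})
    #   status, results = connect.search_vectors(ip_table, 5, nprobe, [vectors[0]])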
    @pytest.mark.level(2)
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index(self, connect, ip_table, get_simple_index_params):
        '''
        target: test create index interface
        method: create table and add vectors in it, create index
        expected: return code equals 0, and search succeeds
        '''
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        if index_params["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        status, ids = connect.add_vectors(ip_table, vectors)
        status = connect.create_index(ip_table, index_params)
        assert status.OK()

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_table(self, connect, ip_table, get_simple_index_params):
        '''
        target: test create index interface
        method: create table, create partition, add vectors into it, then create index on the table
        expected: return code equals 0, and search succeeds
        '''
        index_params = get_simple_index_params
        if index_params["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        logging.getLogger().info(index_params)
        status = connect.create_partition(ip_table, tag)
        status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag)
        status = connect.create_index(ip_table, index_params)
        assert status.OK()

    @pytest.mark.level(2)
    def test_create_index_without_connect(self, dis_connect, ip_table):
        '''
        target: test create index without connection
        method: create index while disconnected
        expected: raise exception
        '''
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        with pytest.raises(Exception) as e:
            status = dis_connect.create_index(ip_table, index_param)

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_search_with_query_vectors(self, connect, ip_table, get_simple_index_params):
        '''
        target: test create index interface, then search with multiple query vectors
        method: create table and add vectors in it, create index, search
        expected: return code equals 0, and search succeeds
        '''
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        if index_params["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        status, ids = connect.add_vectors(ip_table, vectors)
        status = connect.create_index(ip_table, index_params)
        logging.getLogger().info(connect.describe_index(ip_table))
        query_vecs = [vectors[0], vectors[1], vectors[2]]
        top_k = 5
        status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
        logging.getLogger().info(result)
        assert status.OK()
        assert len(result) == len(query_vecs)

    # TODO: enable
    @pytest.mark.timeout(BUILD_TIMEOUT)
    @pytest.mark.level(2)
    def _test_create_index_multiprocessing(self, connect, ip_table, args):
        '''
        target: test create index interface with multiprocess
        method: create table and add vectors in it, create index from several processes
        expected: return code equals 0, and search succeeds
        '''
        status, ids = connect.add_vectors(ip_table, vectors)

        def build(connect):
            status = connect.create_index(ip_table)
            assert status.OK()

        process_num = 8
        processes = []
        uri = "tcp://%s:%s" % (args["ip"], args["port"])

        for i in range(process_num):
            m = get_milvus(args["handler"])
            m.connect(uri=uri)
            p = Process(target=build, args=(m,))
            processes.append(p)
            p.start()
            time.sleep(0.2)
        for p in processes:
            p.join()

        query_vec = [vectors[0]]
        top_k = 1
        status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vec)
        assert len(result) == 1
        assert len(result[0]) == top_k
        assert result[0][0].distance == 0.0

    # TODO: enable
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def _test_create_index_multiprocessing_multitable(self, connect, args):
        '''
        target: test create index interface with multiprocess
        method: create tables and add vectors in them, create index from several processes
        expected: return code equals 0, and search succeeds
        '''
        process_num = 8
        loop_num = 8
        processes = []

        table = []
        j = 0
        while j < (process_num * loop_num):
            table_name = gen_unique_str("test_create_index_multiprocessing")
            table.append(table_name)
            param = {'table_name': table_name,
                     'dimension': dim}
            connect.create_table(param)
            j = j + 1

        def create_index(connect, ids):
            # `ids` is the per-process offset passed via Process(args=...) below;
            # keep the returned id list in a separate name so the offset is not clobbered
            i = 0
            while i < loop_num:
                # assert connect.has_table(table[ids * process_num + i])
                status, ids_ret = connect.add_vectors(table[ids * process_num + i], vectors)

                status = connect.create_index(table[ids * process_num + i])
                assert status.OK()
                query_vec = [vectors[0]]
                top_k = 1
                status, result = connect.search_vectors(table[ids * process_num + i], top_k, nprobe, query_vec)
                assert len(result) == 1
                assert len(result[0]) == top_k
                assert result[0][0].distance == 0.0
                i = i + 1

        uri = "tcp://%s:%s" % (args["ip"], args["port"])

        for i in range(process_num):
            m = get_milvus(args["handler"])
            m.connect(uri=uri)
            ids = i
            p = Process(target=create_index, args=(m, ids))
            processes.append(p)
            p.start()
            time.sleep(0.2)
        for p in processes:
            p.join()

    def test_create_index_no_vectors(self, connect, ip_table):
        '''
        target: test create index interface when there are no vectors in the table
        method: create table without adding vectors, then create index
        expected: return code equals 0
        '''
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        status = connect.create_index(ip_table, index_param)
        assert status.OK()

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_no_vectors_then_add_vectors(self, connect, ip_table, get_simple_index_params):
        '''
        target: test that creating an index on an empty table does not affect the subsequent process
        method: create table, create index while it is empty, then add vectors
        expected: return code equals 0
        '''
        index_param = get_simple_index_params
        if index_param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        status = connect.create_index(ip_table, index_param)
        status, ids = connect.add_vectors(ip_table, vectors)
        assert status.OK()

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_same_index_repeatedly(self, connect, ip_table):
        '''
        target: check if index can be created repeatedly, with the same create_index params
        method: create index again after an index has been built
        expected: return code success, and search ok
        '''
        nlist = NLIST
        status, ids = connect.add_vectors(ip_table, vectors)
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        status = connect.create_index(ip_table, index_param)
        status = connect.create_index(ip_table, index_param)
        assert status.OK()
        query_vec = [vectors[0]]
        top_k = 1
        status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vec)
        assert len(result) == 1
        assert len(result[0]) == top_k

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_different_index_repeatedly(self, connect, ip_table):
        '''
        target: check if index can be created repeatedly, with different create_index params
        method: create another index with different index_params after an index has been built
        expected: return code 0, and describe index result equals the second index params
        '''
        nlist = NLIST
        status, ids = connect.add_vectors(ip_table, vectors)
        index_type_1 = IndexType.IVF_SQ8
        index_type_2 = IndexType.IVFLAT
        index_params = [{"index_type": index_type_1, "nlist": nlist}, {"index_type": index_type_2, "nlist": nlist}]
        logging.getLogger().info(index_params)
        for index_param in index_params:
            status = connect.create_index(ip_table, index_param)
            assert status.OK()
        status, result = connect.describe_index(ip_table)
        assert result._nlist == nlist
        assert result._table_name == ip_table
        assert result._index_type == index_type_2

"""
|
|
******************************************************************
|
|
The following cases are used to test `describe_index` function
|
|
******************************************************************
|
|
"""
|
|
|
|
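    # describe_index returns a status plus an index-info object; the cases below read
    # result._table_name, result._index_type and result._nlist from it. When no index
    # has been built (or after drop_index), the suite expects the defaults
    # IndexType.FLAT / NLIST, as asserted throughout this class.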
    def test_describe_index(self, connect, ip_table, get_simple_index_params):
        '''
        target: test describe index interface
        method: create table and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
        '''
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        # status, ids = connect.add_vectors(ip_table, vectors[:5000])
        status = connect.create_index(ip_table, index_params)
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        assert result._table_name == ip_table
        status, mode = connect._cmd("mode")
        if str(mode) == "GPU" and index_params["index_type"] == IndexType.IVF_PQ:
            assert result._index_type == IndexType.FLAT
            assert result._nlist == NLIST
        else:
            assert result._index_type == index_params["index_type"]
            assert result._nlist == index_params["nlist"]

    def test_describe_index_partition(self, connect, ip_table, get_simple_index_params):
        '''
        target: test describe index interface
        method: create table, create partition and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
        '''
        index_params = get_simple_index_params
        if index_params["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        logging.getLogger().info(index_params)
        status = connect.create_partition(ip_table, tag)
        status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag)
        status = connect.create_index(ip_table, index_params)
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        assert result._nlist == index_params["nlist"]
        assert result._table_name == ip_table
        assert result._index_type == index_params["index_type"]

    def test_describe_index_partition_A(self, connect, ip_table, get_simple_index_params):
        '''
        target: test describe index interface
        method: create table, create partitions and add vectors into them, create index, call describe index
        expected: return code 0, and index structure
        '''
        new_tag = "new_tag"
        index_params = get_simple_index_params
        if index_params["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        logging.getLogger().info(index_params)
        status = connect.create_partition(ip_table, tag)
        status = connect.create_partition(ip_table, new_tag)
        status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag)
        status, ids = connect.add_vectors(ip_table, vectors, partition_tag=new_tag)
        status = connect.create_index(ip_table, index_params)
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        assert result._nlist == index_params["nlist"]
        assert result._table_name == ip_table
        assert result._index_type == index_params["index_type"]

    def test_describe_and_drop_index_multi_tables(self, connect, get_simple_index_params):
        '''
        target: test create, describe and drop index interfaces with multiple IP tables
        method: create tables and add vectors to them, create index on each, call describe index
        expected: return code 0, and index structure
        '''
        nq = 100
        vectors = gen_vectors(nq, dim)
        table_list = []
        for i in range(10):
            table_name = gen_unique_str()
            table_list.append(table_name)
            param = {'table_name': table_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.IP}
            connect.create_table(param)
            index_params = get_simple_index_params
            if index_params["index_type"] == IndexType.IVF_PQ:
                pytest.skip("Skip some PQ cases")
            logging.getLogger().info(index_params)
            status, ids = connect.add_vectors(table_name=table_name, records=vectors)
            status = connect.create_index(table_name, index_params)
            assert status.OK()
        for i in range(10):
            status, result = connect.describe_index(table_list[i])
            logging.getLogger().info(result)
            assert result._nlist == index_params["nlist"]
            assert result._table_name == table_list[i]
            assert result._index_type == index_params["index_type"]
        for i in range(10):
            status = connect.drop_index(table_list[i])
            assert status.OK()
            status, result = connect.describe_index(table_list[i])
            logging.getLogger().info(result)
            assert result._nlist == NLIST
            assert result._table_name == table_list[i]
            assert result._index_type == IndexType.FLAT

    @pytest.mark.level(2)
    def test_describe_index_without_connect(self, dis_connect, ip_table):
        '''
        target: test describe index without connection
        method: describe index while disconnected
        expected: raise exception
        '''
        with pytest.raises(Exception) as e:
            status = dis_connect.describe_index(ip_table)

    def test_describe_index_not_create(self, connect, ip_table):
        '''
        target: test describe index interface when index not created
        method: create table and add vectors in it, then describe index without creating one
        expected: return code 0, describe index succeeds with the default index info
        '''
        status, ids = connect.add_vectors(ip_table, vectors)
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        assert status.OK()
        # assert result._nlist == index_params["nlist"]
        # assert result._table_name == table
        # assert result._index_type == index_params["index_type"]

"""
|
|
******************************************************************
|
|
The following cases are used to test `drop_index` function
|
|
******************************************************************
|
|
"""
|
|
|
|
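    # As asserted below, drop_index is expected to reset the table to its default
    # index description: describe_index afterwards reports IndexType.FLAT with
    # nlist == NLIST, and dropping an index that was never created still returns OK.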
    def test_drop_index(self, connect, ip_table, get_simple_index_params):
        '''
        target: test drop index interface
        method: create table and add vectors in it, create index, call drop index
        expected: return code 0, and default index param
        '''
        index_params = get_simple_index_params
        status, mode = connect._cmd("mode")
        assert status.OK()
        # status, ids = connect.add_vectors(ip_table, vectors)
        status = connect.create_index(ip_table, index_params)
        if str(mode) == "GPU" and (index_params["index_type"] == IndexType.IVF_PQ):
            assert not status.OK()
        else:
            assert status.OK()
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        status = connect.drop_index(ip_table)
        assert status.OK()
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        assert result._nlist == NLIST
        assert result._table_name == ip_table
        assert result._index_type == IndexType.FLAT

    def test_drop_index_partition(self, connect, ip_table, get_simple_index_params):
        '''
        target: test drop index interface
        method: create table, create partition and add vectors in it, create index on the table, call drop index
        expected: return code 0, and default index param
        '''
        index_params = get_simple_index_params
        if index_params["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        status = connect.create_partition(ip_table, tag)
        status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag)
        status = connect.create_index(ip_table, index_params)
        assert status.OK()
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        status = connect.drop_index(ip_table)
        assert status.OK()
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        assert result._nlist == NLIST
        assert result._table_name == ip_table
        assert result._index_type == IndexType.FLAT

    def test_drop_index_partition_C(self, connect, ip_table, get_simple_index_params):
        '''
        target: test drop index interface
        method: create table, create partitions and add vectors in it, create index, call drop index
        expected: return code 0, and default index param
        '''
        new_tag = "new_tag"
        index_params = get_simple_index_params
        if index_params["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        status = connect.create_partition(ip_table, tag)
        status = connect.create_partition(ip_table, new_tag)
        status, ids = connect.add_vectors(ip_table, vectors)
        status = connect.create_index(ip_table, index_params)
        assert status.OK()
        status = connect.drop_index(ip_table)
        assert status.OK()
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        assert result._nlist == NLIST
        assert result._table_name == ip_table
        assert result._index_type == IndexType.FLAT

    def test_drop_index_repeatly(self, connect, ip_table, get_simple_index_params):
        '''
        target: test drop index repeatedly
        method: create index, call drop index, then drop again
        expected: return code 0
        '''
        index_params = get_simple_index_params
        status, mode = connect._cmd("mode")
        assert status.OK()
        # status, ids = connect.add_vectors(ip_table, vectors)
        status = connect.create_index(ip_table, index_params)
        if str(mode) == "GPU" and (index_params["index_type"] == IndexType.IVF_PQ):
            assert not status.OK()
        else:
            assert status.OK()
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        status = connect.drop_index(ip_table)
        assert status.OK()
        status = connect.drop_index(ip_table)
        assert status.OK()
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        assert result._nlist == NLIST
        assert result._table_name == ip_table
        assert result._index_type == IndexType.FLAT

    @pytest.mark.level(2)
    def test_drop_index_without_connect(self, dis_connect, ip_table):
        '''
        target: test drop index without connection
        method: drop index while disconnected
        expected: raise exception
        '''
        nlist = NLIST
        index_param = {"index_type": IndexType.IVFLAT, "nlist": nlist}
        with pytest.raises(Exception) as e:
            status = dis_connect.drop_index(ip_table, index_param)

    def test_drop_index_table_not_create(self, connect, ip_table):
        '''
        target: test drop index interface when index not created
        method: create table and add vectors in it, then drop the index without creating one
        expected: return code equals 0, drop index succeeds
        '''
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        logging.getLogger().info(index_param)
        status, ids = connect.add_vectors(ip_table, vectors)
        status, result = connect.describe_index(ip_table)
        logging.getLogger().info(result)
        # no create index
        status = connect.drop_index(ip_table)
        logging.getLogger().info(status)
        assert status.OK()

    def test_create_drop_index_repeatly(self, connect, ip_table, get_simple_index_params):
        '''
        target: test create / drop index repeatedly, using the same index params
        method: create index then drop index, twice in a loop
        expected: return code 0
        '''
        index_params = get_simple_index_params
        if index_params["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip some PQ cases")
        status, ids = connect.add_vectors(ip_table, vectors)
        for i in range(2):
            status = connect.create_index(ip_table, index_params)
            assert status.OK()
            status, result = connect.describe_index(ip_table)
            logging.getLogger().info(result)
            status = connect.drop_index(ip_table)
            assert status.OK()
            status, result = connect.describe_index(ip_table)
            logging.getLogger().info(result)
            assert result._nlist == NLIST
            assert result._table_name == ip_table
            assert result._index_type == IndexType.FLAT

    def test_create_drop_index_repeatly_different_index_params(self, connect, ip_table):
        '''
        target: test create / drop index repeatedly, using different index params
        method: create index then drop index, twice in a loop, each time using different index_params to create the index
        expected: return code 0
        '''
        nlist = NLIST
        index_params = [{"index_type": IndexType.IVFLAT, "nlist": nlist}, {"index_type": IndexType.IVF_SQ8, "nlist": nlist}]
        status, ids = connect.add_vectors(ip_table, vectors)
        for i in range(2):
            status = connect.create_index(ip_table, index_params[i])
            assert status.OK()
            status, result = connect.describe_index(ip_table)
            assert result._nlist == index_params[i]["nlist"]
            assert result._table_name == ip_table
            assert result._index_type == index_params[i]["index_type"]
            status, result = connect.describe_index(ip_table)
            logging.getLogger().info(result)
            status = connect.drop_index(ip_table)
            assert status.OK()
            status, result = connect.describe_index(ip_table)
            logging.getLogger().info(result)
            assert result._nlist == NLIST
            assert result._table_name == ip_table
            assert result._index_type == IndexType.FLAT


class TestIndexJAC:
    tmp, vectors = gen_binary_vectors(nb, dim)

    @pytest.fixture(
        scope="function",
        params=gen_index_params()
    )
    def get_index_params(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not supported in CPU mode")
            if request.param["index_type"] == IndexType.IVF_PQ or request.param["index_type"] == IndexType.HNSW:
                pytest.skip("Skip PQ and HNSW temporarily")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_simple_index_params()
    )
    def get_simple_index_params(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not supported in CPU mode")
            if request.param["index_type"] == IndexType.IVF_PQ or request.param["index_type"] == IndexType.HNSW:
                pytest.skip("Skip PQ and HNSW temporarily")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_simple_index_params()
    )
    def get_jaccard_index_params(self, request, connect):
        logging.getLogger().info(request.param)
        if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
            return request.param
        else:
            pytest.skip("Skip index type temporarily")

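    # For binary (Jaccard) tables the cases below only expect FLAT and IVFLAT builds to
    # succeed: get_jaccard_index_params filters the generated params accordingly, and
    # test_create_index asserts that other index types return a non-OK status.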
"""
|
|
******************************************************************
|
|
The following cases are used to test `create_index` function
|
|
******************************************************************
|
|
"""
|
|
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index(self, connect, jac_table, get_jaccard_index_params):
        '''
        target: test create index interface
        method: create table and add vectors in it, create index
        expected: return code equals 0, and search succeeds
        '''
        index_params = get_jaccard_index_params
        logging.getLogger().info(index_params)
        status, ids = connect.add_vectors(jac_table, self.vectors)
        status = connect.create_index(jac_table, index_params)
        if index_params["index_type"] != IndexType.FLAT and index_params["index_type"] != IndexType.IVFLAT:
            assert not status.OK()
        else:
            assert status.OK()

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_partition(self, connect, jac_table, get_jaccard_index_params):
        '''
        target: test create index interface
        method: create table, create partition, add vectors into it, create index
        expected: return code equals 0, and search succeeds
        '''
        index_params = get_jaccard_index_params
        logging.getLogger().info(index_params)
        status = connect.create_partition(jac_table, tag)
        status, ids = connect.add_vectors(jac_table, self.vectors, partition_tag=tag)
        status = connect.create_index(jac_table, index_params)
        assert status.OK()

    @pytest.mark.level(2)
    def test_create_index_without_connect(self, dis_connect, jac_table):
        '''
        target: test create index without connection
        method: create index while disconnected
        expected: raise exception
        '''
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        with pytest.raises(Exception) as e:
            status = dis_connect.create_index(jac_table, index_param)

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_search_with_query_vectors(self, connect, jac_table, get_jaccard_index_params):
        '''
        target: test create index interface, then search with multiple query vectors
        method: create table and add vectors in it, create index, search
        expected: return code equals 0, and search succeeds
        '''
        index_params = get_jaccard_index_params
        logging.getLogger().info(index_params)
        status, ids = connect.add_vectors(jac_table, self.vectors)
        status = connect.create_index(jac_table, index_params)
        logging.getLogger().info(connect.describe_index(jac_table))
        query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]]
        top_k = 5
        status, result = connect.search_vectors(jac_table, top_k, nprobe, query_vecs)
        logging.getLogger().info(result)
        assert status.OK()
        assert len(result) == len(query_vecs)

"""
|
|
******************************************************************
|
|
The following cases are used to test `describe_index` function
|
|
******************************************************************
|
|
"""
|
|
|
|
    def test_describe_index(self, connect, jac_table, get_jaccard_index_params):
        '''
        target: test describe index interface
        method: create table and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
        '''
        index_params = get_jaccard_index_params
        logging.getLogger().info(index_params)
        # status, ids = connect.add_vectors(jac_table, vectors[:5000])
        status = connect.create_index(jac_table, index_params)
        status, result = connect.describe_index(jac_table)
        logging.getLogger().info(result)
        assert result._table_name == jac_table
        assert result._index_type == index_params["index_type"]
        assert result._nlist == index_params["nlist"]

    def test_describe_index_partition(self, connect, jac_table, get_jaccard_index_params):
        '''
        target: test describe index interface
        method: create table, create partition and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
        '''
        index_params = get_jaccard_index_params
        logging.getLogger().info(index_params)
        status = connect.create_partition(jac_table, tag)
        status, ids = connect.add_vectors(jac_table, self.vectors, partition_tag=tag)
        status = connect.create_index(jac_table, index_params)
        status, result = connect.describe_index(jac_table)
        logging.getLogger().info(result)
        assert result._nlist == index_params["nlist"]
        assert result._table_name == jac_table
        assert result._index_type == index_params["index_type"]

"""
|
|
******************************************************************
|
|
The following cases are used to test `drop_index` function
|
|
******************************************************************
|
|
"""
|
|
|
|
    def test_drop_index(self, connect, jac_table, get_jaccard_index_params):
        '''
        target: test drop index interface
        method: create table and add vectors in it, create index, call drop index
        expected: return code 0, and default index param
        '''
        index_params = get_jaccard_index_params
        status, mode = connect._cmd("mode")
        assert status.OK()
        # status, ids = connect.add_vectors(jac_table, vectors)
        status = connect.create_index(jac_table, index_params)
        assert status.OK()
        status, result = connect.describe_index(jac_table)
        logging.getLogger().info(result)
        status = connect.drop_index(jac_table)
        assert status.OK()
        status, result = connect.describe_index(jac_table)
        logging.getLogger().info(result)
        assert result._nlist == NLIST
        assert result._table_name == jac_table
        assert result._index_type == IndexType.FLAT

    def test_drop_index_partition(self, connect, jac_table, get_jaccard_index_params):
        '''
        target: test drop index interface
        method: create table, create partition and add vectors in it, create index on the table, call drop index
        expected: return code 0, and default index param
        '''
        index_params = get_jaccard_index_params
        status = connect.create_partition(jac_table, tag)
        status, ids = connect.add_vectors(jac_table, self.vectors, partition_tag=tag)
        status = connect.create_index(jac_table, index_params)
        assert status.OK()
        status, result = connect.describe_index(jac_table)
        logging.getLogger().info(result)
        status = connect.drop_index(jac_table)
        assert status.OK()
        status, result = connect.describe_index(jac_table)
        logging.getLogger().info(result)
        assert result._nlist == NLIST
        assert result._table_name == jac_table
        assert result._index_type == IndexType.FLAT


class TestIndexHAM:
    tmp, vectors = gen_binary_vectors(nb, dim)

    @pytest.fixture(
        scope="function",
        params=gen_index_params()
    )
    def get_index_params(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not supported in CPU mode")
            if request.param["index_type"] == IndexType.IVF_PQ or request.param["index_type"] == IndexType.HNSW:
                pytest.skip("Skip PQ and HNSW temporarily")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_simple_index_params()
    )
    def get_simple_index_params(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not supported in CPU mode")
            if request.param["index_type"] == IndexType.IVF_PQ or request.param["index_type"] == IndexType.HNSW:
                pytest.skip("Skip PQ and HNSW temporarily")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_simple_index_params()
    )
    def get_hamming_index_params(self, request, connect):
        logging.getLogger().info(request.param)
        if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
            return request.param
        else:
            pytest.skip("Skip index type temporarily")

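    # Same expectation as the Jaccard class above: only FLAT and IVFLAT are exercised
    # for Hamming tables, and other index types are expected to fail to build.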
"""
|
|
******************************************************************
|
|
The following cases are used to test `create_index` function
|
|
******************************************************************
|
|
"""
|
|
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index(self, connect, ham_table, get_hamming_index_params):
        '''
        target: test create index interface
        method: create table and add vectors in it, create index
        expected: return code equals 0, and search succeeds
        '''
        index_params = get_hamming_index_params
        logging.getLogger().info(index_params)
        status, ids = connect.add_vectors(ham_table, self.vectors)
        status = connect.create_index(ham_table, index_params)
        if index_params["index_type"] != IndexType.FLAT and index_params["index_type"] != IndexType.IVFLAT:
            assert not status.OK()
        else:
            assert status.OK()

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_partition(self, connect, ham_table, get_hamming_index_params):
        '''
        target: test create index interface
        method: create table, create partition, add vectors into it, create index
        expected: return code equals 0, and search succeeds
        '''
        index_params = get_hamming_index_params
        logging.getLogger().info(index_params)
        status = connect.create_partition(ham_table, tag)
        status, ids = connect.add_vectors(ham_table, self.vectors, partition_tag=tag)
        status = connect.create_index(ham_table, index_params)
        assert status.OK()
        status, res = connect.get_table_row_count(ham_table)
        assert res == len(self.vectors)

    @pytest.mark.level(2)
    def test_create_index_without_connect(self, dis_connect, ham_table):
        '''
        target: test create index without connection
        method: create index while disconnected
        expected: raise exception
        '''
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        with pytest.raises(Exception) as e:
            status = dis_connect.create_index(ham_table, index_param)

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_search_with_query_vectors(self, connect, ham_table, get_hamming_index_params):
        '''
        target: test create index interface, then search with multiple query vectors
        method: create table and add vectors in it, create index, search
        expected: return code equals 0, and search succeeds
        '''
        index_params = get_hamming_index_params
        logging.getLogger().info(index_params)
        status, ids = connect.add_vectors(ham_table, self.vectors)
        status = connect.create_index(ham_table, index_params)
        logging.getLogger().info(connect.describe_index(ham_table))
        query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]]
        top_k = 5
        status, result = connect.search_vectors(ham_table, top_k, nprobe, query_vecs)
        logging.getLogger().info(result)
        assert status.OK()
        assert len(result) == len(query_vecs)

"""
|
|
******************************************************************
|
|
The following cases are used to test `describe_index` function
|
|
******************************************************************
|
|
"""
|
|
|
|
    def test_describe_index(self, connect, ham_table, get_hamming_index_params):
        '''
        target: test describe index interface
        method: create table and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
        '''
        index_params = get_hamming_index_params
        logging.getLogger().info(index_params)
        # status, ids = connect.add_vectors(ham_table, vectors[:5000])
        status = connect.create_index(ham_table, index_params)
        status, result = connect.describe_index(ham_table)
        logging.getLogger().info(result)
        assert result._table_name == ham_table
        assert result._index_type == index_params["index_type"]
        assert result._nlist == index_params["nlist"]

    def test_describe_index_partition(self, connect, ham_table, get_hamming_index_params):
        '''
        target: test describe index interface
        method: create table, create partition and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
        '''
        index_params = get_hamming_index_params
        logging.getLogger().info(index_params)
        status = connect.create_partition(ham_table, tag)
        status, ids = connect.add_vectors(ham_table, self.vectors, partition_tag=tag)
        status = connect.create_index(ham_table, index_params)
        status, result = connect.describe_index(ham_table)
        logging.getLogger().info(result)
        assert result._nlist == index_params["nlist"]
        assert result._table_name == ham_table
        assert result._index_type == index_params["index_type"]

"""
|
|
******************************************************************
|
|
The following cases are used to test `drop_index` function
|
|
******************************************************************
|
|
"""
|
|
|
|
    def test_drop_index(self, connect, ham_table, get_hamming_index_params):
        '''
        target: test drop index interface
        method: create table and add vectors in it, create index, call drop index
        expected: return code 0, and default index param
        '''
        index_params = get_hamming_index_params
        status, mode = connect._cmd("mode")
        assert status.OK()
        # status, ids = connect.add_vectors(ham_table, vectors)
        status = connect.create_index(ham_table, index_params)
        assert status.OK()
        status, result = connect.describe_index(ham_table)
        logging.getLogger().info(result)
        status = connect.drop_index(ham_table)
        assert status.OK()
        status, result = connect.describe_index(ham_table)
        logging.getLogger().info(result)
        assert result._nlist == NLIST
        assert result._table_name == ham_table
        assert result._index_type == IndexType.FLAT

    def test_drop_index_partition(self, connect, ham_table, get_hamming_index_params):
        '''
        target: test drop index interface
        method: create table, create partition and add vectors in it, create index on the table, call drop index
        expected: return code 0, and default index param
        '''
        index_params = get_hamming_index_params
        status = connect.create_partition(ham_table, tag)
        status, ids = connect.add_vectors(ham_table, self.vectors, partition_tag=tag)
        status = connect.create_index(ham_table, index_params)
        assert status.OK()
        status, result = connect.describe_index(ham_table)
        logging.getLogger().info(result)
        status = connect.drop_index(ham_table)
        assert status.OK()
        status, result = connect.describe_index(ham_table)
        logging.getLogger().info(result)
        assert result._nlist == NLIST
        assert result._table_name == ham_table
        assert result._index_type == IndexType.FLAT


class TestIndexTableInvalid(object):
    """
    Test create / describe / drop index interfaces with invalid table names
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_table_names()
    )
    def get_table_name(self, request):
        yield request.param

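    # gen_invalid_table_names() supplies the parametrized names; the exact values live
    # with the other generators in this suite, but each one is expected to make the
    # index calls below return a non-OK status rather than raise.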
    @pytest.mark.level(1)
    def test_create_index_with_invalid_tablename(self, connect, get_table_name):
        table_name = get_table_name
        nlist = NLIST
        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
        status = connect.create_index(table_name, index_param)
        assert not status.OK()

    @pytest.mark.level(1)
    def test_describe_index_with_invalid_tablename(self, connect, get_table_name):
        table_name = get_table_name
        status, result = connect.describe_index(table_name)
        assert not status.OK()

    @pytest.mark.level(1)
    def test_drop_index_with_invalid_tablename(self, connect, get_table_name):
        table_name = get_table_name
        status = connect.drop_index(table_name)
        assert not status.OK()


class TestCreateIndexParamsInvalid(object):
    """
    Test building index with invalid index params
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_index_params()
    )
    def get_index_params(self, request):
        yield request.param

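    # The invalid params produced by gen_invalid_index_params() either have a missing or
    # wrongly-typed index_type / nlist (expected to raise on the client side) or are
    # otherwise rejected by the server with a non-OK status; the branches below
    # distinguish these two outcomes.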
    @pytest.mark.level(1)
    def test_create_index_with_invalid_index_params(self, connect, table, get_index_params):
        index_params = get_index_params
        index_type = index_params["index_type"]
        nlist = index_params["nlist"]
        logging.getLogger().info(index_params)
        # status, ids = connect.add_vectors(table, vectors)
        if (not index_type) or (not nlist) or (not isinstance(index_type, IndexType)) or (not isinstance(nlist, int)):
            with pytest.raises(Exception) as e:
                status = connect.create_index(table, index_params)
        else:
            status = connect.create_index(table, index_params)
            assert not status.OK()