Mirror of https://gitee.com/milvus-io/milvus.git (synced 2025-12-07 09:38:39 +08:00)
436 lines, 17 KiB, C++
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.

#include <fiu-local.h>
#include <algorithm>
#include <memory>
#include <string>
#include <thread>
#include <utility>

#include "db/Utils.h"
#include "db/engine/EngineFactory.h"
#include "metrics/Metrics.h"
#include "scheduler/SchedInst.h"
#include "scheduler/job/SearchJob.h"
#include "scheduler/task/SearchTask.h"
#include "segment/SegmentReader.h"
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
#include "utils/ValidationUtil.h"

namespace milvus {
namespace scheduler {

static constexpr size_t PARALLEL_REDUCE_THRESHOLD = 10000;
static constexpr size_t PARALLEL_REDUCE_BATCH = 1000;

// TODO(wxyu): remove unused code
// bool
// NeedParallelReduce(uint64_t nq, uint64_t topk) {
//     server::ServerConfig &config = server::ServerConfig::GetInstance();
//     server::ConfigNode &db_config = config.GetConfig(server::CONFIG_DB);
//     bool need_parallel = db_config.GetBoolValue(server::CONFIG_DB_PARALLEL_REDUCE, false);
//     if (!need_parallel) {
//         return false;
//     }
//
//     return nq * topk >= PARALLEL_REDUCE_THRESHOLD;
//}
//
// void
// ParallelReduce(std::function<void(size_t, size_t)> &reduce_function, size_t max_index) {
//     size_t reduce_batch = PARALLEL_REDUCE_BATCH;
//
//     auto thread_count = std::thread::hardware_concurrency() - 1;  // not all cores do this work
//     if (thread_count > 0) {
//         reduce_batch = max_index / thread_count + 1;
//     }
//     ENGINE_LOG_DEBUG << "use " << thread_count <<
//         " threads to reduce in parallel, each thread processes " << reduce_batch << " vectors";
//
//     std::vector<std::shared_ptr<std::thread> > thread_array;
//     size_t from_index = 0;
//     while (from_index < max_index) {
//         size_t to_index = from_index + reduce_batch;
//         if (to_index > max_index) {
//             to_index = max_index;
//         }
//
//         auto reduce_thread = std::make_shared<std::thread>(reduce_function, from_index, to_index);
//         thread_array.push_back(reduce_thread);
//
//         from_index = to_index;
//     }
//
//     for (auto &thread_ptr : thread_array) {
//         thread_ptr->join();
//     }
//}

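// Records file-size metrics for the file a search task operates on: RAW and TO_INDEX files are counted
// against the raw-file histogram/total/gauge, every other file type against the index-file metrics.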
void
CollectFileMetrics(int file_type, size_t file_size) {
    server::MetricsBase& inst = server::Metrics::GetInstance();
    switch (file_type) {
        case TableFileSchema::RAW:
        case TableFileSchema::TO_INDEX: {
            inst.RawFileSizeHistogramObserve(file_size);
            inst.RawFileSizeTotalIncrement(file_size);
            inst.RawFileSizeGaugeSet(file_size);
            break;
        }
        default: {
            inst.IndexFileSizeHistogramObserve(file_size);
            inst.IndexFileSizeTotalIncrement(file_size);
            inst.IndexFileSizeGaugeSet(file_size);
            break;
        }
    }
}

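// The constructor decides two things per file: the reduce direction (ascending for distance metrics such as
// L2/Hamming/Jaccard/Tanimoto, descending for the IP similarity metric) and the engine used to run the search
// (brute-force IDMAP engines for RAW/TO_INDEX/BACKUP files, otherwise the index engine recorded in the file meta).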
XSearchTask::XSearchTask(const std::shared_ptr<server::Context>& context, TableFileSchemaPtr file, TaskLabelPtr label)
    : Task(TaskType::SearchTask, std::move(label)), context_(context), file_(file) {
    if (file_) {
        // distance -- value 0 means two vectors are equal, ascending reduce, L2/HAMMING/JACCARD/TANIMOTO ...
        // similarity -- infinite value means two vectors are equal, descending reduce, IP
        if (file_->metric_type_ == static_cast<int>(MetricType::IP)) {
            ascending_reduce = false;
        }

        EngineType engine_type;
        if (file->file_type_ == TableFileSchema::FILE_TYPE::RAW ||
            file->file_type_ == TableFileSchema::FILE_TYPE::TO_INDEX ||
            file->file_type_ == TableFileSchema::FILE_TYPE::BACKUP) {
            engine_type = server::ValidationUtil::IsBinaryMetricType(file->metric_type_) ? EngineType::FAISS_BIN_IDMAP
                                                                                         : EngineType::FAISS_IDMAP;
        } else {
            engine_type = (EngineType)file->engine_type_;
        }

        index_engine_ = EngineFactory::Build(file_->dimension_, file_->location_, engine_type,
                                             (MetricType)file_->metric_type_, file_->nlist_);
    }
}

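// Load() moves the file's index to the requested place: DISK2CPU loads it from storage, CPU2GPU copies it to
// the given GPU device (with the hybrid path for FAISS_IVFSQ8H), GPU2CPU copies it back. A failure is turned
// into a Status (out-of-memory is reported separately) and handed to the owning SearchJob, which is still
// notified via SearchDone() so it does not wait on this file forever.
//
// Illustrative sketch only (not part of this file): the fiu fault points below could be driven from a unit
// test with libfiu, assuming the test links against fiu and calls fiu_init() first, e.g.
//     fiu_init(0);
//     fiu_enable("XSearchTask.Load.throw_std_exception", 1, nullptr, 0);
//     task->Load(LoadType::DISK2CPU, 0);  // exercises the exception-handling branch
//     fiu_disable("XSearchTask.Load.throw_std_exception");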
void
XSearchTask::Load(LoadType type, uint8_t device_id) {
    auto load_ctx = context_->Follower("XSearchTask::Load " + std::to_string(file_->id_));

    TimeRecorder rc("");
    Status stat = Status::OK();
    std::string error_msg;
    std::string type_str;

    try {
        fiu_do_on("XSearchTask.Load.throw_std_exception", throw std::exception());
        if (type == LoadType::DISK2CPU) {
            stat = index_engine_->Load();
            type_str = "DISK2CPU";
        } else if (type == LoadType::CPU2GPU) {
            bool hybrid = false;
            if (index_engine_->IndexEngineType() == engine::EngineType::FAISS_IVFSQ8H) {
                hybrid = true;
            }
            stat = index_engine_->CopyToGpu(device_id, hybrid);
            type_str = "CPU2GPU:" + std::to_string(device_id);
        } else if (type == LoadType::GPU2CPU) {
            stat = index_engine_->CopyToCpu();
            type_str = "GPU2CPU";
        } else {
            error_msg = "Wrong load type";
            stat = Status(SERVER_UNEXPECTED_ERROR, error_msg);
        }
    } catch (std::exception& ex) {
        // typical errors: out of disk space or permission denied
        error_msg = "Failed to load index file: " + std::string(ex.what());
        stat = Status(SERVER_UNEXPECTED_ERROR, error_msg);
    }
    fiu_do_on("XSearchTask.Load.out_of_memory", stat = Status(SERVER_UNEXPECTED_ERROR, "out of memory"));

    if (!stat.ok()) {
        Status s;
        if (stat.ToString().find("out of memory") != std::string::npos) {
            error_msg = "out of memory: " + type_str;
            s = Status(SERVER_OUT_OF_MEMORY, error_msg);
        } else {
            error_msg = "Failed to load index file: " + type_str;
            s = Status(SERVER_UNEXPECTED_ERROR, error_msg);
        }

        if (auto job = job_.lock()) {
            auto search_job = std::static_pointer_cast<scheduler::SearchJob>(job);
            search_job->SearchDone(file_->id_);
            search_job->GetStatus() = s;
        }

        return;
    }

    size_t file_size = index_engine_->PhysicalSize();

    std::string info = "Search task load file id:" + std::to_string(file_->id_) + " " + type_str +
                       " file type:" + std::to_string(file_->file_type_) + " size:" + std::to_string(file_size) +
                       " bytes from location: " + file_->location_ + " totally cost";
    double span = rc.ElapseFromBegin(info);
    // for (auto &context : search_contexts_) {
    //     context->AccumLoadCost(span);
    // }

    CollectFileMetrics(file_->file_type_, file_size);

    // step 2: return search task for later execution
    index_id_ = file_->id_;
    index_type_ = file_->file_type_;
    // search_contexts_.swap(search_contexts_);

    load_ctx->GetTraceContext()->GetSpan()->Finish();
}

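// Execute() runs the actual search against the loaded engine: it reads nq/topk/nprobe and the query vectors
// from the owning SearchJob, allocates nq*topk result buffers, dispatches to the float, binary, or
// search-by-id overload, then merges this file's topk into the job-wide result set under the job mutex and
// signals SearchDone(). The engine is released at the end so the resource can reclaim its memory.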
void
XSearchTask::Execute() {
    auto execute_ctx = context_->Follower("XSearchTask::Execute " + std::to_string(index_id_));

    if (index_engine_ == nullptr) {
        return;
    }

    // ENGINE_LOG_DEBUG << "Searching in file id:" << index_id_ << " with "
    //     << search_contexts_.size() << " tasks";

    TimeRecorder rc("DoSearch file id:" + std::to_string(index_id_));

    server::CollectDurationMetrics metrics(index_type_);

    std::vector<int64_t> output_ids;
    std::vector<float> output_distance;

    if (auto job = job_.lock()) {
        auto search_job = std::static_pointer_cast<scheduler::SearchJob>(job);
        // step 1: allocate memory
        uint64_t nq = search_job->nq();
        uint64_t topk = search_job->topk();
        uint64_t nprobe = search_job->nprobe();
        const engine::VectorsData& vectors = search_job->vectors();

        output_ids.resize(topk * nq);
        output_distance.resize(topk * nq);
        std::string hdr =
            "job " + std::to_string(search_job->id()) + " nq " + std::to_string(nq) + " topk " + std::to_string(topk);

        try {
            fiu_do_on("XSearchTask.Execute.throw_std_exception", throw std::exception());
            // step 2: search
            bool hybrid = false;
            if (index_engine_->IndexEngineType() == engine::EngineType::FAISS_IVFSQ8H &&
                ResMgrInst::GetInstance()->GetResource(path().Last())->type() == ResourceType::CPU) {
                hybrid = true;
            }
            Status s;
            if (!vectors.float_data_.empty()) {
                s = index_engine_->Search(nq, vectors.float_data_.data(), topk, nprobe, output_distance.data(),
                                          output_ids.data(), hybrid);
            } else if (!vectors.binary_data_.empty()) {
                s = index_engine_->Search(nq, vectors.binary_data_.data(), topk, nprobe, output_distance.data(),
                                          output_ids.data(), hybrid);
            } else if (!vectors.id_array_.empty()) {
                s = index_engine_->Search(nq, vectors.id_array_, topk, nprobe, output_distance.data(),
                                          output_ids.data(), hybrid);
            }

            fiu_do_on("XSearchTask.Execute.search_fail", s = Status(SERVER_UNEXPECTED_ERROR, ""));

            if (!s.ok()) {
                search_job->GetStatus() = s;
                search_job->SearchDone(index_id_);
                return;
            }

            double span = rc.RecordSection(hdr + ", do search");
            // search_job->AccumSearchCost(span);

            // step 3: pick up topk result
            auto spec_k = file_->row_count_ < topk ? file_->row_count_ : topk;
            if (search_job->GetResultIds().front() == -1 && search_job->GetResultIds().size() > spec_k) {
                // initialized results set
                search_job->GetResultIds().resize(spec_k);
                search_job->GetResultDistances().resize(spec_k);
            }
            {
                std::unique_lock<std::mutex> lock(search_job->mutex());
                XSearchTask::MergeTopkToResultSet(output_ids, output_distance, spec_k, nq, topk, ascending_reduce,
                                                  search_job->GetResultIds(), search_job->GetResultDistances());
            }

            span = rc.RecordSection(hdr + ", reduce topk");
            // search_job->AccumReduceCost(span);
        } catch (std::exception& ex) {
            ENGINE_LOG_ERROR << "SearchTask encounter exception: " << ex.what();
            // search_job->IndexSearchDone(index_id_);  // mark as done to avoid dead lock, even if search failed
        }

        // step 4: notify to send result to client
        search_job->SearchDone(index_id_);
    }

    rc.ElapseFromBegin("totally cost");

    // release index in resource
    index_engine_ = nullptr;

    execute_ctx->GetTraceContext()->GetSpan()->Finish();
}

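// MergeTopkToResultSet() merges one file's per-query topk (src) into the running job-wide result (tar).
// Both lists are already sorted per query; for each of the nq queries it performs a two-way sorted merge into
// a buffer of size min(topk, src_k + tar_k) and then swaps the buffer into tar. A worked example with nq = 1,
// topk = 3, ascending (smaller distance is better):
//     src: ids {7, 3}, distances {0.1, 0.4}   (src_k = 2)
//     tar: ids {5, 9}, distances {0.2, 0.6}   (tar_k = 2)
//     result after merge: ids {7, 5, 3}, distances {0.1, 0.2, 0.4}
// An id of -1 marks an uninitialized target slot and always loses to a real candidate from src.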
void
XSearchTask::MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const scheduler::ResultDistances& src_distances,
                                  size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids,
                                  scheduler::ResultDistances& tar_distances) {
    if (src_ids.empty()) {
        return;
    }

    size_t tar_k = tar_ids.size() / nq;
    size_t buf_k = std::min(topk, src_k + tar_k);

    scheduler::ResultIds buf_ids(nq * buf_k, -1);
    scheduler::ResultDistances buf_distances(nq * buf_k, 0.0);

    for (uint64_t i = 0; i < nq; i++) {
        size_t buf_k_j = 0, src_k_j = 0, tar_k_j = 0;
        size_t buf_idx, src_idx, tar_idx;

        size_t buf_k_multi_i = buf_k * i;
        size_t src_k_multi_i = topk * i;
        size_t tar_k_multi_i = tar_k * i;

        while (buf_k_j < buf_k && src_k_j < src_k && tar_k_j < tar_k) {
            src_idx = src_k_multi_i + src_k_j;
            tar_idx = tar_k_multi_i + tar_k_j;
            buf_idx = buf_k_multi_i + buf_k_j;

            if ((tar_ids[tar_idx] == -1) ||  // initialized value
                (ascending && src_distances[src_idx] < tar_distances[tar_idx]) ||
                (!ascending && src_distances[src_idx] > tar_distances[tar_idx])) {
                buf_ids[buf_idx] = src_ids[src_idx];
                buf_distances[buf_idx] = src_distances[src_idx];
                src_k_j++;
            } else {
                buf_ids[buf_idx] = tar_ids[tar_idx];
                buf_distances[buf_idx] = tar_distances[tar_idx];
                tar_k_j++;
            }
            buf_k_j++;
        }

        if (buf_k_j < buf_k) {
            if (src_k_j < src_k) {
                while (buf_k_j < buf_k && src_k_j < src_k) {
                    buf_idx = buf_k_multi_i + buf_k_j;
                    src_idx = src_k_multi_i + src_k_j;
                    buf_ids[buf_idx] = src_ids[src_idx];
                    buf_distances[buf_idx] = src_distances[src_idx];
                    src_k_j++;
                    buf_k_j++;
                }
            } else {
                while (buf_k_j < buf_k && tar_k_j < tar_k) {
                    buf_idx = buf_k_multi_i + buf_k_j;
                    tar_idx = tar_k_multi_i + tar_k_j;
                    buf_ids[buf_idx] = tar_ids[tar_idx];
                    buf_distances[buf_idx] = tar_distances[tar_idx];
                    tar_k_j++;
                    buf_k_j++;
                }
            }
        }
    }
    tar_ids.swap(buf_ids);
    tar_distances.swap(buf_distances);
}

const std::string&
XSearchTask::GetLocation() const {
    return file_->location_;
}

size_t
XSearchTask::GetIndexId() const {
    return file_->id_;
}

// void
// XSearchTask::MergeTopkArray(std::vector<int64_t>& tar_ids, std::vector<float>& tar_distance, uint64_t& tar_input_k,
//                             const std::vector<int64_t>& src_ids, const std::vector<float>& src_distance,
//                             uint64_t src_input_k, uint64_t nq, uint64_t topk, bool ascending) {
//     if (src_ids.empty() || src_distance.empty()) {
//         return;
//     }
//
//     uint64_t output_k = std::min(topk, tar_input_k + src_input_k);
//     std::vector<int64_t> id_buf(nq * output_k, -1);
//     std::vector<float> dist_buf(nq * output_k, 0.0);
//
//     uint64_t buf_k, src_k, tar_k;
//     uint64_t src_idx, tar_idx, buf_idx;
//     uint64_t src_input_k_multi_i, tar_input_k_multi_i, buf_k_multi_i;
//
//     for (uint64_t i = 0; i < nq; i++) {
//         src_input_k_multi_i = src_input_k * i;
//         tar_input_k_multi_i = tar_input_k * i;
//         buf_k_multi_i = output_k * i;
//         buf_k = src_k = tar_k = 0;
//         while (buf_k < output_k && src_k < src_input_k && tar_k < tar_input_k) {
//             src_idx = src_input_k_multi_i + src_k;
//             tar_idx = tar_input_k_multi_i + tar_k;
//             buf_idx = buf_k_multi_i + buf_k;
//             if ((ascending && src_distance[src_idx] < tar_distance[tar_idx]) ||
//                 (!ascending && src_distance[src_idx] > tar_distance[tar_idx])) {
//                 id_buf[buf_idx] = src_ids[src_idx];
//                 dist_buf[buf_idx] = src_distance[src_idx];
//                 src_k++;
//             } else {
//                 id_buf[buf_idx] = tar_ids[tar_idx];
//                 dist_buf[buf_idx] = tar_distance[tar_idx];
//                 tar_k++;
//             }
//             buf_k++;
//         }
//
//         if (buf_k < output_k) {
//             if (src_k < src_input_k) {
//                 while (buf_k < output_k && src_k < src_input_k) {
//                     src_idx = src_input_k_multi_i + src_k;
//                     buf_idx = buf_k_multi_i + buf_k;
//                     id_buf[buf_idx] = src_ids[src_idx];
//                     dist_buf[buf_idx] = src_distance[src_idx];
//                     src_k++;
//                     buf_k++;
//                 }
//             } else {
//                 while (buf_k < output_k && tar_k < tar_input_k) {
//                     tar_idx = tar_input_k_multi_i + tar_k;
//                     buf_idx = buf_k_multi_i + buf_k;
//                     id_buf[buf_idx] = tar_ids[tar_idx];
//                     dist_buf[buf_idx] = tar_distance[tar_idx];
//                     tar_k++;
//                     buf_k++;
//                 }
//             }
//         }
//     }
//
//     tar_ids.swap(id_buf);
//     tar_distance.swap(dist_buf);
//     tar_input_k = output_k;
//}

}  // namespace scheduler
}  // namespace milvus