diff --git a/.gitignore b/.gitignore index 6b2d6fc97b..8fda9f2980 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,6 @@ cmake_build *.lo *.tar.gz *.log +.coverage +*.pyc +cov_html/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d46ed6070..906799bcfe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,12 @@ Please mark all change in change log and use the ticket from JIRA. # Milvus 0.6.0 (TODO) ## Bug +- \#246 - Exclude src/external folder from code coverage for jenkin ci ## Feature - \#12 - Pure CPU version for Milvus +- \#77 - Support table partition +- \#226 - Experimental shards middleware for Milvus ## Improvement @@ -84,7 +87,7 @@ Please mark all change in change log and use the ticket from JIRA. - MS-658 - Fix SQ8 Hybrid can't search - MS-665 - IVF_SQ8H search crash when no GPU resource in search_resources - \#9 - Change default gpu_cache_capacity to 4 -- \#20 - C++ sdk example get grpc error +- \#20 - C++ sdk example get grpc error - \#23 - Add unittest to improve code coverage - \#31 - make clang-format failed after run build.sh -l - \#39 - Create SQ8H index hang if using github server version @@ -136,7 +139,7 @@ Please mark all change in change log and use the ticket from JIRA. - MS-635 - Add compile option to support customized faiss - MS-660 - add ubuntu_build_deps.sh - \#18 - Add all test cases - + # Milvus 0.4.0 (2019-09-12) ## Bug @@ -345,11 +348,11 @@ Please mark all change in change log and use the ticket from JIRA. - MS-82 - Update server startup welcome message - MS-83 - Update vecwise to Milvus - MS-77 - Performance issue of post-search action -- MS-22 - Enhancement for MemVector size control +- MS-22 - Enhancement for MemVector size control - MS-92 - Unify behavior of debug and release build - MS-98 - Install all unit test to installation directory - MS-115 - Change is_startup of metric_config switch from true to on -- MS-122 - Archive criteria config +- MS-122 - Archive criteria config - MS-124 - HasTable interface - MS-126 - Add more error code - MS-128 - Change default db path diff --git a/ci/jenkins/scripts/coverage.sh b/ci/jenkins/scripts/coverage.sh index 07ab210d2f..5c9d010d46 100755 --- a/ci/jenkins/scripts/coverage.sh +++ b/ci/jenkins/scripts/coverage.sh @@ -132,8 +132,7 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \ "*/src/server/Server.cpp" \ "*/src/server/DBWrapper.cpp" \ "*/src/server/grpc_impl/GrpcServer.cpp" \ - "*/src/external/easyloggingpp/easylogging++.h" \ - "*/src/external/easyloggingpp/easylogging++.cc" + "*/src/external/*" if [ $? 
-ne 0 ]; then echo "gen ${FILE_INFO_OUTPUT_NEW} failed" diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index a9351ff820..7b6a115527 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -31,16 +31,16 @@ GET_CURRENT_TIME(BUILD_TIME) string(REGEX REPLACE "\n" "" BUILD_TIME ${BUILD_TIME}) message(STATUS "Build time = ${BUILD_TIME}") -MACRO (GET_GIT_BRANCH_NAME GIT_BRANCH_NAME) +MACRO(GET_GIT_BRANCH_NAME GIT_BRANCH_NAME) execute_process(COMMAND "git" rev-parse --abbrev-ref HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME}) - if(GIT_BRANCH_NAME STREQUAL "") + if (GIT_BRANCH_NAME STREQUAL "") execute_process(COMMAND "git" symbolic-ref --short -q HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME}) - endif() -ENDMACRO (GET_GIT_BRANCH_NAME) + endif () +ENDMACRO(GET_GIT_BRANCH_NAME) GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME) message(STATUS "GIT_BRANCH_NAME = ${GIT_BRANCH_NAME}") -if(NOT GIT_BRANCH_NAME STREQUAL "") +if (NOT GIT_BRANCH_NAME STREQUAL "") string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME}) endif () @@ -69,7 +69,7 @@ if (MILVUS_VERSION_MAJOR STREQUAL "" OR MILVUS_VERSION_PATCH STREQUAL "") message(WARNING "Failed to determine Milvus version from git branch name") set(MILVUS_VERSION "0.6.0") -endif() +endif () message(STATUS "Build version = ${MILVUS_VERSION}") configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/config.h.in ${CMAKE_CURRENT_SOURCE_DIR}/src/config.h @ONLY) @@ -118,17 +118,17 @@ include(DefineOptions) include(BuildUtils) include(ThirdPartyPackages) -set(MILVUS_GPU_VERSION false) -if (MILVUS_CPU_VERSION) - message(STATUS "Building Milvus CPU version") - add_compile_definitions("MILVUS_CPU_VERSION") -else () +set(MILVUS_CPU_VERSION false) +if (MILVUS_GPU_VERSION) message(STATUS "Building Milvus GPU version") - set(MILVUS_GPU_VERSION true) add_compile_definitions("MILVUS_GPU_VERSION") enable_language(CUDA) find_package(CUDA 10 REQUIRED) set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda") +else () + message(STATUS "Building Milvus CPU version") + set(MILVUS_CPU_VERSION true) + add_compile_definitions("MILVUS_CPU_VERSION") endif () if (CMAKE_BUILD_TYPE STREQUAL "Release") diff --git a/core/build.sh b/core/build.sh index 819278b94a..e844528ad3 100755 --- a/core/build.sh +++ b/core/build.sh @@ -12,7 +12,7 @@ USE_JFROG_CACHE="OFF" RUN_CPPLINT="OFF" CUSTOMIZATION="OFF" # default use ori faiss CUDA_COMPILER=/usr/local/cuda/bin/nvcc -CPU_VERSION="OFF" +GPU_VERSION="OFF" #defaults to CPU version WITH_MKL="OFF" CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}" @@ -51,7 +51,7 @@ do c) BUILD_COVERAGE="ON" ;; - g) + z) PROFILING="ON" ;; j) @@ -60,8 +60,8 @@ do x) CUSTOMIZATION="OFF" # force use ori faiss ;; - z) - CPU_VERSION="ON" + g) + GPU_VERSION="ON" ;; m) WITH_MKL="ON" @@ -77,14 +77,14 @@ parameter: -l: run cpplint, clang-format and clang-tidy(default: OFF) -r: remove previous build directory(default: OFF) -c: code coverage(default: OFF) --g: profiling(default: OFF) +-z: profiling(default: OFF) -j: use jfrog cache build directory(default: OFF) --z: build pure CPU version(default: OFF) +-g: build GPU version(default: OFF) -m: build with MKL(default: OFF) -h: help usage: -./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-z] [-m] [-h] +./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-z] [-j] [-g] [-m] [-h] " exit 0 ;; @@ -116,7 +116,7 @@ CMAKE_CMD="cmake \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ -DCUSTOMIZATION=${CUSTOMIZATION} \ -DFAISS_URL=${CUSTOMIZED_FAISS_URL} \ 
--DMILVUS_CPU_VERSION=${CPU_VERSION} \ +-DMILVUS_GPU_VERSION=${GPU_VERSION} \ -DBUILD_FAISS_WITH_MKL=${WITH_MKL} \ ../" echo ${CMAKE_CMD} diff --git a/core/cmake/DefineOptions.cmake b/core/cmake/DefineOptions.cmake index c7f4f73d94..6e05a12dd2 100644 --- a/core/cmake/DefineOptions.cmake +++ b/core/cmake/DefineOptions.cmake @@ -13,16 +13,16 @@ macro(define_option name description default) endmacro() function(list_join lst glue out) - if("${${lst}}" STREQUAL "") + if ("${${lst}}" STREQUAL "") set(${out} "" PARENT_SCOPE) return() - endif() + endif () list(GET ${lst} 0 joined) list(REMOVE_AT ${lst} 0) - foreach(item ${${lst}}) + foreach (item ${${lst}}) set(joined "${joined}${glue}${item}") - endforeach() + endforeach () set(${out} ${joined} PARENT_SCOPE) endfunction() @@ -35,15 +35,15 @@ macro(define_option_string name description default) set("${name}_OPTION_ENUM" ${ARGN}) list_join("${name}_OPTION_ENUM" "|" "${name}_OPTION_ENUM") - if(NOT ("${${name}_OPTION_ENUM}" STREQUAL "")) + if (NOT ("${${name}_OPTION_ENUM}" STREQUAL "")) set_property(CACHE ${name} PROPERTY STRINGS ${ARGN}) - endif() + endif () endmacro() #---------------------------------------------------------------------- -set_option_category("CPU version") +set_option_category("GPU version") -define_option(MILVUS_CPU_VERSION "Build CPU version only" OFF) +define_option(MILVUS_GPU_VERSION "Build GPU version" OFF) #---------------------------------------------------------------------- set_option_category("Thirdparty") @@ -51,11 +51,11 @@ set_option_category("Thirdparty") set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "AUTO") define_option_string(MILVUS_DEPENDENCY_SOURCE - "Method to use for acquiring MILVUS's build dependencies" - "${MILVUS_DEPENDENCY_SOURCE_DEFAULT}" - "AUTO" - "BUNDLED" - "SYSTEM") + "Method to use for acquiring MILVUS's build dependencies" + "${MILVUS_DEPENDENCY_SOURCE_DEFAULT}" + "AUTO" + "BUNDLED" + "SYSTEM") define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD "Show output from ExternalProjects rather than just logging to files" ON) @@ -75,33 +75,21 @@ define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON) if (MILVUS_ENABLE_PROFILING STREQUAL "ON") define_option(MILVUS_WITH_LIBUNWIND "Build with libunwind" ON) define_option(MILVUS_WITH_GPERFTOOLS "Build with gperftools" ON) -endif() +endif () define_option(MILVUS_WITH_GRPC "Build with GRPC" ON) define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON) -#---------------------------------------------------------------------- -if(MSVC) - set_option_category("MSVC") - - define_option(MSVC_LINK_VERBOSE - "Pass verbose linking options when linking libraries and executables" - OFF) - - define_option(MILVUS_USE_STATIC_CRT "Build MILVUS with statically linked CRT" OFF) -endif() - - #---------------------------------------------------------------------- set_option_category("Test and benchmark") unset(MILVUS_BUILD_TESTS CACHE) if (BUILD_UNIT_TEST) define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" ON) -else() +else () define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" OFF) -endif(BUILD_UNIT_TEST) +endif (BUILD_UNIT_TEST) #---------------------------------------------------------------------- macro(config_summary) @@ -113,12 +101,12 @@ macro(config_summary) message(STATUS " Generator: ${CMAKE_GENERATOR}") message(STATUS " Build type: ${CMAKE_BUILD_TYPE}") message(STATUS " Source directory: ${CMAKE_CURRENT_SOURCE_DIR}") - if(${CMAKE_EXPORT_COMPILE_COMMANDS}) + if (${CMAKE_EXPORT_COMPILE_COMMANDS}) message( STATUS " Compile 
commands: ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json") - endif() + endif () - foreach(category ${MILVUS_OPTION_CATEGORIES}) + foreach (category ${MILVUS_OPTION_CATEGORIES}) message(STATUS) message(STATUS "${category} options:") @@ -126,50 +114,50 @@ macro(config_summary) set(option_names ${MILVUS_${category}_OPTION_NAMES}) set(max_value_length 0) - foreach(name ${option_names}) + foreach (name ${option_names}) string(LENGTH "\"${${name}}\"" value_length) - if(${max_value_length} LESS ${value_length}) + if (${max_value_length} LESS ${value_length}) set(max_value_length ${value_length}) - endif() - endforeach() + endif () + endforeach () - foreach(name ${option_names}) - if("${${name}_OPTION_TYPE}" STREQUAL "string") + foreach (name ${option_names}) + if ("${${name}_OPTION_TYPE}" STREQUAL "string") set(value "\"${${name}}\"") - else() + else () set(value "${${name}}") - endif() + endif () set(default ${${name}_OPTION_DEFAULT}) set(description ${${name}_OPTION_DESCRIPTION}) string(LENGTH ${description} description_length) - if(${description_length} LESS 70) + if (${description_length} LESS 70) string( SUBSTRING " " ${description_length} -1 description_padding) - else() + else () set(description_padding " ") - endif() + endif () set(comment "[${name}]") - if("${value}" STREQUAL "${default}") + if ("${value}" STREQUAL "${default}") set(comment "[default] ${comment}") - endif() + endif () - if(NOT ("${${name}_OPTION_ENUM}" STREQUAL "")) + if (NOT ("${${name}_OPTION_ENUM}" STREQUAL "")) set(comment "${comment} [${${name}_OPTION_ENUM}]") - endif() + endif () string( SUBSTRING "${value} " 0 ${max_value_length} value) message(STATUS " ${description} ${description_padding} ${value} ${comment}") - endforeach() + endforeach () - endforeach() + endforeach () endmacro() diff --git a/core/conf/server_config.template b/core/conf/server_config.template index bee0a67b27..8fc31366e3 100644 --- a/core/conf/server_config.template +++ b/core/conf/server_config.template @@ -32,9 +32,9 @@ cache_config: cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] cache_insert_data: false # whether to load inserted data into cache, must be a boolean -# Skip the following config if you are using GPU version - gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer - gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] +#Uncomment the following config if you are using GPU version +# gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer +# gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] engine_config: use_blas_threshold: 1100 # if nq < use_blas_threshold, use SSE, faster with fluctuated response times @@ -44,5 +44,4 @@ engine_config: resource_config: search_resources: # define the devices used for search computation, must be in format: cpu or gpux - cpu - - gpu0 - index_build_device: gpu0 # CPU / GPU used for building index, must be in format: cpu / gpux + index_build_device: cpu # CPU / GPU used for building index, must be in format: cpu / gpux diff --git a/core/coverage.sh b/core/coverage.sh index 2cb0861de4..9011e290e5 100755 --- a/core/coverage.sh +++ b/core/coverage.sh @@ -122,8 +122,6 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \ "*/src/server/Server.cpp" \ "*/src/server/DBWrapper.cpp" \ 
"*/src/server/grpc_impl/GrpcServer.cpp" \ - "*/easylogging++.h" \ - "*/easylogging++.cc" \ "*/src/external/*" if [ $? -ne 0 ]; then diff --git a/core/migration/README.md b/core/migration/README.md new file mode 100644 index 0000000000..7c318c1393 --- /dev/null +++ b/core/migration/README.md @@ -0,0 +1,28 @@ +## Data Migration + +####0.3.x +legacy data is not migrate-able for later versions + +####0.4.x +legacy data can be reused directly by 0.5.x + +legacy data can be migrated to 0.6.x + +####0.5.x +legacy data can be migrated to 0.6.x + +####0.6.x +how to migrate legacy 0.4.x/0.5.x data + +for sqlite meta: +```shell + $ sqlite3 [parth_to]/meta.sqlite < sqlite_4_to_6.sql +``` + +for mysql meta: +```shell + $ mysql -h127.0.0.1 -uroot -p123456 -Dmilvus < mysql_4_to_6.sql +``` + + + diff --git a/core/migration/mysql_4_to_6.sql b/core/migration/mysql_4_to_6.sql new file mode 100644 index 0000000000..f8a5b1b70b --- /dev/null +++ b/core/migration/mysql_4_to_6.sql @@ -0,0 +1,4 @@ +alter table Tables add column owner_table VARCHAR(255) DEFAULT '' NOT NULL; +alter table Tables add column partition_tag VARCHAR(255) DEFAULT '' NOT NULL; +alter table Tables add column version VARCHAR(64) DEFAULT '0.6.0' NOT NULL; +update Tables set version='0.6.0'; diff --git a/core/migration/sqlite_4_to_6.sql b/core/migration/sqlite_4_to_6.sql new file mode 100644 index 0000000000..2069145046 --- /dev/null +++ b/core/migration/sqlite_4_to_6.sql @@ -0,0 +1,4 @@ +alter table Tables add column 'owner_table' TEXT DEFAULT '' NOT NULL; +alter table Tables add column 'partition_tag' TEXT DEFAULT '' NOT NULL; +alter table Tables add column 'version' TEXT DEFAULT '0.6.0' NOT NULL; +update Tables set version='0.6.0'; diff --git a/core/src/CMakeLists.txt b/core/src/CMakeLists.txt index 9e4065d646..79b5e0f1da 100644 --- a/core/src/CMakeLists.txt +++ b/core/src/CMakeLists.txt @@ -24,6 +24,9 @@ include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-status) include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-milvus) add_subdirectory(index) +if (BUILD_FAISS_WITH_MKL) + add_compile_definitions("WITH_MKL") +endif () set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE) foreach (dir ${INDEX_INCLUDE_DIRS}) diff --git a/core/src/db/DB.h b/core/src/db/DB.h index a790fadb50..09bbd4af45 100644 --- a/core/src/db/DB.h +++ b/core/src/db/DB.h @@ -47,43 +47,68 @@ class DB { virtual Status CreateTable(meta::TableSchema& table_schema_) = 0; + virtual Status - DeleteTable(const std::string& table_id, const meta::DatesT& dates) = 0; + DropTable(const std::string& table_id, const meta::DatesT& dates) = 0; + virtual Status DescribeTable(meta::TableSchema& table_schema_) = 0; + virtual Status HasTable(const std::string& table_id, bool& has_or_not_) = 0; + virtual Status AllTables(std::vector& table_schema_array) = 0; + virtual Status GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0; + virtual Status PreloadTable(const std::string& table_id) = 0; + virtual Status UpdateTableFlag(const std::string& table_id, int64_t flag) = 0; virtual Status - InsertVectors(const std::string& table_id_, uint64_t n, const float* vectors, IDNumbers& vector_ids_) = 0; + CreatePartition(const std::string& table_id, const std::string& partition_name, + const std::string& partition_tag) = 0; virtual Status - Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - QueryResults& results) = 0; + DropPartition(const std::string& partition_name) = 0; virtual Status - Query(const std::string& table_id, uint64_t k, 
uint64_t nq, uint64_t nprobe, const float* vectors, - const meta::DatesT& dates, QueryResults& results) = 0; + DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) = 0; virtual Status - Query(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) = 0; + ShowPartitions(const std::string& table_id, std::vector& partiton_schema_array) = 0; + + virtual Status + InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors, + IDNumbers& vector_ids_) = 0; + + virtual Status + Query(const std::string& table_id, const std::vector& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) = 0; + + virtual Status + Query(const std::string& table_id, const std::vector& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) = 0; + + virtual Status + QueryByFileID(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) = 0; virtual Status Size(uint64_t& result) = 0; virtual Status CreateIndex(const std::string& table_id, const TableIndex& index) = 0; + virtual Status DescribeIndex(const std::string& table_id, TableIndex& index) = 0; + virtual Status DropIndex(const std::string& table_id) = 0; diff --git a/core/src/db/DBImpl.cpp b/core/src/db/DBImpl.cpp index 6995de3d14..3e0501b84e 100644 --- a/core/src/db/DBImpl.cpp +++ b/core/src/db/DBImpl.cpp @@ -30,6 +30,7 @@ #include "scheduler/job/DeleteJob.h" #include "scheduler/job/SearchJob.h" #include "utils/Log.h" +#include "utils/StringHelpFunctions.h" #include "utils/TimeRecorder.h" #include @@ -38,6 +39,7 @@ #include #include #include +#include #include namespace milvus { @@ -49,6 +51,17 @@ constexpr uint64_t METRIC_ACTION_INTERVAL = 1; constexpr uint64_t COMPACT_ACTION_INTERVAL = 1; constexpr uint64_t INDEX_ACTION_INTERVAL = 1; +static const Status SHUTDOWN_ERROR = Status(DB_ERROR, "Milsvus server is shutdown!"); + +void +TraverseFiles(const meta::DatePartionedTableFilesSchema& date_files, meta::TableFilesSchema& files_array) { + for (auto& day_files : date_files) { + for (auto& file : day_files.second) { + files_array.push_back(file); + } + } +} + } // namespace DBImpl::DBImpl(const DBOptions& options) @@ -113,7 +126,7 @@ DBImpl::DropAll() { Status DBImpl::CreateTable(meta::TableSchema& table_schema) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } meta::TableSchema temp_schema = table_schema; @@ -122,34 +135,18 @@ DBImpl::CreateTable(meta::TableSchema& table_schema) { } Status -DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) { +DBImpl::DropTable(const std::string& table_id, const meta::DatesT& dates) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } - // dates partly delete files of the table but currently we don't support - ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id; - - if (dates.empty()) { - mem_mgr_->EraseMemVector(table_id); // not allow insert - meta_ptr_->DeleteTable(table_id); // soft 
delete table - - // scheduler will determine when to delete table files - auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource(); - scheduler::DeleteJobPtr job = std::make_shared(table_id, meta_ptr_, nres); - scheduler::JobMgrInst::GetInstance()->Put(job); - job->WaitAndDelete(); - } else { - meta_ptr_->DropPartitionsByDates(table_id, dates); - } - - return Status::OK(); + return DropTableRecursively(table_id, dates); } Status DBImpl::DescribeTable(meta::TableSchema& table_schema) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } auto stat = meta_ptr_->DescribeTable(table_schema); @@ -160,7 +157,7 @@ DBImpl::DescribeTable(meta::TableSchema& table_schema) { Status DBImpl::HasTable(const std::string& table_id, bool& has_or_not) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } return meta_ptr_->HasTable(table_id, has_or_not); @@ -169,7 +166,7 @@ DBImpl::HasTable(const std::string& table_id, bool& has_or_not) { Status DBImpl::AllTables(std::vector& table_schema_array) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } return meta_ptr_->AllTables(table_schema_array); @@ -178,55 +175,59 @@ DBImpl::AllTables(std::vector& table_schema_array) { Status DBImpl::PreloadTable(const std::string& table_id) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } - meta::DatePartionedTableFilesSchema files; - - meta::DatesT dates; + // get all table files from parent table std::vector ids; - auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files); + meta::TableFilesSchema files_array; + auto status = GetFilesToSearch(table_id, ids, files_array); if (!status.ok()) { return status; } + // get files from partition tables + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = GetFilesToSearch(schema.table_id_, ids, files_array); + } + int64_t size = 0; int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity(); int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage(); int64_t available_size = cache_total - cache_usage; - for (auto& day_files : files) { - for (auto& file : day_files.second) { - ExecutionEnginePtr engine = - EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_, - (MetricType)file.metric_type_, file.nlist_); - if (engine == nullptr) { - ENGINE_LOG_ERROR << "Invalid engine type"; - return Status(DB_ERROR, "Invalid engine type"); - } + for (auto& file : files_array) { + ExecutionEnginePtr engine = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_, + (MetricType)file.metric_type_, file.nlist_); + if (engine == nullptr) { + ENGINE_LOG_ERROR << "Invalid engine type"; + return Status(DB_ERROR, "Invalid engine type"); + } - size += engine->PhysicalSize(); - if (size > available_size) { - return Status(SERVER_CACHE_FULL, "Cache is full"); - } else { - try { - // step 1: load index - engine->Load(true); - } catch (std::exception& ex) { - std::string msg = "Pre-load table encounter exception: " + std::string(ex.what()); - ENGINE_LOG_ERROR << msg; - return Status(DB_ERROR, msg); - } + size += engine->PhysicalSize(); + if (size > available_size) 
{ + return Status(SERVER_CACHE_FULL, "Cache is full"); + } else { + try { + // step 1: load index + engine->Load(true); + } catch (std::exception& ex) { + std::string msg = "Pre-load table encounter exception: " + std::string(ex.what()); + ENGINE_LOG_ERROR << msg; + return Status(DB_ERROR, msg); } } } + return Status::OK(); } Status DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } return meta_ptr_->UpdateTableFlag(table_id, flag); @@ -235,34 +236,96 @@ DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { Status DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } - return meta_ptr_->Count(table_id, row_count); + return GetTableRowCountRecursively(table_id, row_count); } Status -DBImpl::InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) { - // ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache"; +DBImpl::CreatePartition(const std::string& table_id, const std::string& partition_name, + const std::string& partition_tag) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } + return meta_ptr_->CreatePartition(table_id, partition_name, partition_tag); +} + +Status +DBImpl::DropPartition(const std::string& partition_name) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + + auto status = mem_mgr_->EraseMemVector(partition_name); // not allow insert + status = meta_ptr_->DropPartition(partition_name); // soft delete table + + // scheduler will determine when to delete table files + auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource(); + scheduler::DeleteJobPtr job = std::make_shared(partition_name, meta_ptr_, nres); + scheduler::JobMgrInst::GetInstance()->Put(job); + job->WaitAndDelete(); + + return Status::OK(); +} + +Status +DBImpl::DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + + std::string partition_name; + auto status = meta_ptr_->GetPartitionName(table_id, partition_tag, partition_name); + return DropPartition(partition_name); +} + +Status +DBImpl::ShowPartitions(const std::string& table_id, std::vector& partiton_schema_array) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + + return meta_ptr_->ShowPartitions(table_id, partiton_schema_array); +} + +Status +DBImpl::InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors, + IDNumbers& vector_ids) { + // ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache"; + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + + // if partition is specified, use partition as target table Status status; + std::string target_table_name = table_id; + if (!partition_tag.empty()) { + std::string partition_name; + status = meta_ptr_->GetPartitionName(table_id, partition_tag, target_table_name); + } + + // insert vectors into target table milvus::server::CollectInsertMetrics metrics(n, status); - status = mem_mgr_->InsertVectors(table_id, n, vectors, vector_ids); + status = 
mem_mgr_->InsertVectors(target_table_name, n, vectors, vector_ids); return status; } Status DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + + Status status; { std::unique_lock lock(build_index_mutex_); // step 1: check index difference TableIndex old_index; - auto status = DescribeIndex(table_id, old_index); + status = DescribeIndex(table_id, old_index); if (!status.ok()) { ENGINE_LOG_ERROR << "Failed to get table index info for table: " << table_id; return status; @@ -272,11 +335,8 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) { TableIndex new_index = index; new_index.metric_type_ = old_index.metric_type_; // dont change metric type, it was defined by CreateTable if (!utils::IsSameIndex(old_index, new_index)) { - DropIndex(table_id); - - status = meta_ptr_->UpdateTableIndex(table_id, new_index); + status = UpdateTableIndexRecursively(table_id, new_index); if (!status.ok()) { - ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id; return status; } } @@ -287,101 +347,91 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) { WaitMergeFileFinish(); // step 4: wait and build index - // for IDMAP type, only wait all NEW file converted to RAW file - // for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files - std::vector file_types; - if (index.engine_type_ == static_cast(EngineType::FAISS_IDMAP)) { - file_types = { - static_cast(meta::TableFileSchema::NEW), - static_cast(meta::TableFileSchema::NEW_MERGE), - }; - } else { - file_types = { - static_cast(meta::TableFileSchema::RAW), - static_cast(meta::TableFileSchema::NEW), - static_cast(meta::TableFileSchema::NEW_MERGE), - static_cast(meta::TableFileSchema::NEW_INDEX), - static_cast(meta::TableFileSchema::TO_INDEX), - }; - } + status = BuildTableIndexRecursively(table_id, index); - std::vector file_ids; - auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids); - int times = 1; - - while (!file_ids.empty()) { - ENGINE_LOG_DEBUG << "Non index files detected! 
Will build index " << times; - if (index.engine_type_ != (int)EngineType::FAISS_IDMAP) { - status = meta_ptr_->UpdateTableFilesToIndex(table_id); - } - - std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100))); - status = meta_ptr_->FilesByType(table_id, file_types, file_ids); - times++; - } - - return Status::OK(); + return status; } Status DBImpl::DescribeIndex(const std::string& table_id, TableIndex& index) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + return meta_ptr_->DescribeTableIndex(table_id, index); } Status DBImpl::DropIndex(const std::string& table_id) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + ENGINE_LOG_DEBUG << "Drop index for table: " << table_id; - return meta_ptr_->DropTableIndex(table_id); + return DropTableIndexRecursively(table_id); } Status -DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - QueryResults& results) { +DBImpl::Query(const std::string& table_id, const std::vector& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } meta::DatesT dates = {utils::GetDate()}; - Status result = Query(table_id, k, nq, nprobe, vectors, dates, results); - + Status result = Query(table_id, partition_tags, k, nq, nprobe, vectors, dates, result_ids, result_distances); return result; } Status -DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - const meta::DatesT& dates, QueryResults& results) { +DBImpl::Query(const std::string& table_id, const std::vector& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } ENGINE_LOG_DEBUG << "Query by dates for table: " << table_id << " date range count: " << dates.size(); - // get all table files from table - meta::DatePartionedTableFilesSchema files; + Status status; std::vector ids; - auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files); - if (!status.ok()) { - return status; - } + meta::TableFilesSchema files_array; - meta::TableFilesSchema file_id_array; - for (auto& day_files : files) { - for (auto& file : day_files.second) { - file_id_array.push_back(file); + if (partition_tags.empty()) { + // no partition tag specified, means search in whole table + // get all table files from parent table + status = GetFilesToSearch(table_id, ids, files_array); + if (!status.ok()) { + return status; + } + + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = GetFilesToSearch(schema.table_id_, ids, files_array); + } + } else { + // get files from specified partitions + std::set partition_name_array; + GetPartitionsByTags(table_id, partition_tags, partition_name_array); + + for (auto& partition_name : partition_name_array) { + status = GetFilesToSearch(partition_name, ids, files_array); } } cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query - status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, 
results); + status = QueryAsync(table_id, files_array, k, nq, nprobe, vectors, result_ids, result_distances); cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query return status; } Status -DBImpl::Query(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) { +DBImpl::QueryByFileID(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } ENGINE_LOG_DEBUG << "Query by file ids for table: " << table_id << " date range count: " << dates.size(); @@ -395,25 +445,18 @@ DBImpl::Query(const std::string& table_id, const std::vector& file_ ids.push_back(std::stoul(id, &sz)); } - meta::DatePartionedTableFilesSchema files_array; - auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files_array); + meta::TableFilesSchema files_array; + auto status = GetFilesToSearch(table_id, ids, files_array); if (!status.ok()) { return status; } - meta::TableFilesSchema file_id_array; - for (auto& day_files : files_array) { - for (auto& file : day_files.second) { - file_id_array.push_back(file); - } - } - - if (file_id_array.empty()) { + if (files_array.empty()) { return Status(DB_ERROR, "Invalid file id"); } cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query - status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, results); + status = QueryAsync(table_id, files_array, k, nq, nprobe, vectors, result_ids, result_distances); cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query return status; } @@ -421,7 +464,7 @@ DBImpl::Query(const std::string& table_id, const std::vector& file_ Status DBImpl::Size(uint64_t& result) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } return meta_ptr_->Size(result); @@ -432,7 +475,7 @@ DBImpl::Size(uint64_t& result) { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, QueryResults& results) { + uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) { server::CollectQueryMetrics metrics(nq); TimeRecorder rc(""); @@ -453,7 +496,8 @@ DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& fi } // step 3: construct results - results = job->GetResult(); + result_ids = job->GetResultIds(); + result_distances = job->GetResultDistances(); rc.ElapseFromBegin("Engine query totally cost"); return Status::OK(); @@ -772,5 +816,183 @@ DBImpl::BackgroundBuildIndex() { ENGINE_LOG_TRACE << "Background build index thread exit"; } +Status +DBImpl::GetFilesToSearch(const std::string& table_id, const std::vector& file_ids, + meta::TableFilesSchema& files) { + meta::DatesT dates; + meta::DatePartionedTableFilesSchema date_files; + auto status = meta_ptr_->FilesToSearch(table_id, file_ids, dates, date_files); + if (!status.ok()) { + return status; + } + + TraverseFiles(date_files, files); + return Status::OK(); +} + +Status 
+DBImpl::GetPartitionsByTags(const std::string& table_id, const std::vector& partition_tags, + std::set& partition_name_array) { + std::vector partiton_array; + auto status = meta_ptr_->ShowPartitions(table_id, partiton_array); + + for (auto& tag : partition_tags) { + for (auto& schema : partiton_array) { + if (server::StringHelpFunctions::IsRegexMatch(schema.partition_tag_, tag)) { + partition_name_array.insert(schema.table_id_); + } + } + } + + return Status::OK(); +} + +Status +DBImpl::DropTableRecursively(const std::string& table_id, const meta::DatesT& dates) { + // dates partly delete files of the table but currently we don't support + ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id; + + Status status; + if (dates.empty()) { + status = mem_mgr_->EraseMemVector(table_id); // not allow insert + status = meta_ptr_->DropTable(table_id); // soft delete table + + // scheduler will determine when to delete table files + auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource(); + scheduler::DeleteJobPtr job = std::make_shared(table_id, meta_ptr_, nres); + scheduler::JobMgrInst::GetInstance()->Put(job); + job->WaitAndDelete(); + } else { + status = meta_ptr_->DropDataByDate(table_id, dates); + } + + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = DropTableRecursively(schema.table_id_, dates); + if (!status.ok()) { + return status; + } + } + + return Status::OK(); +} + +Status +DBImpl::UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index) { + DropIndex(table_id); + + auto status = meta_ptr_->UpdateTableIndex(table_id, index); + if (!status.ok()) { + ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id; + return status; + } + + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = UpdateTableIndexRecursively(schema.table_id_, index); + if (!status.ok()) { + return status; + } + } + + return Status::OK(); +} + +Status +DBImpl::BuildTableIndexRecursively(const std::string& table_id, const TableIndex& index) { + // for IDMAP type, only wait all NEW file converted to RAW file + // for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files + std::vector file_types; + if (index.engine_type_ == static_cast(EngineType::FAISS_IDMAP)) { + file_types = { + static_cast(meta::TableFileSchema::NEW), + static_cast(meta::TableFileSchema::NEW_MERGE), + }; + } else { + file_types = { + static_cast(meta::TableFileSchema::RAW), + static_cast(meta::TableFileSchema::NEW), + static_cast(meta::TableFileSchema::NEW_MERGE), + static_cast(meta::TableFileSchema::NEW_INDEX), + static_cast(meta::TableFileSchema::TO_INDEX), + }; + } + + // get files to build index + std::vector file_ids; + auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids); + int times = 1; + + while (!file_ids.empty()) { + ENGINE_LOG_DEBUG << "Non index files detected! 
Will build index " << times; + if (index.engine_type_ != (int)EngineType::FAISS_IDMAP) { + status = meta_ptr_->UpdateTableFilesToIndex(table_id); + } + + std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100))); + status = meta_ptr_->FilesByType(table_id, file_types, file_ids); + times++; + } + + // build index for partition + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = BuildTableIndexRecursively(schema.table_id_, index); + if (!status.ok()) { + return status; + } + } + + return Status::OK(); +} + +Status +DBImpl::DropTableIndexRecursively(const std::string& table_id) { + ENGINE_LOG_DEBUG << "Drop index for table: " << table_id; + auto status = meta_ptr_->DropTableIndex(table_id); + if (!status.ok()) { + return status; + } + + // drop partition index + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = DropTableIndexRecursively(schema.table_id_); + if (!status.ok()) { + return status; + } + } + + return Status::OK(); +} + +Status +DBImpl::GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count) { + row_count = 0; + auto status = meta_ptr_->Count(table_id, row_count); + if (!status.ok()) { + return status; + } + + // get partition row count + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + uint64_t partition_row_count = 0; + status = GetTableRowCountRecursively(schema.table_id_, partition_row_count); + if (!status.ok()) { + return status; + } + + row_count += partition_row_count; + } + + return Status::OK(); +} + } // namespace engine } // namespace milvus diff --git a/core/src/db/DBImpl.h b/core/src/db/DBImpl.h index e1e030cc32..932fc990e4 100644 --- a/core/src/db/DBImpl.h +++ b/core/src/db/DBImpl.h @@ -57,7 +57,7 @@ class DBImpl : public DB { CreateTable(meta::TableSchema& table_schema) override; Status - DeleteTable(const std::string& table_id, const meta::DatesT& dates) override; + DropTable(const std::string& table_id, const meta::DatesT& dates) override; Status DescribeTable(meta::TableSchema& table_schema) override; @@ -78,7 +78,21 @@ class DBImpl : public DB { GetTableRowCount(const std::string& table_id, uint64_t& row_count) override; Status - InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) override; + CreatePartition(const std::string& table_id, const std::string& partition_name, + const std::string& partition_tag) override; + + Status + DropPartition(const std::string& partition_name) override; + + Status + DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) override; + + Status + ShowPartitions(const std::string& table_id, std::vector& partiton_schema_array) override; + + Status + InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors, + IDNumbers& vector_ids) override; Status CreateIndex(const std::string& table_id, const TableIndex& index) override; @@ -90,16 +104,18 @@ class DBImpl : public DB { DropIndex(const std::string& table_id) override; Status - Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - QueryResults& results) override; + Query(const std::string& table_id, const std::vector& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* 
vectors, ResultIds& result_ids, ResultDistances& result_distances) override; Status - Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - const meta::DatesT& dates, QueryResults& results) override; + Query(const std::string& table_id, const std::vector& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) override; Status - Query(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) override; + QueryByFileID(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) override; Status Size(uint64_t& result) override; @@ -107,7 +123,7 @@ class DBImpl : public DB { private: Status QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, QueryResults& results); + uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances); void BackgroundTimerTask(); @@ -136,6 +152,28 @@ class DBImpl : public DB { Status MemSerialize(); + Status + GetFilesToSearch(const std::string& table_id, const std::vector& file_ids, meta::TableFilesSchema& files); + + Status + GetPartitionsByTags(const std::string& table_id, const std::vector& partition_tags, + std::set& partition_name_array); + + Status + DropTableRecursively(const std::string& table_id, const meta::DatesT& dates); + + Status + UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index); + + Status + BuildTableIndexRecursively(const std::string& table_id, const TableIndex& index); + + Status + DropTableIndexRecursively(const std::string& table_id); + + Status + GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count); + private: const DBOptions options_; diff --git a/core/src/db/Types.h b/core/src/db/Types.h index 94528a9a8a..76c06126f8 100644 --- a/core/src/db/Types.h +++ b/core/src/db/Types.h @@ -19,6 +19,7 @@ #include "db/engine/ExecutionEngine.h" +#include #include #include #include @@ -30,8 +31,8 @@ typedef int64_t IDNumber; typedef IDNumber* IDNumberPtr; typedef std::vector IDNumbers; -typedef std::vector> QueryResult; -typedef std::vector QueryResults; +typedef std::vector ResultIds; +typedef std::vector ResultDistances; struct TableIndex { int32_t engine_type_ = (int)EngineType::FAISS_IDMAP; diff --git a/core/src/db/meta/Meta.h b/core/src/db/meta/Meta.h index ec4b66916d..f538bebce6 100644 --- a/core/src/db/meta/Meta.h +++ b/core/src/db/meta/Meta.h @@ -50,14 +50,11 @@ class Meta { virtual Status AllTables(std::vector& table_schema_array) = 0; - virtual Status - UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0; - virtual Status UpdateTableFlag(const std::string& table_id, int64_t flag) = 0; virtual Status - DeleteTable(const std::string& table_id) = 0; + DropTable(const std::string& table_id) = 0; virtual Status DeleteTableFiles(const std::string& table_id) = 0; @@ -66,20 +63,41 @@ class Meta { CreateTableFile(TableFileSchema& file_schema) = 0; virtual Status - DropPartitionsByDates(const std::string& table_id, const DatesT& dates) = 0; + DropDataByDate(const std::string& table_id, const DatesT& dates) = 0; virtual Status 
GetTableFiles(const std::string& table_id, const std::vector& ids, TableFilesSchema& table_files) = 0; - virtual Status - UpdateTableFilesToIndex(const std::string& table_id) = 0; - virtual Status UpdateTableFile(TableFileSchema& file_schema) = 0; virtual Status UpdateTableFiles(TableFilesSchema& files) = 0; + virtual Status + UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0; + + virtual Status + UpdateTableFilesToIndex(const std::string& table_id) = 0; + + virtual Status + DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0; + + virtual Status + DropTableIndex(const std::string& table_id) = 0; + + virtual Status + CreatePartition(const std::string& table_name, const std::string& partition_name, const std::string& tag) = 0; + + virtual Status + DropPartition(const std::string& partition_name) = 0; + + virtual Status + ShowPartitions(const std::string& table_name, std::vector& partiton_schema_array) = 0; + + virtual Status + GetPartitionName(const std::string& table_name, const std::string& tag, std::string& partition_name) = 0; + virtual Status FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, DatePartionedTableFilesSchema& files) = 0; @@ -87,12 +105,6 @@ class Meta { virtual Status FilesToMerge(const std::string& table_id, DatePartionedTableFilesSchema& files) = 0; - virtual Status - Size(uint64_t& result) = 0; - - virtual Status - Archive() = 0; - virtual Status FilesToIndex(TableFilesSchema&) = 0; @@ -101,10 +113,10 @@ class Meta { std::vector& file_ids) = 0; virtual Status - DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0; + Size(uint64_t& result) = 0; virtual Status - DropTableIndex(const std::string& table_id) = 0; + Archive() = 0; virtual Status CleanUp() = 0; diff --git a/core/src/db/meta/MetaTypes.h b/core/src/db/meta/MetaTypes.h index c973f3fdea..28f35e76fc 100644 --- a/core/src/db/meta/MetaTypes.h +++ b/core/src/db/meta/MetaTypes.h @@ -19,6 +19,7 @@ #include "db/Constants.h" #include "db/engine/ExecutionEngine.h" +#include "src/config.h" #include #include @@ -33,6 +34,7 @@ constexpr int32_t DEFAULT_ENGINE_TYPE = (int)EngineType::FAISS_IDMAP; constexpr int32_t DEFAULT_NLIST = 16384; constexpr int32_t DEFAULT_METRIC_TYPE = (int)MetricType::L2; constexpr int32_t DEFAULT_INDEX_FILE_SIZE = ONE_GB; +constexpr char CURRENT_VERSION[] = MILVUS_VERSION; constexpr int64_t FLAG_MASK_NO_USERID = 0x1; constexpr int64_t FLAG_MASK_HAS_USERID = 0x1 << 1; @@ -57,6 +59,9 @@ struct TableSchema { int32_t engine_type_ = DEFAULT_ENGINE_TYPE; int32_t nlist_ = DEFAULT_NLIST; int32_t metric_type_ = DEFAULT_METRIC_TYPE; + std::string owner_table_; + std::string partition_tag_; + std::string version_ = CURRENT_VERSION; }; // TableSchema struct TableFileSchema { diff --git a/core/src/db/meta/MySQLMetaImpl.cpp b/core/src/db/meta/MySQLMetaImpl.cpp index c7a054524c..ff36554c10 100644 --- a/core/src/db/meta/MySQLMetaImpl.cpp +++ b/core/src/db/meta/MySQLMetaImpl.cpp @@ -145,6 +145,10 @@ static const MetaSchema TABLES_SCHEMA(META_TABLES, { MetaField("engine_type", "INT", "DEFAULT 1 NOT NULL"), MetaField("nlist", "INT", "DEFAULT 16384 NOT NULL"), MetaField("metric_type", "INT", "DEFAULT 1 NOT NULL"), + MetaField("owner_table", "VARCHAR(255)", "NOT NULL"), + MetaField("partition_tag", "VARCHAR(255)", "NOT NULL"), + MetaField("version", "VARCHAR(64)", + std::string("DEFAULT '") + CURRENT_VERSION + "'"), }); // TableFiles schema @@ -294,7 +298,7 @@ MySQLMetaImpl::Initialize() { mysqlpp::ScopedConnection 
connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } if (!connectionPtr->thread_aware()) { @@ -328,9 +332,350 @@ MySQLMetaImpl::Initialize() { return Status::OK(); } +Status +MySQLMetaImpl::CreateTable(TableSchema& table_schema) { + try { + server::MetricCollector metric; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query createTableQuery = connectionPtr->query(); + + if (table_schema.table_id_.empty()) { + NextTableId(table_schema.table_id_); + } else { + createTableQuery << "SELECT state FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote + << table_schema.table_id_ << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str(); + + mysqlpp::StoreQueryResult res = createTableQuery.store(); + + if (res.num_rows() == 1) { + int state = res[0]["state"]; + if (TableSchema::TO_DELETE == state) { + return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second"); + } else { + return Status(DB_ALREADY_EXIST, "Table already exists"); + } + } + } + + table_schema.id_ = -1; + table_schema.created_on_ = utils::GetMicroSecTimeStamp(); + + std::string id = "NULL"; // auto-increment + std::string& table_id = table_schema.table_id_; + std::string state = std::to_string(table_schema.state_); + std::string dimension = std::to_string(table_schema.dimension_); + std::string created_on = std::to_string(table_schema.created_on_); + std::string flag = std::to_string(table_schema.flag_); + std::string index_file_size = std::to_string(table_schema.index_file_size_); + std::string engine_type = std::to_string(table_schema.engine_type_); + std::string nlist = std::to_string(table_schema.nlist_); + std::string metric_type = std::to_string(table_schema.metric_type_); + std::string& owner_table = table_schema.owner_table_; + std::string& partition_tag = table_schema.partition_tag_; + std::string& version = table_schema.version_; + + createTableQuery << "INSERT INTO " << META_TABLES << " VALUES(" << id << ", " << mysqlpp::quote << table_id + << ", " << state << ", " << dimension << ", " << created_on << ", " << flag << ", " + << index_file_size << ", " << engine_type << ", " << nlist << ", " << metric_type << ", " + << mysqlpp::quote << owner_table << ", " << mysqlpp::quote << partition_tag << ", " + << mysqlpp::quote << version << ");"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str(); + + if (mysqlpp::SimpleResult res = createTableQuery.execute()) { + table_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()? 
+ + // Consume all results to avoid "Commands out of sync" error + } else { + return HandleException("Add Table Error", createTableQuery.error()); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; + return utils::CreateTablePath(options_, table_schema.table_id_); + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CREATING TABLE", e.what()); + } +} + +Status +MySQLMetaImpl::DescribeTable(TableSchema& table_schema) { + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query describeTableQuery = connectionPtr->query(); + describeTableQuery + << "SELECT id, state, dimension, created_on, flag, index_file_size, engine_type, nlist, metric_type" + << " ,owner_table, partition_tag, version" + << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_schema.table_id_ + << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTable: " << describeTableQuery.str(); + + res = describeTableQuery.store(); + } // Scoped Connection + + if (res.num_rows() == 1) { + const mysqlpp::Row& resRow = res[0]; + table_schema.id_ = resRow["id"]; // implicit conversion + table_schema.state_ = resRow["state"]; + table_schema.dimension_ = resRow["dimension"]; + table_schema.created_on_ = resRow["created_on"]; + table_schema.flag_ = resRow["flag"]; + table_schema.index_file_size_ = resRow["index_file_size"]; + table_schema.engine_type_ = resRow["engine_type"]; + table_schema.nlist_ = resRow["nlist"]; + table_schema.metric_type_ = resRow["metric_type"]; + resRow["owner_table"].to_string(table_schema.owner_table_); + resRow["partition_tag"].to_string(table_schema.partition_tag_); + resRow["version"].to_string(table_schema.version_); + } else { + return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); + } + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DESCRIBING TABLE", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) { + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query hasTableQuery = connectionPtr->query(); + // since table_id is a unique column we just need to check whether it exists or not + hasTableQuery << "SELECT EXISTS" + << " (SELECT 1 FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ")" + << " AS " << mysqlpp::quote << "check" + << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::HasTable: " << hasTableQuery.str(); + + res = hasTableQuery.store(); + } // Scoped Connection + + int check = res[0]["check"]; + has_or_not = (check == 1); + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CHECKING IF TABLE EXISTS", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::AllTables(std::vector& table_schema_array) { + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection 
connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query allTablesQuery = connectionPtr->query(); + allTablesQuery << "SELECT id, table_id, dimension, engine_type, nlist, index_file_size, metric_type" + << " ,owner_table, partition_tag, version" + << " FROM " << META_TABLES << " WHERE state <> " << std::to_string(TableSchema::TO_DELETE) + << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allTablesQuery.str(); + + res = allTablesQuery.store(); + } // Scoped Connection + + for (auto& resRow : res) { + TableSchema table_schema; + table_schema.id_ = resRow["id"]; // implicit conversion + resRow["table_id"].to_string(table_schema.table_id_); + table_schema.dimension_ = resRow["dimension"]; + table_schema.index_file_size_ = resRow["index_file_size"]; + table_schema.engine_type_ = resRow["engine_type"]; + table_schema.nlist_ = resRow["nlist"]; + table_schema.metric_type_ = resRow["metric_type"]; + resRow["owner_table"].to_string(table_schema.owner_table_); + resRow["partition_tag"].to_string(table_schema.partition_tag_); + resRow["version"].to_string(table_schema.version_); + + table_schema_array.emplace_back(table_schema); + } + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DESCRIBING ALL TABLES", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::DropTable(const std::string& table_id) { + try { + server::MetricCollector metric; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + // soft delete table + mysqlpp::Query deleteTableQuery = connectionPtr->query(); + // + deleteTableQuery << "UPDATE " << META_TABLES << " SET state = " << std::to_string(TableSchema::TO_DELETE) + << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DeleteTable: " << deleteTableQuery.str(); + + if (!deleteTableQuery.exec()) { + return HandleException("QUERY ERROR WHEN DELETING TABLE", deleteTableQuery.error()); + } + } // Scoped Connection + + if (mode_ == DBOptions::MODE::CLUSTER_WRITABLE) { + DeleteTableFiles(table_id); + } + + ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DELETING TABLE", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::DeleteTableFiles(const std::string& table_id) { + try { + server::MetricCollector metric; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + // soft delete table files + mysqlpp::Query deleteTableFilesQuery = connectionPtr->query(); + // + deleteTableFilesQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " ,updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) + << " WHERE table_id = " << mysqlpp::quote << table_id << " AND file_type <> " + << std::to_string(TableFileSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DeleteTableFiles: " << deleteTableFilesQuery.str(); + + if (!deleteTableFilesQuery.exec()) { + return HandleException("QUERY ERROR WHEN DELETING TABLE FILES", deleteTableFilesQuery.error()); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << 
"Successfully delete table files, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DELETING TABLE FILES", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::CreateTableFile(TableFileSchema& file_schema) { + if (file_schema.date_ == EmptyDate) { + file_schema.date_ = utils::GetDate(); + } + TableSchema table_schema; + table_schema.table_id_ = file_schema.table_id_; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + + try { + server::MetricCollector metric; + + NextFileId(file_schema.file_id_); + file_schema.dimension_ = table_schema.dimension_; + file_schema.file_size_ = 0; + file_schema.row_count_ = 0; + file_schema.created_on_ = utils::GetMicroSecTimeStamp(); + file_schema.updated_time_ = file_schema.created_on_; + file_schema.index_file_size_ = table_schema.index_file_size_; + file_schema.engine_type_ = table_schema.engine_type_; + file_schema.nlist_ = table_schema.nlist_; + file_schema.metric_type_ = table_schema.metric_type_; + + std::string id = "NULL"; // auto-increment + std::string table_id = file_schema.table_id_; + std::string engine_type = std::to_string(file_schema.engine_type_); + std::string file_id = file_schema.file_id_; + std::string file_type = std::to_string(file_schema.file_type_); + std::string file_size = std::to_string(file_schema.file_size_); + std::string row_count = std::to_string(file_schema.row_count_); + std::string updated_time = std::to_string(file_schema.updated_time_); + std::string created_on = std::to_string(file_schema.created_on_); + std::string date = std::to_string(file_schema.date_); + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query createTableFileQuery = connectionPtr->query(); + + createTableFileQuery << "INSERT INTO " << META_TABLEFILES << " VALUES(" << id << ", " << mysqlpp::quote + << table_id << ", " << engine_type << ", " << mysqlpp::quote << file_id << ", " + << file_type << ", " << file_size << ", " << row_count << ", " << updated_time << ", " + << created_on << ", " << date << ");"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTableFile: " << createTableFileQuery.str(); + + if (mysqlpp::SimpleResult res = createTableFileQuery.execute()) { + file_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()? 
+ + // Consume all results to avoid "Commands out of sync" error + } else { + return HandleException("QUERY ERROR WHEN CREATING TABLE FILE", createTableFileQuery.error()); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_; + return utils::CreateTableFilePath(options_, file_schema); + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CREATING TABLE FILE", e.what()); + } +} + // TODO(myh): Delete single vecotor by id Status -MySQLMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& dates) { +MySQLMetaImpl::DropDataByDate(const std::string& table_id, const DatesT& dates) { if (dates.empty()) { return Status::OK(); } @@ -354,18 +699,18 @@ MySQLMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query dropPartitionsByDatesQuery = connectionPtr->query(); - dropPartitionsByDatesQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << "," - << "updated_time = " << utils::GetMicroSecTimeStamp() << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "date in (" << dateListStr << ");"; + dropPartitionsByDatesQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " ,updated_time = " << utils::GetMicroSecTimeStamp() + << " WHERE table_id = " << mysqlpp::quote << table_id << " AND date in (" + << dateListStr << ");"; - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropPartitionsByDates: " << dropPartitionsByDatesQuery.str(); + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropDataByDate: " << dropPartitionsByDatesQuery.str(); if (!dropPartitionsByDatesQuery.exec()) { return HandleException("QUERY ERROR WHEN DROPPING PARTITIONS BY DATES", @@ -373,7 +718,7 @@ MySQLMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& } } // Scoped Connection - ENGINE_LOG_DEBUG << "Successfully drop partitions, table id = " << table_schema.table_id_; + ENGINE_LOG_DEBUG << "Successfully drop data by date, table id = " << table_schema.table_id_; } catch (std::exception& e) { return HandleException("GENERAL ERROR WHEN DROPPING PARTITIONS BY DATES", e.what()); } @@ -381,72 +726,782 @@ MySQLMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& } Status -MySQLMetaImpl::CreateTable(TableSchema& table_schema) { +MySQLMetaImpl::GetTableFiles(const std::string& table_id, const std::vector& ids, + TableFilesSchema& table_files) { + if (ids.empty()) { + return Status::OK(); + } + + std::stringstream idSS; + for (auto& id : ids) { + idSS << "id = " << std::to_string(id) << " OR "; + } + std::string idStr = idSS.str(); + idStr = idStr.substr(0, idStr.size() - 4); // remove the last " OR " + + try { + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query getTableFileQuery = connectionPtr->query(); + getTableFileQuery << "SELECT id, engine_type, file_id, file_type, file_size, row_count, date, created_on" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id + << " 
AND (" << idStr << ")" + << " AND file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetTableFiles: " << getTableFileQuery.str(); + + res = getTableFileQuery.store(); + } // Scoped Connection + + TableSchema table_schema; + table_schema.table_id_ = table_id; + DescribeTable(table_schema); + + Status ret; + for (auto& resRow : res) { + TableFileSchema file_schema; + file_schema.id_ = resRow["id"]; + file_schema.table_id_ = table_id; + file_schema.index_file_size_ = table_schema.index_file_size_; + file_schema.engine_type_ = resRow["engine_type"]; + file_schema.nlist_ = table_schema.nlist_; + file_schema.metric_type_ = table_schema.metric_type_; + resRow["file_id"].to_string(file_schema.file_id_); + file_schema.file_type_ = resRow["file_type"]; + file_schema.file_size_ = resRow["file_size"]; + file_schema.row_count_ = resRow["row_count"]; + file_schema.date_ = resRow["date"]; + file_schema.created_on_ = resRow["created_on"]; + file_schema.dimension_ = table_schema.dimension_; + + utils::GetTableFilePath(options_, file_schema); + table_files.emplace_back(file_schema); + } + + ENGINE_LOG_DEBUG << "Get table files by id"; + return ret; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN RETRIEVING TABLE FILES", e.what()); + } +} + +Status +MySQLMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& index) { + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query updateTableIndexParamQuery = connectionPtr->query(); + updateTableIndexParamQuery << "SELECT id, state, dimension, created_on" + << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str(); + + mysqlpp::StoreQueryResult res = updateTableIndexParamQuery.store(); + + if (res.num_rows() == 1) { + const mysqlpp::Row& resRow = res[0]; + + size_t id = resRow["id"]; + int32_t state = resRow["state"]; + uint16_t dimension = resRow["dimension"]; + int64_t created_on = resRow["created_on"]; + + updateTableIndexParamQuery << "UPDATE " << META_TABLES << " SET id = " << id << " ,state = " << state + << " ,dimension = " << dimension << " ,created_on = " << created_on + << " ,engine_type = " << index.engine_type_ << " ,nlist = " << index.nlist_ + << " ,metric_type = " << index.metric_type_ + << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str(); + + if (!updateTableIndexParamQuery.exec()) { + return HandleException("QUERY ERROR WHEN UPDATING TABLE INDEX PARAM", + updateTableIndexParamQuery.error()); + } + } else { + return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully update table index, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN UPDATING TABLE INDEX PARAM", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if 
(connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query updateTableFlagQuery = connectionPtr->query(); + updateTableFlagQuery << "UPDATE " << META_TABLES << " SET flag = " << flag + << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFlag: " << updateTableFlagQuery.str(); + + if (!updateTableFlagQuery.exec()) { + return HandleException("QUERY ERROR WHEN UPDATING TABLE FLAG", updateTableFlagQuery.error()); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN UPDATING TABLE FLAG", e.what()); + } + + return Status::OK(); +} + +// ZR: this function assumes all fields in file_schema have value +Status +MySQLMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { + file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); + try { server::MetricCollector metric; { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } - mysqlpp::Query createTableQuery = connectionPtr->query(); + mysqlpp::Query updateTableFileQuery = connectionPtr->query(); - if (table_schema.table_id_.empty()) { - NextTableId(table_schema.table_id_); - } else { - createTableQuery << "SELECT state FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << table_schema.table_id_ << ";"; + // if the table has been deleted, just mark the table file as TO_DELETE + // clean thread will delete the file later + updateTableFileQuery << "SELECT state FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote + << file_schema.table_id_ << ";"; - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str(); + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str(); - mysqlpp::StoreQueryResult res = createTableQuery.store(); + mysqlpp::StoreQueryResult res = updateTableFileQuery.store(); - if (res.num_rows() == 1) { - int state = res[0]["state"]; - if (TableSchema::TO_DELETE == state) { - return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second"); - } else { - return Status(DB_ALREADY_EXIST, "Table already exists"); - } + if (res.num_rows() == 1) { + int state = res[0]["state"]; + if (state == TableSchema::TO_DELETE) { + file_schema.file_type_ = TableFileSchema::TO_DELETE; } + } else { + file_schema.file_type_ = TableFileSchema::TO_DELETE; } - table_schema.id_ = -1; - table_schema.created_on_ = utils::GetMicroSecTimeStamp(); + std::string id = std::to_string(file_schema.id_); + std::string table_id = file_schema.table_id_; + std::string engine_type = std::to_string(file_schema.engine_type_); + std::string file_id = file_schema.file_id_; + std::string file_type = std::to_string(file_schema.file_type_); + std::string file_size = std::to_string(file_schema.file_size_); + std::string row_count = std::to_string(file_schema.row_count_); + std::string updated_time = std::to_string(file_schema.updated_time_); + std::string created_on = std::to_string(file_schema.created_on_); + std::string date = std::to_string(file_schema.date_); - std::string id = "NULL"; // auto-increment - std::string table_id = table_schema.table_id_; - std::string state = std::to_string(table_schema.state_); - std::string 
dimension = std::to_string(table_schema.dimension_); - std::string created_on = std::to_string(table_schema.created_on_); - std::string flag = std::to_string(table_schema.flag_); - std::string index_file_size = std::to_string(table_schema.index_file_size_); - std::string engine_type = std::to_string(table_schema.engine_type_); - std::string nlist = std::to_string(table_schema.nlist_); - std::string metric_type = std::to_string(table_schema.metric_type_); + updateTableFileQuery << "UPDATE " << META_TABLEFILES << " SET table_id = " << mysqlpp::quote << table_id + << " ,engine_type = " << engine_type << " ,file_id = " << mysqlpp::quote << file_id + << " ,file_type = " << file_type << " ,file_size = " << file_size + << " ,row_count = " << row_count << " ,updated_time = " << updated_time + << " ,created_on = " << created_on << " ,date = " << date << " WHERE id = " << id + << ";"; - createTableQuery << "INSERT INTO " << META_TABLES << " " - << "VALUES(" << id << ", " << mysqlpp::quote << table_id << ", " << state << ", " - << dimension << ", " << created_on << ", " << flag << ", " << index_file_size << ", " - << engine_type << ", " << nlist << ", " << metric_type << ");"; + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str(); - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str(); - - if (mysqlpp::SimpleResult res = createTableQuery.execute()) { - table_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()? - - // Consume all results to avoid "Commands out of sync" error - } else { - return HandleException("Add Table Error", createTableQuery.error()); + if (!updateTableFileQuery.exec()) { + ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_; + return HandleException("QUERY ERROR WHEN UPDATING TABLE FILE", updateTableFileQuery.error()); } } // Scoped Connection - ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; - return utils::CreateTablePath(options_, table_schema.table_id_); + ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CREATING TABLE", e.what()); + return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILE", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { + try { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query updateTableFilesToIndexQuery = connectionPtr->query(); + + updateTableFilesToIndexQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::TO_INDEX) + << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND file_type = " << std::to_string(TableFileSchema::RAW) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFilesToIndex: " << updateTableFilesToIndexQuery.str(); + + if (!updateTableFilesToIndexQuery.exec()) { + return HandleException("QUERY ERROR WHEN UPDATING TABLE FILE TO INDEX", + updateTableFilesToIndexQuery.error()); + } + + ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES TO INDEX", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::UpdateTableFiles(TableFilesSchema& files) { + try { + 
server::MetricCollector metric; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query updateTableFilesQuery = connectionPtr->query(); + + std::map has_tables; + for (auto& file_schema : files) { + if (has_tables.find(file_schema.table_id_) != has_tables.end()) { + continue; + } + + updateTableFilesQuery << "SELECT EXISTS" + << " (SELECT 1 FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote + << file_schema.table_id_ << " AND state <> " + << std::to_string(TableSchema::TO_DELETE) << ")" + << " AS " << mysqlpp::quote << "check" + << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFiles: " << updateTableFilesQuery.str(); + + mysqlpp::StoreQueryResult res = updateTableFilesQuery.store(); + + int check = res[0]["check"]; + has_tables[file_schema.table_id_] = (check == 1); + } + + for (auto& file_schema : files) { + if (!has_tables[file_schema.table_id_]) { + file_schema.file_type_ = TableFileSchema::TO_DELETE; + } + file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); + + std::string id = std::to_string(file_schema.id_); + std::string& table_id = file_schema.table_id_; + std::string engine_type = std::to_string(file_schema.engine_type_); + std::string& file_id = file_schema.file_id_; + std::string file_type = std::to_string(file_schema.file_type_); + std::string file_size = std::to_string(file_schema.file_size_); + std::string row_count = std::to_string(file_schema.row_count_); + std::string updated_time = std::to_string(file_schema.updated_time_); + std::string created_on = std::to_string(file_schema.created_on_); + std::string date = std::to_string(file_schema.date_); + + updateTableFilesQuery << "UPDATE " << META_TABLEFILES << " SET table_id = " << mysqlpp::quote + << table_id << " ,engine_type = " << engine_type + << " ,file_id = " << mysqlpp::quote << file_id << " ,file_type = " << file_type + << " ,file_size = " << file_size << " ,row_count = " << row_count + << " ,updated_time = " << updated_time << " ,created_on = " << created_on + << " ,date = " << date << " WHERE id = " << id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFiles: " << updateTableFilesQuery.str(); + + if (!updateTableFilesQuery.exec()) { + return HandleException("QUERY ERROR WHEN UPDATING TABLE FILES", updateTableFilesQuery.error()); + } + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::DescribeTableIndex(const std::string& table_id, TableIndex& index) { + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query describeTableIndexQuery = connectionPtr->query(); + describeTableIndexQuery << "SELECT engine_type, nlist, index_file_size, metric_type" + << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTableIndex: " << describeTableIndexQuery.str(); + + mysqlpp::StoreQueryResult res = describeTableIndexQuery.store(); + + if (res.num_rows() == 1) { + const mysqlpp::Row& resRow 
= res[0]; + + index.engine_type_ = resRow["engine_type"]; + index.nlist_ = resRow["nlist"]; + index.metric_type_ = resRow["metric_type"]; + } else { + return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); + } + } // Scoped Connection + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN UPDATING TABLE FLAG", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::DropTableIndex(const std::string& table_id) { + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query dropTableIndexQuery = connectionPtr->query(); + + // soft delete index files + dropTableIndexQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " ,updated_time = " << utils::GetMicroSecTimeStamp() + << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND file_type = " << std::to_string(TableFileSchema::INDEX) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); + + if (!dropTableIndexQuery.exec()) { + return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); + } + + // set all backup file to raw + dropTableIndexQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::RAW) + << " ,updated_time = " << utils::GetMicroSecTimeStamp() + << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND file_type = " << std::to_string(TableFileSchema::BACKUP) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); + + if (!dropTableIndexQuery.exec()) { + return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); + } + + // set table index type to raw + dropTableIndexQuery << "UPDATE " << META_TABLES + << " SET engine_type = " << std::to_string(DEFAULT_ENGINE_TYPE) + << " ,nlist = " << std::to_string(DEFAULT_NLIST) + << " ,metric_type = " << std::to_string(DEFAULT_METRIC_TYPE) + << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); + + if (!dropTableIndexQuery.exec()) { + return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully drop table index, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DROPPING TABLE INDEX", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) { + server::MetricCollector metric; + + TableSchema table_schema; + table_schema.table_id_ = table_id; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + + // not allow create partition under partition + if (!table_schema.owner_table_.empty()) { + return Status(DB_ERROR, "Nested partition is not allow"); + } + + if (partition_name == "") { + // not allow duplicated partition + std::string exist_partition; + GetPartitionName(table_id, tag, exist_partition); + if (!exist_partition.empty()) { + return Status(DB_ERROR, "Duplicated partition is not allow"); + } + + NextTableId(table_schema.table_id_); + } else { + table_schema.table_id_ = partition_name; + } + + 
table_schema.id_ = -1; + table_schema.flag_ = 0; + table_schema.created_on_ = utils::GetMicroSecTimeStamp(); + table_schema.owner_table_ = table_id; + table_schema.partition_tag_ = tag; + + return CreateTable(table_schema); +} + +Status +MySQLMetaImpl::DropPartition(const std::string& partition_name) { + return DropTable(partition_name); +} + +Status +MySQLMetaImpl::ShowPartitions(const std::string& table_id, std::vector& partiton_schema_array) { + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query allPartitionsQuery = connectionPtr->query(); + allPartitionsQuery << "SELECT table_id FROM " << META_TABLES << " WHERE owner_table = " << mysqlpp::quote + << table_id << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allPartitionsQuery.str(); + + res = allPartitionsQuery.store(); + } // Scoped Connection + + for (auto& resRow : res) { + meta::TableSchema partition_schema; + resRow["table_id"].to_string(partition_schema.table_id_); + DescribeTable(partition_schema); + partiton_schema_array.emplace_back(partition_schema); + } + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN SHOW PARTITIONS", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) { + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query allPartitionsQuery = connectionPtr->query(); + allPartitionsQuery << "SELECT table_id FROM " << META_TABLES << " WHERE owner_table = " << mysqlpp::quote + << table_id << " AND partition_tag = " << mysqlpp::quote << tag << " AND state <> " + << std::to_string(TableSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allPartitionsQuery.str(); + + res = allPartitionsQuery.store(); + } // Scoped Connection + + if (res.num_rows() > 0) { + const mysqlpp::Row& resRow = res[0]; + resRow["table_id"].to_string(partition_name); + } else { + return Status(DB_NOT_FOUND, "Partition " + tag + " of table " + table_id + " not found"); + } + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN GET PARTITION NAME", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, + DatePartionedTableFilesSchema& files) { + files.clear(); + + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query filesToSearchQuery = connectionPtr->query(); + filesToSearchQuery << "SELECT id, table_id, engine_type, file_id, file_type, file_size, row_count, date" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id; + + if (!dates.empty()) { + std::stringstream partitionListSS; + for (auto& date : dates) { + partitionListSS << std::to_string(date) << ", "; + } + 
std::string partitionListStr = partitionListSS.str(); + + partitionListStr = partitionListStr.substr(0, partitionListStr.size() - 2); // remove the last ", " + filesToSearchQuery << " AND date IN (" << partitionListStr << ")"; + } + + if (!ids.empty()) { + std::stringstream idSS; + for (auto& id : ids) { + idSS << "id = " << std::to_string(id) << " OR "; + } + std::string idStr = idSS.str(); + idStr = idStr.substr(0, idStr.size() - 4); // remove the last " OR " + + filesToSearchQuery << " AND (" << idStr << ")"; + } + // End + filesToSearchQuery << " AND" + << " (file_type = " << std::to_string(TableFileSchema::RAW) + << " OR file_type = " << std::to_string(TableFileSchema::TO_INDEX) + << " OR file_type = " << std::to_string(TableFileSchema::INDEX) << ");"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToSearch: " << filesToSearchQuery.str(); + + res = filesToSearchQuery.store(); + } // Scoped Connection + + TableSchema table_schema; + table_schema.table_id_ = table_id; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + + Status ret; + TableFileSchema table_file; + for (auto& resRow : res) { + table_file.id_ = resRow["id"]; // implicit conversion + resRow["table_id"].to_string(table_file.table_id_); + table_file.index_file_size_ = table_schema.index_file_size_; + table_file.engine_type_ = resRow["engine_type"]; + table_file.nlist_ = table_schema.nlist_; + table_file.metric_type_ = table_schema.metric_type_; + resRow["file_id"].to_string(table_file.file_id_); + table_file.file_type_ = resRow["file_type"]; + table_file.file_size_ = resRow["file_size"]; + table_file.row_count_ = resRow["row_count"]; + table_file.date_ = resRow["date"]; + table_file.dimension_ = table_schema.dimension_; + + auto status = utils::GetTableFilePath(options_, table_file); + if (!status.ok()) { + ret = status; + } + + auto dateItr = files.find(table_file.date_); + if (dateItr == files.end()) { + files[table_file.date_] = TableFilesSchema(); + } + + files[table_file.date_].push_back(table_file); + } + + if (res.size() > 0) { + ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-search files"; + } + return ret; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO SEARCH", e.what()); + } +} + +Status +MySQLMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFilesSchema& files) { + files.clear(); + + try { + server::MetricCollector metric; + + // check table existence + TableSchema table_schema; + table_schema.table_id_ = table_id; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query filesToMergeQuery = connectionPtr->query(); + filesToMergeQuery + << "SELECT id, table_id, file_id, file_type, file_size, row_count, date, engine_type, created_on" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND file_type = " << std::to_string(TableFileSchema::RAW) << " ORDER BY row_count DESC;"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToMerge: " << filesToMergeQuery.str(); + + res = filesToMergeQuery.store(); + } // Scoped Connection + + Status ret; + for (auto& resRow : res) { + TableFileSchema table_file; + table_file.file_size_ = resRow["file_size"]; + if (table_file.file_size_ >= 
table_schema.index_file_size_) { + continue; // skip large file + } + + table_file.id_ = resRow["id"]; // implicit conversion + resRow["table_id"].to_string(table_file.table_id_); + resRow["file_id"].to_string(table_file.file_id_); + table_file.file_type_ = resRow["file_type"]; + table_file.row_count_ = resRow["row_count"]; + table_file.date_ = resRow["date"]; + table_file.index_file_size_ = table_schema.index_file_size_; + table_file.engine_type_ = resRow["engine_type"]; + table_file.nlist_ = table_schema.nlist_; + table_file.metric_type_ = table_schema.metric_type_; + table_file.created_on_ = resRow["created_on"]; + table_file.dimension_ = table_schema.dimension_; + + auto status = utils::GetTableFilePath(options_, table_file); + if (!status.ok()) { + ret = status; + } + + auto dateItr = files.find(table_file.date_); + if (dateItr == files.end()) { + files[table_file.date_] = TableFilesSchema(); + } + + files[table_file.date_].push_back(table_file); + } + + if (res.size() > 0) { + ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-merge files"; + } + return ret; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO MERGE", e.what()); + } +} + +Status +MySQLMetaImpl::FilesToIndex(TableFilesSchema& files) { + files.clear(); + + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query filesToIndexQuery = connectionPtr->query(); + filesToIndexQuery + << "SELECT id, table_id, engine_type, file_id, file_type, file_size, row_count, date, created_on" + << " FROM " << META_TABLEFILES << " WHERE file_type = " << std::to_string(TableFileSchema::TO_INDEX) + << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToIndex: " << filesToIndexQuery.str(); + + res = filesToIndexQuery.store(); + } // Scoped Connection + + Status ret; + std::map groups; + TableFileSchema table_file; + for (auto& resRow : res) { + table_file.id_ = resRow["id"]; // implicit conversion + resRow["table_id"].to_string(table_file.table_id_); + table_file.engine_type_ = resRow["engine_type"]; + resRow["file_id"].to_string(table_file.file_id_); + table_file.file_type_ = resRow["file_type"]; + table_file.file_size_ = resRow["file_size"]; + table_file.row_count_ = resRow["row_count"]; + table_file.date_ = resRow["date"]; + table_file.created_on_ = resRow["created_on"]; + + auto groupItr = groups.find(table_file.table_id_); + if (groupItr == groups.end()) { + TableSchema table_schema; + table_schema.table_id_ = table_file.table_id_; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + groups[table_file.table_id_] = table_schema; + } + table_file.dimension_ = groups[table_file.table_id_].dimension_; + table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_; + table_file.nlist_ = groups[table_file.table_id_].nlist_; + table_file.metric_type_ = groups[table_file.table_id_].metric_type_; + + auto status = utils::GetTableFilePath(options_, table_file); + if (!status.ok()) { + ret = status; + } + + files.push_back(table_file); + } + + if (res.size() > 0) { + ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-index files"; + } + return ret; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO INDEX", e.what()); } } @@ -465,7 +1520,7 @@ MySQLMetaImpl::FilesByType(const 
std::string& table_id, const std::vector& mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } std::string types; @@ -478,9 +1533,9 @@ MySQLMetaImpl::FilesByType(const std::string& table_id, const std::vector& mysqlpp::Query hasNonIndexFilesQuery = connectionPtr->query(); // since table_id is a unique column we just need to check whether it exists or not - hasNonIndexFilesQuery << "SELECT file_id, file_type FROM " << META_TABLEFILES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type in (" << types << ");"; + hasNonIndexFilesQuery << "SELECT file_id, file_type" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND file_type in (" << types << ");"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesByType: " << hasNonIndexFilesQuery.str(); @@ -535,854 +1590,6 @@ MySQLMetaImpl::FilesByType(const std::string& table_id, const std::vector& return Status::OK(); } -Status -MySQLMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& index) { - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query updateTableIndexParamQuery = connectionPtr->query(); - updateTableIndexParamQuery << "SELECT id, state, dimension, created_on FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str(); - - mysqlpp::StoreQueryResult res = updateTableIndexParamQuery.store(); - - if (res.num_rows() == 1) { - const mysqlpp::Row& resRow = res[0]; - - size_t id = resRow["id"]; - int32_t state = resRow["state"]; - uint16_t dimension = resRow["dimension"]; - int64_t created_on = resRow["created_on"]; - - updateTableIndexParamQuery << "UPDATE " << META_TABLES << " " - << "SET id = " << id << ", " - << "state = " << state << ", " - << "dimension = " << dimension << ", " - << "created_on = " << created_on << ", " - << "engine_type = " << index.engine_type_ << ", " - << "nlist = " << index.nlist_ << ", " - << "metric_type = " << index.metric_type_ << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str(); - - if (!updateTableIndexParamQuery.exec()) { - return HandleException("QUERY ERROR WHEN UPDATING TABLE INDEX PARAM", - updateTableIndexParamQuery.error()); - } - } else { - return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Successfully update table index, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE INDEX PARAM", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query updateTableFlagQuery = 
connectionPtr->query(); - updateTableFlagQuery << "UPDATE " << META_TABLES << " " - << "SET flag = " << flag << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFlag: " << updateTableFlagQuery.str(); - - if (!updateTableFlagQuery.exec()) { - return HandleException("QUERY ERROR WHEN UPDATING TABLE FLAG", updateTableFlagQuery.error()); - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE FLAG", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::DescribeTableIndex(const std::string& table_id, TableIndex& index) { - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query describeTableIndexQuery = connectionPtr->query(); - describeTableIndexQuery << "SELECT engine_type, nlist, index_file_size, metric_type FROM " << META_TABLES - << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTableIndex: " << describeTableIndexQuery.str(); - - mysqlpp::StoreQueryResult res = describeTableIndexQuery.store(); - - if (res.num_rows() == 1) { - const mysqlpp::Row& resRow = res[0]; - - index.engine_type_ = resRow["engine_type"]; - index.nlist_ = resRow["nlist"]; - index.metric_type_ = resRow["metric_type"]; - } else { - return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); - } - } // Scoped Connection - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE FLAG", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::DropTableIndex(const std::string& table_id) { - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query dropTableIndexQuery = connectionPtr->query(); - - // soft delete index files - dropTableIndexQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << "," - << "updated_time = " << utils::GetMicroSecTimeStamp() << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type = " << std::to_string(TableFileSchema::INDEX) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); - - if (!dropTableIndexQuery.exec()) { - return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); - } - - // set all backup file to raw - dropTableIndexQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::RAW) << "," - << "updated_time = " << utils::GetMicroSecTimeStamp() << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type = " << std::to_string(TableFileSchema::BACKUP) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); - - if (!dropTableIndexQuery.exec()) { - return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); - } - - // set table index type to raw - dropTableIndexQuery << "UPDATE " << 
META_TABLES << " " - << "SET engine_type = " << std::to_string(DEFAULT_ENGINE_TYPE) << "," - << "nlist = " << std::to_string(DEFAULT_NLIST) << ", " - << "metric_type = " << std::to_string(DEFAULT_METRIC_TYPE) << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); - - if (!dropTableIndexQuery.exec()) { - return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Successfully drop table index, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DROPPING TABLE INDEX", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::DeleteTable(const std::string& table_id) { - try { - server::MetricCollector metric; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - // soft delete table - mysqlpp::Query deleteTableQuery = connectionPtr->query(); - // - deleteTableQuery << "UPDATE " << META_TABLES << " " - << "SET state = " << std::to_string(TableSchema::TO_DELETE) << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DeleteTable: " << deleteTableQuery.str(); - - if (!deleteTableQuery.exec()) { - return HandleException("QUERY ERROR WHEN DELETING TABLE", deleteTableQuery.error()); - } - } // Scoped Connection - - if (mode_ == DBOptions::MODE::CLUSTER_WRITABLE) { - DeleteTableFiles(table_id); - } - - ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DELETING TABLE", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::DeleteTableFiles(const std::string& table_id) { - try { - server::MetricCollector metric; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - // soft delete table files - mysqlpp::Query deleteTableFilesQuery = connectionPtr->query(); - // - deleteTableFilesQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << ", " - << "updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DeleteTableFiles: " << deleteTableFilesQuery.str(); - - if (!deleteTableFilesQuery.exec()) { - return HandleException("QUERY ERROR WHEN DELETING TABLE FILES", deleteTableFilesQuery.error()); - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Successfully delete table files, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DELETING TABLE FILES", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::DescribeTable(TableSchema& table_schema) { - try { - server::MetricCollector metric; - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query describeTableQuery = connectionPtr->query(); - describeTableQuery - << 
"SELECT id, state, dimension, created_on, flag, index_file_size, engine_type, nlist, metric_type " - << " FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << table_schema.table_id_ << " " - << "AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTable: " << describeTableQuery.str(); - - res = describeTableQuery.store(); - } // Scoped Connection - - if (res.num_rows() == 1) { - const mysqlpp::Row& resRow = res[0]; - - table_schema.id_ = resRow["id"]; // implicit conversion - - table_schema.state_ = resRow["state"]; - - table_schema.dimension_ = resRow["dimension"]; - - table_schema.created_on_ = resRow["created_on"]; - - table_schema.flag_ = resRow["flag"]; - - table_schema.index_file_size_ = resRow["index_file_size"]; - - table_schema.engine_type_ = resRow["engine_type"]; - - table_schema.nlist_ = resRow["nlist"]; - - table_schema.metric_type_ = resRow["metric_type"]; - } else { - return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); - } - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DESCRIBING TABLE", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) { - try { - server::MetricCollector metric; - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query hasTableQuery = connectionPtr->query(); - // since table_id is a unique column we just need to check whether it exists or not - hasTableQuery << "SELECT EXISTS " - << "(SELECT 1 FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " " - << "AND state <> " << std::to_string(TableSchema::TO_DELETE) << ") " - << "AS " << mysqlpp::quote << "check" - << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::HasTable: " << hasTableQuery.str(); - - res = hasTableQuery.store(); - } // Scoped Connection - - int check = res[0]["check"]; - has_or_not = (check == 1); - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CHECKING IF TABLE EXISTS", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::AllTables(std::vector& table_schema_array) { - try { - server::MetricCollector metric; - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query allTablesQuery = connectionPtr->query(); - allTablesQuery << "SELECT id, table_id, dimension, engine_type, nlist, index_file_size, metric_type FROM " - << META_TABLES << " " - << "WHERE state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allTablesQuery.str(); - - res = allTablesQuery.store(); - } // Scoped Connection - - for (auto& resRow : res) { - TableSchema table_schema; - - table_schema.id_ = resRow["id"]; // implicit conversion - - std::string table_id; - resRow["table_id"].to_string(table_id); - table_schema.table_id_ = table_id; - - table_schema.dimension_ = resRow["dimension"]; - - table_schema.index_file_size_ = resRow["index_file_size"]; - - table_schema.engine_type_ = resRow["engine_type"]; - - table_schema.nlist_ = resRow["nlist"]; - - table_schema.metric_type_ = resRow["metric_type"]; - - 
table_schema_array.emplace_back(table_schema); - } - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DESCRIBING ALL TABLES", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::CreateTableFile(TableFileSchema& file_schema) { - if (file_schema.date_ == EmptyDate) { - file_schema.date_ = utils::GetDate(); - } - TableSchema table_schema; - table_schema.table_id_ = file_schema.table_id_; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; - } - - try { - server::MetricCollector metric; - - NextFileId(file_schema.file_id_); - file_schema.dimension_ = table_schema.dimension_; - file_schema.file_size_ = 0; - file_schema.row_count_ = 0; - file_schema.created_on_ = utils::GetMicroSecTimeStamp(); - file_schema.updated_time_ = file_schema.created_on_; - file_schema.index_file_size_ = table_schema.index_file_size_; - file_schema.engine_type_ = table_schema.engine_type_; - file_schema.nlist_ = table_schema.nlist_; - file_schema.metric_type_ = table_schema.metric_type_; - - std::string id = "NULL"; // auto-increment - std::string table_id = file_schema.table_id_; - std::string engine_type = std::to_string(file_schema.engine_type_); - std::string file_id = file_schema.file_id_; - std::string file_type = std::to_string(file_schema.file_type_); - std::string file_size = std::to_string(file_schema.file_size_); - std::string row_count = std::to_string(file_schema.row_count_); - std::string updated_time = std::to_string(file_schema.updated_time_); - std::string created_on = std::to_string(file_schema.created_on_); - std::string date = std::to_string(file_schema.date_); - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query createTableFileQuery = connectionPtr->query(); - - createTableFileQuery << "INSERT INTO " << META_TABLEFILES << " " - << "VALUES(" << id << ", " << mysqlpp::quote << table_id << ", " << engine_type << ", " - << mysqlpp::quote << file_id << ", " << file_type << ", " << file_size << ", " - << row_count << ", " << updated_time << ", " << created_on << ", " << date << ");"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTableFile: " << createTableFileQuery.str(); - - if (mysqlpp::SimpleResult res = createTableFileQuery.execute()) { - file_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()? 
- - // Consume all results to avoid "Commands out of sync" error - } else { - return HandleException("QUERY ERROR WHEN CREATING TABLE FILE", createTableFileQuery.error()); - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_; - return utils::CreateTableFilePath(options_, file_schema); - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CREATING TABLE FILE", e.what()); - } -} - -Status -MySQLMetaImpl::FilesToIndex(TableFilesSchema& files) { - files.clear(); - - try { - server::MetricCollector metric; - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query filesToIndexQuery = connectionPtr->query(); - filesToIndexQuery - << "SELECT id, table_id, engine_type, file_id, file_type, file_size, row_count, date, created_on FROM " - << META_TABLEFILES << " " - << "WHERE file_type = " << std::to_string(TableFileSchema::TO_INDEX) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToIndex: " << filesToIndexQuery.str(); - - res = filesToIndexQuery.store(); - } // Scoped Connection - - Status ret; - std::map groups; - TableFileSchema table_file; - for (auto& resRow : res) { - table_file.id_ = resRow["id"]; // implicit conversion - - std::string table_id; - resRow["table_id"].to_string(table_id); - table_file.table_id_ = table_id; - - table_file.engine_type_ = resRow["engine_type"]; - - std::string file_id; - resRow["file_id"].to_string(file_id); - table_file.file_id_ = file_id; - - table_file.file_type_ = resRow["file_type"]; - - table_file.file_size_ = resRow["file_size"]; - - table_file.row_count_ = resRow["row_count"]; - - table_file.date_ = resRow["date"]; - - table_file.created_on_ = resRow["created_on"]; - - auto groupItr = groups.find(table_file.table_id_); - if (groupItr == groups.end()) { - TableSchema table_schema; - table_schema.table_id_ = table_file.table_id_; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; - } - groups[table_file.table_id_] = table_schema; - } - table_file.dimension_ = groups[table_file.table_id_].dimension_; - table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_; - table_file.nlist_ = groups[table_file.table_id_].nlist_; - table_file.metric_type_ = groups[table_file.table_id_].metric_type_; - - auto status = utils::GetTableFilePath(options_, table_file); - if (!status.ok()) { - ret = status; - } - - files.push_back(table_file); - } - - if (res.size() > 0) { - ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-index files"; - } - return ret; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO INDEX", e.what()); - } -} - -Status -MySQLMetaImpl::FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, - DatePartionedTableFilesSchema& files) { - files.clear(); - - try { - server::MetricCollector metric; - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query filesToSearchQuery = connectionPtr->query(); - filesToSearchQuery - << "SELECT id, table_id, engine_type, file_id, file_type, file_size, row_count, date FROM " - << META_TABLEFILES << " " - << "WHERE table_id = " << 
mysqlpp::quote << table_id; - - if (!dates.empty()) { - std::stringstream partitionListSS; - for (auto& date : dates) { - partitionListSS << std::to_string(date) << ", "; - } - std::string partitionListStr = partitionListSS.str(); - - partitionListStr = partitionListStr.substr(0, partitionListStr.size() - 2); // remove the last ", " - filesToSearchQuery << " AND " - << "date IN (" << partitionListStr << ")"; - } - - if (!ids.empty()) { - std::stringstream idSS; - for (auto& id : ids) { - idSS << "id = " << std::to_string(id) << " OR "; - } - std::string idStr = idSS.str(); - idStr = idStr.substr(0, idStr.size() - 4); // remove the last " OR " - - filesToSearchQuery << " AND " - << "(" << idStr << ")"; - } - // End - filesToSearchQuery << " AND " - << "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " - << "file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " OR " - << "file_type = " << std::to_string(TableFileSchema::INDEX) << ");"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToSearch: " << filesToSearchQuery.str(); - - res = filesToSearchQuery.store(); - } // Scoped Connection - - TableSchema table_schema; - table_schema.table_id_ = table_id; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; - } - - Status ret; - TableFileSchema table_file; - for (auto& resRow : res) { - table_file.id_ = resRow["id"]; // implicit conversion - - std::string table_id_str; - resRow["table_id"].to_string(table_id_str); - table_file.table_id_ = table_id_str; - - table_file.index_file_size_ = table_schema.index_file_size_; - - table_file.engine_type_ = resRow["engine_type"]; - - table_file.nlist_ = table_schema.nlist_; - - table_file.metric_type_ = table_schema.metric_type_; - - std::string file_id; - resRow["file_id"].to_string(file_id); - table_file.file_id_ = file_id; - - table_file.file_type_ = resRow["file_type"]; - - table_file.file_size_ = resRow["file_size"]; - - table_file.row_count_ = resRow["row_count"]; - - table_file.date_ = resRow["date"]; - - table_file.dimension_ = table_schema.dimension_; - - auto status = utils::GetTableFilePath(options_, table_file); - if (!status.ok()) { - ret = status; - } - - auto dateItr = files.find(table_file.date_); - if (dateItr == files.end()) { - files[table_file.date_] = TableFilesSchema(); - } - - files[table_file.date_].push_back(table_file); - } - - if (res.size() > 0) { - ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-search files"; - } - return ret; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO SEARCH", e.what()); - } -} - -Status -MySQLMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFilesSchema& files) { - files.clear(); - - try { - server::MetricCollector metric; - - // check table existence - TableSchema table_schema; - table_schema.table_id_ = table_id; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; - } - - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query filesToMergeQuery = connectionPtr->query(); - filesToMergeQuery - << "SELECT id, table_id, file_id, file_type, file_size, row_count, date, engine_type, created_on FROM " - << META_TABLEFILES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type = " << std::to_string(TableFileSchema::RAW) << " " - << "ORDER 
BY row_count DESC" - << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToMerge: " << filesToMergeQuery.str(); - - res = filesToMergeQuery.store(); - } // Scoped Connection - - Status ret; - for (auto& resRow : res) { - TableFileSchema table_file; - table_file.file_size_ = resRow["file_size"]; - if (table_file.file_size_ >= table_schema.index_file_size_) { - continue; // skip large file - } - - table_file.id_ = resRow["id"]; // implicit conversion - - std::string table_id_str; - resRow["table_id"].to_string(table_id_str); - table_file.table_id_ = table_id_str; - - std::string file_id; - resRow["file_id"].to_string(file_id); - table_file.file_id_ = file_id; - - table_file.file_type_ = resRow["file_type"]; - - table_file.row_count_ = resRow["row_count"]; - - table_file.date_ = resRow["date"]; - - table_file.index_file_size_ = table_schema.index_file_size_; - - table_file.engine_type_ = resRow["engine_type"]; - - table_file.nlist_ = table_schema.nlist_; - - table_file.metric_type_ = table_schema.metric_type_; - - table_file.created_on_ = resRow["created_on"]; - - table_file.dimension_ = table_schema.dimension_; - - auto status = utils::GetTableFilePath(options_, table_file); - if (!status.ok()) { - ret = status; - } - - auto dateItr = files.find(table_file.date_); - if (dateItr == files.end()) { - files[table_file.date_] = TableFilesSchema(); - } - - files[table_file.date_].push_back(table_file); - } - - if (res.size() > 0) { - ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-merge files"; - } - return ret; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO MERGE", e.what()); - } -} - -Status -MySQLMetaImpl::GetTableFiles(const std::string& table_id, const std::vector& ids, - TableFilesSchema& table_files) { - if (ids.empty()) { - return Status::OK(); - } - - std::stringstream idSS; - for (auto& id : ids) { - idSS << "id = " << std::to_string(id) << " OR "; - } - std::string idStr = idSS.str(); - idStr = idStr.substr(0, idStr.size() - 4); // remove the last " OR " - - try { - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query getTableFileQuery = connectionPtr->query(); - getTableFileQuery - << "SELECT id, engine_type, file_id, file_type, file_size, row_count, date, created_on FROM " - << META_TABLEFILES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "(" << idStr << ") AND " - << "file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetTableFiles: " << getTableFileQuery.str(); - - res = getTableFileQuery.store(); - } // Scoped Connection - - TableSchema table_schema; - table_schema.table_id_ = table_id; - DescribeTable(table_schema); - - Status ret; - for (auto& resRow : res) { - TableFileSchema file_schema; - - file_schema.id_ = resRow["id"]; - - file_schema.table_id_ = table_id; - - file_schema.index_file_size_ = table_schema.index_file_size_; - - file_schema.engine_type_ = resRow["engine_type"]; - - file_schema.nlist_ = table_schema.nlist_; - - file_schema.metric_type_ = table_schema.metric_type_; - - std::string file_id; - resRow["file_id"].to_string(file_id); - file_schema.file_id_ = file_id; - - file_schema.file_type_ = resRow["file_type"]; - - file_schema.file_size_ = resRow["file_size"]; - - file_schema.row_count_ = resRow["row_count"]; - - file_schema.date_ = 
resRow["date"]; - - file_schema.created_on_ = resRow["created_on"]; - - file_schema.dimension_ = table_schema.dimension_; - - utils::GetTableFilePath(options_, file_schema); - - table_files.emplace_back(file_schema); - } - - ENGINE_LOG_DEBUG << "Get table files by id"; - return ret; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN RETRIEVING TABLE FILES", e.what()); - } -} - // TODO(myh): Support swap to cloud storage Status MySQLMetaImpl::Archive() { @@ -1402,14 +1609,14 @@ MySQLMetaImpl::Archive() { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query archiveQuery = connectionPtr->query(); - archiveQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " " - << "WHERE created_on < " << std::to_string(now - usecs) << " AND " - << "file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";"; + archiveQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " WHERE created_on < " << std::to_string(now - usecs) << " AND file_type <> " + << std::to_string(TableFileSchema::TO_DELETE) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::Archive: " << archiveQuery.str(); @@ -1446,12 +1653,13 @@ MySQLMetaImpl::Size(uint64_t& result) { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query getSizeQuery = connectionPtr->query(); - getSizeQuery << "SELECT IFNULL(SUM(file_size),0) AS sum FROM " << META_TABLEFILES << " " - << "WHERE file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";"; + getSizeQuery << "SELECT IFNULL(SUM(file_size),0) AS sum" + << " FROM " << META_TABLEFILES << " WHERE file_type <> " + << std::to_string(TableFileSchema::TO_DELETE) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::Size: " << getSizeQuery.str(); @@ -1470,434 +1678,20 @@ MySQLMetaImpl::Size(uint64_t& result) { return Status::OK(); } -Status -MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) { - if (to_discard_size <= 0) { - return Status::OK(); - } - ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size; - - try { - server::MetricCollector metric; - bool status; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query discardFilesQuery = connectionPtr->query(); - discardFilesQuery << "SELECT id, file_size FROM " << META_TABLEFILES << " " - << "WHERE file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << " " - << "ORDER BY id ASC " - << "LIMIT 10;"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DiscardFiles: " << discardFilesQuery.str(); - - mysqlpp::StoreQueryResult res = discardFilesQuery.store(); - if (res.num_rows() == 0) { - return Status::OK(); - } - - TableFileSchema table_file; - std::stringstream idsToDiscardSS; - for (auto& resRow : res) { - if (to_discard_size <= 0) { - break; - } - table_file.id_ = resRow["id"]; - table_file.file_size_ = resRow["file_size"]; - idsToDiscardSS << "id = " << std::to_string(table_file.id_) << " OR "; - ENGINE_LOG_DEBUG << "Discard table_file.id=" << 
table_file.file_id_ - << " table_file.size=" << table_file.file_size_; - to_discard_size -= table_file.file_size_; - } - - std::string idsToDiscardStr = idsToDiscardSS.str(); - idsToDiscardStr = idsToDiscardStr.substr(0, idsToDiscardStr.size() - 4); // remove the last " OR " - - discardFilesQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << ", " - << "updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " " - << "WHERE " << idsToDiscardStr << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DiscardFiles: " << discardFilesQuery.str(); - - status = discardFilesQuery.exec(); - if (!status) { - return HandleException("QUERY ERROR WHEN DISCARDING FILES", discardFilesQuery.error()); - } - } // Scoped Connection - - return DiscardFiles(to_discard_size); - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DISCARDING FILES", e.what()); - } -} - -// ZR: this function assumes all fields in file_schema have value -Status -MySQLMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { - file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); - - try { - server::MetricCollector metric; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query updateTableFileQuery = connectionPtr->query(); - - // if the table has been deleted, just mark the table file as TO_DELETE - // clean thread will delete the file later - updateTableFileQuery << "SELECT state FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << file_schema.table_id_ << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str(); - - mysqlpp::StoreQueryResult res = updateTableFileQuery.store(); - - if (res.num_rows() == 1) { - int state = res[0]["state"]; - if (state == TableSchema::TO_DELETE) { - file_schema.file_type_ = TableFileSchema::TO_DELETE; - } - } else { - file_schema.file_type_ = TableFileSchema::TO_DELETE; - } - - std::string id = std::to_string(file_schema.id_); - std::string table_id = file_schema.table_id_; - std::string engine_type = std::to_string(file_schema.engine_type_); - std::string file_id = file_schema.file_id_; - std::string file_type = std::to_string(file_schema.file_type_); - std::string file_size = std::to_string(file_schema.file_size_); - std::string row_count = std::to_string(file_schema.row_count_); - std::string updated_time = std::to_string(file_schema.updated_time_); - std::string created_on = std::to_string(file_schema.created_on_); - std::string date = std::to_string(file_schema.date_); - - updateTableFileQuery << "UPDATE " << META_TABLEFILES << " " - << "SET table_id = " << mysqlpp::quote << table_id << ", " - << "engine_type = " << engine_type << ", " - << "file_id = " << mysqlpp::quote << file_id << ", " - << "file_type = " << file_type << ", " - << "file_size = " << file_size << ", " - << "row_count = " << row_count << ", " - << "updated_time = " << updated_time << ", " - << "created_on = " << created_on << ", " - << "date = " << date << " " - << "WHERE id = " << id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str(); - - if (!updateTableFileQuery.exec()) { - ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_; - return HandleException("QUERY ERROR WHEN UPDATING TABLE FILE", updateTableFileQuery.error()); - } - } 
// Scoped Connection - - ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILE", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { - try { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query updateTableFilesToIndexQuery = connectionPtr->query(); - - updateTableFilesToIndexQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type = " << std::to_string(TableFileSchema::RAW) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFilesToIndex: " << updateTableFilesToIndexQuery.str(); - - if (!updateTableFilesToIndexQuery.exec()) { - return HandleException("QUERY ERROR WHEN UPDATING TABLE FILE TO INDEX", - updateTableFilesToIndexQuery.error()); - } - - ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES TO INDEX", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::UpdateTableFiles(TableFilesSchema& files) { - try { - server::MetricCollector metric; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query updateTableFilesQuery = connectionPtr->query(); - - std::map has_tables; - for (auto& file_schema : files) { - if (has_tables.find(file_schema.table_id_) != has_tables.end()) { - continue; - } - - updateTableFilesQuery << "SELECT EXISTS " - << "(SELECT 1 FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << file_schema.table_id_ << " " - << "AND state <> " << std::to_string(TableSchema::TO_DELETE) << ") " - << "AS " << mysqlpp::quote << "check" - << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFiles: " << updateTableFilesQuery.str(); - - mysqlpp::StoreQueryResult res = updateTableFilesQuery.store(); - - int check = res[0]["check"]; - has_tables[file_schema.table_id_] = (check == 1); - } - - for (auto& file_schema : files) { - if (!has_tables[file_schema.table_id_]) { - file_schema.file_type_ = TableFileSchema::TO_DELETE; - } - file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); - - std::string id = std::to_string(file_schema.id_); - std::string table_id = file_schema.table_id_; - std::string engine_type = std::to_string(file_schema.engine_type_); - std::string file_id = file_schema.file_id_; - std::string file_type = std::to_string(file_schema.file_type_); - std::string file_size = std::to_string(file_schema.file_size_); - std::string row_count = std::to_string(file_schema.row_count_); - std::string updated_time = std::to_string(file_schema.updated_time_); - std::string created_on = std::to_string(file_schema.created_on_); - std::string date = std::to_string(file_schema.date_); - - updateTableFilesQuery << "UPDATE " << META_TABLEFILES << " " - << "SET table_id = " << mysqlpp::quote << table_id << ", " - << "engine_type = " << engine_type << ", " - << "file_id = " << mysqlpp::quote << file_id << ", " - << "file_type = " << file_type << ", " - << "file_size = " << file_size << ", 
" - << "row_count = " << row_count << ", " - << "updated_time = " << updated_time << ", " - << "created_on = " << created_on << ", " - << "date = " << date << " " - << "WHERE id = " << id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFiles: " << updateTableFilesQuery.str(); - - if (!updateTableFilesQuery.exec()) { - return HandleException("QUERY ERROR WHEN UPDATING TABLE FILES", updateTableFilesQuery.error()); - } - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { - auto now = utils::GetMicroSecTimeStamp(); - std::set table_ids; - - // remove to_delete files - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); - cleanUpFilesWithTTLQuery << "SELECT id, table_id, file_id, date FROM " << META_TABLEFILES << " " - << "WHERE file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " AND " - << "updated_time < " << std::to_string(now - seconds * US_PS) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); - - mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); - - TableFileSchema table_file; - std::vector idsToDelete; - - for (auto& resRow : res) { - table_file.id_ = resRow["id"]; // implicit conversion - - std::string table_id; - resRow["table_id"].to_string(table_id); - table_file.table_id_ = table_id; - - std::string file_id; - resRow["file_id"].to_string(file_id); - table_file.file_id_ = file_id; - - table_file.date_ = resRow["date"]; - - utils::DeleteTableFilePath(options_, table_file); - - ENGINE_LOG_DEBUG << "Removing file id:" << table_file.id_ << " location:" << table_file.location_; - - idsToDelete.emplace_back(std::to_string(table_file.id_)); - - table_ids.insert(table_file.table_id_); - } - - if (!idsToDelete.empty()) { - std::stringstream idsToDeleteSS; - for (auto& id : idsToDelete) { - idsToDeleteSS << "id = " << id << " OR "; - } - - std::string idsToDeleteStr = idsToDeleteSS.str(); - idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); // remove the last " OR " - cleanUpFilesWithTTLQuery << "DELETE FROM " << META_TABLEFILES << " " - << "WHERE " << idsToDeleteStr << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); - - if (!cleanUpFilesWithTTLQuery.exec()) { - return HandleException("QUERY ERROR WHEN CLEANING UP FILES WITH TTL", - cleanUpFilesWithTTLQuery.error()); - } - } - - if (res.size() > 0) { - ENGINE_LOG_DEBUG << "Clean " << res.size() << " files deleted in " << seconds << " seconds"; - } - } // Scoped Connection - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CLEANING UP FILES WITH TTL", e.what()); - } - - // remove to_delete tables - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); - cleanUpFilesWithTTLQuery << "SELECT id, table_id FROM " << 
META_TABLES << " " - << "WHERE state = " << std::to_string(TableSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); - - mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); - - if (!res.empty()) { - std::stringstream idsToDeleteSS; - for (auto& resRow : res) { - size_t id = resRow["id"]; - std::string table_id; - resRow["table_id"].to_string(table_id); - - utils::DeleteTablePath(options_, table_id, false); // only delete empty folder - - idsToDeleteSS << "id = " << std::to_string(id) << " OR "; - } - std::string idsToDeleteStr = idsToDeleteSS.str(); - idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); // remove the last " OR " - cleanUpFilesWithTTLQuery << "DELETE FROM " << META_TABLES << " " - << "WHERE " << idsToDeleteStr << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); - - if (!cleanUpFilesWithTTLQuery.exec()) { - return HandleException("QUERY ERROR WHEN CLEANING UP TABLES WITH TTL", - cleanUpFilesWithTTLQuery.error()); - } - } - - if (res.size() > 0) { - ENGINE_LOG_DEBUG << "Remove " << res.size() << " tables from meta"; - } - } // Scoped Connection - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL", e.what()); - } - - // remove deleted table folder - // don't remove table folder until all its files has been deleted - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - for (auto& table_id : table_ids) { - mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); - cleanUpFilesWithTTLQuery << "SELECT file_id FROM " << META_TABLEFILES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); - - mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); - - if (res.empty()) { - utils::DeleteTablePath(options_, table_id); - } - } - - if (table_ids.size() > 0) { - ENGINE_LOG_DEBUG << "Remove " << table_ids.size() << " tables folder"; - } - } - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL", e.what()); - } - - return Status::OK(); -} - Status MySQLMetaImpl::CleanUp() { try { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query cleanUpQuery = connectionPtr->query(); - cleanUpQuery << "SELECT table_name " - << "FROM information_schema.tables " - << "WHERE table_schema = " << mysqlpp::quote << mysql_connection_pool_->getDB() << " " - << "AND table_name = " << mysqlpp::quote << META_TABLEFILES << ";"; + cleanUpQuery << "SELECT table_name" + << " FROM information_schema.tables" + << " WHERE table_schema = " << mysqlpp::quote << mysql_connection_pool_->getDB() + << " AND table_name = " << mysqlpp::quote << META_TABLEFILES << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUp: " << cleanUpQuery.str(); @@ -1926,6 +1720,164 @@ MySQLMetaImpl::CleanUp() { return Status::OK(); } +Status +MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { + auto now = utils::GetMicroSecTimeStamp(); + std::set table_ids; + + // remove 
to_delete files + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); + cleanUpFilesWithTTLQuery << "SELECT id, table_id, file_id, date" + << " FROM " << META_TABLEFILES + << " WHERE file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " AND updated_time < " << std::to_string(now - seconds * US_PS) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); + + mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); + + TableFileSchema table_file; + std::vector idsToDelete; + + for (auto& resRow : res) { + table_file.id_ = resRow["id"]; // implicit conversion + resRow["table_id"].to_string(table_file.table_id_); + resRow["file_id"].to_string(table_file.file_id_); + table_file.date_ = resRow["date"]; + + utils::DeleteTableFilePath(options_, table_file); + + ENGINE_LOG_DEBUG << "Removing file id:" << table_file.id_ << " location:" << table_file.location_; + + idsToDelete.emplace_back(std::to_string(table_file.id_)); + table_ids.insert(table_file.table_id_); + } + + if (!idsToDelete.empty()) { + std::stringstream idsToDeleteSS; + for (auto& id : idsToDelete) { + idsToDeleteSS << "id = " << id << " OR "; + } + + std::string idsToDeleteStr = idsToDeleteSS.str(); + idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); // remove the last " OR " + cleanUpFilesWithTTLQuery << "DELETE FROM " << META_TABLEFILES << " WHERE " << idsToDeleteStr << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); + + if (!cleanUpFilesWithTTLQuery.exec()) { + return HandleException("QUERY ERROR WHEN CLEANING UP FILES WITH TTL", + cleanUpFilesWithTTLQuery.error()); + } + } + + if (res.size() > 0) { + ENGINE_LOG_DEBUG << "Clean " << res.size() << " files deleted in " << seconds << " seconds"; + } + } // Scoped Connection + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CLEANING UP FILES WITH TTL", e.what()); + } + + // remove to_delete tables + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); + cleanUpFilesWithTTLQuery << "SELECT id, table_id" + << " FROM " << META_TABLES + << " WHERE state = " << std::to_string(TableSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); + + mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); + + if (!res.empty()) { + std::stringstream idsToDeleteSS; + for (auto& resRow : res) { + size_t id = resRow["id"]; + std::string table_id; + resRow["table_id"].to_string(table_id); + + utils::DeleteTablePath(options_, table_id, false); // only delete empty folder + + idsToDeleteSS << "id = " << std::to_string(id) << " OR "; + } + std::string idsToDeleteStr = idsToDeleteSS.str(); + idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); // remove the last " OR " + cleanUpFilesWithTTLQuery << "DELETE FROM " << META_TABLES << " WHERE " << idsToDeleteStr << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << 
cleanUpFilesWithTTLQuery.str(); + + if (!cleanUpFilesWithTTLQuery.exec()) { + return HandleException("QUERY ERROR WHEN CLEANING UP TABLES WITH TTL", + cleanUpFilesWithTTLQuery.error()); + } + } + + if (res.size() > 0) { + ENGINE_LOG_DEBUG << "Remove " << res.size() << " tables from meta"; + } + } // Scoped Connection + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL", e.what()); + } + + // remove deleted table folder + // don't remove table folder until all its files has been deleted + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + for (auto& table_id : table_ids) { + mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); + cleanUpFilesWithTTLQuery << "SELECT file_id" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote + << table_id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); + + mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); + + if (res.empty()) { + utils::DeleteTablePath(options_, table_id); + } + } + + if (table_ids.size() > 0) { + ENGINE_LOG_DEBUG << "Remove " << table_ids.size() << " tables folder"; + } + } + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL", e.what()); + } + + return Status::OK(); +} + Status MySQLMetaImpl::Count(const std::string& table_id, uint64_t& result) { try { @@ -1944,15 +1896,15 @@ MySQLMetaImpl::Count(const std::string& table_id, uint64_t& result) { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query countQuery = connectionPtr->query(); - countQuery << "SELECT row_count FROM " << META_TABLEFILES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " - << "file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " OR " - << "file_type = " << std::to_string(TableFileSchema::INDEX) << ");"; + countQuery << "SELECT row_count" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND (file_type = " << std::to_string(TableFileSchema::RAW) + << " OR file_type = " << std::to_string(TableFileSchema::TO_INDEX) + << " OR file_type = " << std::to_string(TableFileSchema::INDEX) << ");"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::Count: " << countQuery.str(); @@ -1978,7 +1930,7 @@ MySQLMetaImpl::DropAll() { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query dropTableQuery = connectionPtr->query(); @@ -1995,6 +1947,72 @@ MySQLMetaImpl::DropAll() { } } +Status +MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) { + if (to_discard_size <= 0) { + return Status::OK(); + } + ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size; + + try { + server::MetricCollector metric; + bool status; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return 
Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query discardFilesQuery = connectionPtr->query(); + discardFilesQuery << "SELECT id, file_size" + << " FROM " << META_TABLEFILES << " WHERE file_type <> " + << std::to_string(TableFileSchema::TO_DELETE) << " ORDER BY id ASC " + << " LIMIT 10;"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DiscardFiles: " << discardFilesQuery.str(); + + mysqlpp::StoreQueryResult res = discardFilesQuery.store(); + if (res.num_rows() == 0) { + return Status::OK(); + } + + TableFileSchema table_file; + std::stringstream idsToDiscardSS; + for (auto& resRow : res) { + if (to_discard_size <= 0) { + break; + } + table_file.id_ = resRow["id"]; + table_file.file_size_ = resRow["file_size"]; + idsToDiscardSS << "id = " << std::to_string(table_file.id_) << " OR "; + ENGINE_LOG_DEBUG << "Discard table_file.id=" << table_file.file_id_ + << " table_file.size=" << table_file.file_size_; + to_discard_size -= table_file.file_size_; + } + + std::string idsToDiscardStr = idsToDiscardSS.str(); + idsToDiscardStr = idsToDiscardStr.substr(0, idsToDiscardStr.size() - 4); // remove the last " OR " + + discardFilesQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " ,updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " WHERE " + << idsToDiscardStr << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DiscardFiles: " << discardFilesQuery.str(); + + status = discardFilesQuery.exec(); + if (!status) { + return HandleException("QUERY ERROR WHEN DISCARDING FILES", discardFilesQuery.error()); + } + } // Scoped Connection + + return DiscardFiles(to_discard_size); + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DISCARDING FILES", e.what()); + } +} + } // namespace meta } // namespace engine } // namespace milvus diff --git a/core/src/db/meta/MySQLMetaImpl.h b/core/src/db/meta/MySQLMetaImpl.h index bb7fb5b59f..00b7627548 100644 --- a/core/src/db/meta/MySQLMetaImpl.h +++ b/core/src/db/meta/MySQLMetaImpl.h @@ -49,7 +49,7 @@ class MySQLMetaImpl : public Meta { AllTables(std::vector& table_schema_array) override; Status - DeleteTable(const std::string& table_id) override; + DropTable(const std::string& table_id) override; Status DeleteTableFiles(const std::string& table_id) override; @@ -58,27 +58,17 @@ class MySQLMetaImpl : public Meta { CreateTableFile(TableFileSchema& file_schema) override; Status - DropPartitionsByDates(const std::string& table_id, const DatesT& dates) override; + DropDataByDate(const std::string& table_id, const DatesT& dates) override; Status GetTableFiles(const std::string& table_id, const std::vector& ids, TableFilesSchema& table_files) override; - Status - FilesByType(const std::string& table_id, const std::vector& file_types, - std::vector& file_ids) override; - Status UpdateTableIndex(const std::string& table_id, const TableIndex& index) override; Status UpdateTableFlag(const std::string& table_id, int64_t flag) override; - Status - DescribeTableIndex(const std::string& table_id, TableIndex& index) override; - - Status - DropTableIndex(const std::string& table_id) override; - Status UpdateTableFile(TableFileSchema& file_schema) override; @@ -88,6 +78,24 @@ class MySQLMetaImpl : public Meta { Status UpdateTableFiles(TableFilesSchema& files) override; + Status + DescribeTableIndex(const std::string& table_id, TableIndex& index) override; + + Status + DropTableIndex(const std::string& table_id) override; + + Status + CreatePartition(const 
std::string& table_id, const std::string& partition_name, const std::string& tag) override; + + Status + DropPartition(const std::string& partition_name) override; + + Status + ShowPartitions(const std::string& table_id, std::vector& partiton_schema_array) override; + + Status + GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override; + Status FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, DatePartionedTableFilesSchema& files) override; @@ -98,6 +106,10 @@ class MySQLMetaImpl : public Meta { Status FilesToIndex(TableFilesSchema&) override; + Status + FilesByType(const std::string& table_id, const std::vector& file_types, + std::vector& file_ids) override; + Status Archive() override; diff --git a/core/src/db/meta/SqliteMetaImpl.cpp b/core/src/db/meta/SqliteMetaImpl.cpp index cf2a8d77cf..6221dd8ac1 100644 --- a/core/src/db/meta/SqliteMetaImpl.cpp +++ b/core/src/db/meta/SqliteMetaImpl.cpp @@ -57,26 +57,33 @@ HandleException(const std::string& desc, const char* what = nullptr) { } // namespace inline auto -StoragePrototype(const std::string& path) { - return make_storage( - path, - make_table(META_TABLES, make_column("id", &TableSchema::id_, primary_key()), - make_column("table_id", &TableSchema::table_id_, unique()), - make_column("state", &TableSchema::state_), make_column("dimension", &TableSchema::dimension_), - make_column("created_on", &TableSchema::created_on_), - make_column("flag", &TableSchema::flag_, default_value(0)), - make_column("index_file_size", &TableSchema::index_file_size_), - make_column("engine_type", &TableSchema::engine_type_), make_column("nlist", &TableSchema::nlist_), - make_column("metric_type", &TableSchema::metric_type_)), - make_table( - META_TABLEFILES, make_column("id", &TableFileSchema::id_, primary_key()), - make_column("table_id", &TableFileSchema::table_id_), - make_column("engine_type", &TableFileSchema::engine_type_), - make_column("file_id", &TableFileSchema::file_id_), make_column("file_type", &TableFileSchema::file_type_), - make_column("file_size", &TableFileSchema::file_size_, default_value(0)), - make_column("row_count", &TableFileSchema::row_count_, default_value(0)), - make_column("updated_time", &TableFileSchema::updated_time_), - make_column("created_on", &TableFileSchema::created_on_), make_column("date", &TableFileSchema::date_))); +StoragePrototype(const std::string &path) { + return make_storage(path, + make_table(META_TABLES, + make_column("id", &TableSchema::id_, primary_key()), + make_column("table_id", &TableSchema::table_id_, unique()), + make_column("state", &TableSchema::state_), + make_column("dimension", &TableSchema::dimension_), + make_column("created_on", &TableSchema::created_on_), + make_column("flag", &TableSchema::flag_, default_value(0)), + make_column("index_file_size", &TableSchema::index_file_size_), + make_column("engine_type", &TableSchema::engine_type_), + make_column("nlist", &TableSchema::nlist_), + make_column("metric_type", &TableSchema::metric_type_), + make_column("owner_table", &TableSchema::owner_table_, default_value("")), + make_column("partition_tag", &TableSchema::partition_tag_, default_value("")), + make_column("version", &TableSchema::version_, default_value(CURRENT_VERSION))), + make_table(META_TABLEFILES, + make_column("id", &TableFileSchema::id_, primary_key()), + make_column("table_id", &TableFileSchema::table_id_), + make_column("engine_type", &TableFileSchema::engine_type_), + make_column("file_id", 
&TableFileSchema::file_id_), + make_column("file_type", &TableFileSchema::file_type_), + make_column("file_size", &TableFileSchema::file_size_, default_value(0)), + make_column("row_count", &TableFileSchema::row_count_, default_value(0)), + make_column("updated_time", &TableFileSchema::updated_time_), + make_column("created_on", &TableFileSchema::created_on_), + make_column("date", &TableFileSchema::date_))); } using ConnectorT = decltype(StoragePrototype("")); @@ -151,9 +158,247 @@ SqliteMetaImpl::Initialize() { return Status::OK(); } +Status +SqliteMetaImpl::CreateTable(TableSchema &table_schema) { + try { + server::MetricCollector metric; + + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + if (table_schema.table_id_ == "") { + NextTableId(table_schema.table_id_); + } else { + auto table = ConnectorPtr->select(columns(&TableSchema::state_), + where(c(&TableSchema::table_id_) == table_schema.table_id_)); + if (table.size() == 1) { + if (TableSchema::TO_DELETE == std::get<0>(table[0])) { + return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second"); + } else { + // Change from no error to already exist. + return Status(DB_ALREADY_EXIST, "Table already exists"); + } + } + } + + table_schema.id_ = -1; + table_schema.created_on_ = utils::GetMicroSecTimeStamp(); + + try { + auto id = ConnectorPtr->insert(table_schema); + table_schema.id_ = id; + } catch (std::exception &e) { + return HandleException("Encounter exception when create table", e.what()); + } + + ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; + + return utils::CreateTablePath(options_, table_schema.table_id_); + } catch (std::exception &e) { + return HandleException("Encounter exception when create table", e.what()); + } +} + +Status +SqliteMetaImpl::DescribeTable(TableSchema &table_schema) { + try { + server::MetricCollector metric; + + auto groups = ConnectorPtr->select(columns(&TableSchema::id_, + &TableSchema::state_, + &TableSchema::dimension_, + &TableSchema::created_on_, + &TableSchema::flag_, + &TableSchema::index_file_size_, + &TableSchema::engine_type_, + &TableSchema::nlist_, + &TableSchema::metric_type_, + &TableSchema::owner_table_, + &TableSchema::partition_tag_, + &TableSchema::version_), + where(c(&TableSchema::table_id_) == table_schema.table_id_ + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + + if (groups.size() == 1) { + table_schema.id_ = std::get<0>(groups[0]); + table_schema.state_ = std::get<1>(groups[0]); + table_schema.dimension_ = std::get<2>(groups[0]); + table_schema.created_on_ = std::get<3>(groups[0]); + table_schema.flag_ = std::get<4>(groups[0]); + table_schema.index_file_size_ = std::get<5>(groups[0]); + table_schema.engine_type_ = std::get<6>(groups[0]); + table_schema.nlist_ = std::get<7>(groups[0]); + table_schema.metric_type_ = std::get<8>(groups[0]); + table_schema.owner_table_ = std::get<9>(groups[0]); + table_schema.partition_tag_ = std::get<10>(groups[0]); + table_schema.version_ = std::get<11>(groups[0]); + } else { + return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); + } + } catch (std::exception &e) { + return HandleException("Encounter exception when describe table", e.what()); + } + + return Status::OK(); +} + +Status +SqliteMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) { + has_or_not = false; + + try { + server::MetricCollector metric; + auto tables = 
ConnectorPtr->select(columns(&TableSchema::id_), + where(c(&TableSchema::table_id_) == table_id + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + if (tables.size() == 1) { + has_or_not = true; + } else { + has_or_not = false; + } + } catch (std::exception &e) { + return HandleException("Encounter exception when lookup table", e.what()); + } + + return Status::OK(); +} + +Status +SqliteMetaImpl::AllTables(std::vector &table_schema_array) { + try { + server::MetricCollector metric; + + auto selected = ConnectorPtr->select(columns(&TableSchema::id_, + &TableSchema::table_id_, + &TableSchema::dimension_, + &TableSchema::created_on_, + &TableSchema::flag_, + &TableSchema::index_file_size_, + &TableSchema::engine_type_, + &TableSchema::nlist_, + &TableSchema::metric_type_, + &TableSchema::owner_table_, + &TableSchema::partition_tag_, + &TableSchema::version_), + where(c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + for (auto &table : selected) { + TableSchema schema; + schema.id_ = std::get<0>(table); + schema.table_id_ = std::get<1>(table); + schema.dimension_ = std::get<2>(table); + schema.created_on_ = std::get<3>(table); + schema.flag_ = std::get<4>(table); + schema.index_file_size_ = std::get<5>(table); + schema.engine_type_ = std::get<6>(table); + schema.nlist_ = std::get<7>(table); + schema.metric_type_ = std::get<8>(table); + schema.owner_table_ = std::get<9>(table); + schema.partition_tag_ = std::get<10>(table); + schema.version_ = std::get<11>(table); + + table_schema_array.emplace_back(schema); + } + } catch (std::exception &e) { + return HandleException("Encounter exception when lookup all tables", e.what()); + } + + return Status::OK(); +} + +Status +SqliteMetaImpl::DropTable(const std::string &table_id) { + try { + server::MetricCollector metric; + + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + //soft delete table + ConnectorPtr->update_all( + set( + c(&TableSchema::state_) = (int) TableSchema::TO_DELETE), + where( + c(&TableSchema::table_id_) == table_id and + c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + + ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id; + } catch (std::exception &e) { + return HandleException("Encounter exception when delete table", e.what()); + } + + return Status::OK(); +} + +Status +SqliteMetaImpl::DeleteTableFiles(const std::string &table_id) { + try { + server::MetricCollector metric; + + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + //soft delete table files + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where( + c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE)); + + ENGINE_LOG_DEBUG << "Successfully delete table files, table id = " << table_id; + } catch (std::exception &e) { + return HandleException("Encounter exception when delete table files", e.what()); + } + + return Status::OK(); +} + +Status +SqliteMetaImpl::CreateTableFile(TableFileSchema &file_schema) { + if (file_schema.date_ == EmptyDate) { + file_schema.date_ = utils::GetDate(); + } + TableSchema table_schema; + table_schema.table_id_ = file_schema.table_id_; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + 
} + + try { + server::MetricCollector metric; + + NextFileId(file_schema.file_id_); + file_schema.dimension_ = table_schema.dimension_; + file_schema.file_size_ = 0; + file_schema.row_count_ = 0; + file_schema.created_on_ = utils::GetMicroSecTimeStamp(); + file_schema.updated_time_ = file_schema.created_on_; + file_schema.index_file_size_ = table_schema.index_file_size_; + file_schema.engine_type_ = table_schema.engine_type_; + file_schema.nlist_ = table_schema.nlist_; + file_schema.metric_type_ = table_schema.metric_type_; + + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + auto id = ConnectorPtr->insert(file_schema); + file_schema.id_ = id; + + ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_; + return utils::CreateTableFilePath(options_, file_schema); + } catch (std::exception &e) { + return HandleException("Encounter exception when create table file", e.what()); + } + + return Status::OK(); +} + // TODO(myh): Delete single vector by id Status -SqliteMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& dates) { +SqliteMetaImpl::DropDataByDate(const std::string &table_id, + const DatesT &dates) { if (dates.empty()) { return Status::OK(); } @@ -193,8 +438,8 @@ SqliteMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& where(c(&TableFileSchema::table_id_) == table_id and in(&TableFileSchema::date_, batch_dates))); } - ENGINE_LOG_DEBUG << "Successfully drop partitions, table id = " << table_schema.table_id_; - } catch (std::exception& e) { + ENGINE_LOG_DEBUG << "Successfully drop data by date, table id = " << table_schema.table_id_; + } catch (std::exception &e) { return HandleException("Encounter exception when drop partition", e.what()); } @@ -202,173 +447,149 @@ SqliteMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& } Status -SqliteMetaImpl::CreateTable(TableSchema& table_schema) { +SqliteMetaImpl::GetTableFiles(const std::string &table_id, + const std::vector &ids, + TableFilesSchema &table_files) { try { - server::MetricCollector metric; - - // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - if (table_schema.table_id_ == "") { - NextTableId(table_schema.table_id_); - } else { - auto table = ConnectorPtr->select(columns(&TableSchema::state_), - where(c(&TableSchema::table_id_) == table_schema.table_id_)); - if (table.size() == 1) { - if (TableSchema::TO_DELETE == std::get<0>(table[0])) { - return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second"); - } else { - // Change from no error to already exist. 
- return Status(DB_ALREADY_EXIST, "Table already exists"); - } - } + table_files.clear(); + auto files = ConnectorPtr->select(columns(&TableFileSchema::id_, + &TableFileSchema::file_id_, + &TableFileSchema::file_type_, + &TableFileSchema::file_size_, + &TableFileSchema::row_count_, + &TableFileSchema::date_, + &TableFileSchema::engine_type_, + &TableFileSchema::created_on_), + where(c(&TableFileSchema::table_id_) == table_id and + in(&TableFileSchema::id_, ids) and + c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE)); + TableSchema table_schema; + table_schema.table_id_ = table_id; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; } - table_schema.id_ = -1; - table_schema.created_on_ = utils::GetMicroSecTimeStamp(); + Status result; + for (auto &file : files) { + TableFileSchema file_schema; + file_schema.table_id_ = table_id; + file_schema.id_ = std::get<0>(file); + file_schema.file_id_ = std::get<1>(file); + file_schema.file_type_ = std::get<2>(file); + file_schema.file_size_ = std::get<3>(file); + file_schema.row_count_ = std::get<4>(file); + file_schema.date_ = std::get<5>(file); + file_schema.engine_type_ = std::get<6>(file); + file_schema.created_on_ = std::get<7>(file); + file_schema.dimension_ = table_schema.dimension_; + file_schema.index_file_size_ = table_schema.index_file_size_; + file_schema.nlist_ = table_schema.nlist_; + file_schema.metric_type_ = table_schema.metric_type_; - try { - auto id = ConnectorPtr->insert(table_schema); - table_schema.id_ = id; - } catch (std::exception& e) { - return HandleException("Encounter exception when create table", e.what()); + utils::GetTableFilePath(options_, file_schema); + + table_files.emplace_back(file_schema); } - ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; - - return utils::CreateTablePath(options_, table_schema.table_id_); - } catch (std::exception& e) { - return HandleException("Encounter exception when create table", e.what()); + ENGINE_LOG_DEBUG << "Get table files by id"; + return result; + } catch (std::exception &e) { + return HandleException("Encounter exception when lookup table files", e.what()); } } Status -SqliteMetaImpl::DeleteTable(const std::string& table_id) { +SqliteMetaImpl::UpdateTableFlag(const std::string &table_id, int64_t flag) { try { server::MetricCollector metric; - // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - // soft delete table + //set all backup file to raw ConnectorPtr->update_all( - set(c(&TableSchema::state_) = (int)TableSchema::TO_DELETE), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); - - ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("Encounter exception when delete table", e.what()); + set( + c(&TableSchema::flag_) = flag), + where( + c(&TableSchema::table_id_) == table_id)); + ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; + } catch (std::exception &e) { + std::string msg = "Encounter exception when update table flag: table_id = " + table_id; + return HandleException(msg, e.what()); } return Status::OK(); } Status -SqliteMetaImpl::DeleteTableFiles(const std::string& table_id) { +SqliteMetaImpl::UpdateTableFile(TableFileSchema &file_schema) { + file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); try { server::MetricCollector metric; // 
multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - // soft delete table files - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); + auto tables = ConnectorPtr->select(columns(&TableSchema::state_), + where(c(&TableSchema::table_id_) == file_schema.table_id_)); - ENGINE_LOG_DEBUG << "Successfully delete table files, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("Encounter exception when delete table files", e.what()); + //if the table has been deleted, just mark the table file as TO_DELETE + //clean thread will delete the file later + if (tables.size() < 1 || std::get<0>(tables[0]) == (int) TableSchema::TO_DELETE) { + file_schema.file_type_ = TableFileSchema::TO_DELETE; + } + + ConnectorPtr->update(file_schema); + + ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; + } catch (std::exception &e) { + std::string msg = "Exception update table file: table_id = " + file_schema.table_id_ + + " file_id = " + file_schema.file_id_; + return HandleException(msg, e.what()); } - return Status::OK(); } Status -SqliteMetaImpl::DescribeTable(TableSchema& table_schema) { +SqliteMetaImpl::UpdateTableFiles(TableFilesSchema &files) { try { server::MetricCollector metric; - auto groups = - ConnectorPtr->select(columns(&TableSchema::id_, &TableSchema::state_, &TableSchema::dimension_, - &TableSchema::created_on_, &TableSchema::flag_, &TableSchema::index_file_size_, - &TableSchema::engine_type_, &TableSchema::nlist_, &TableSchema::metric_type_), - where(c(&TableSchema::table_id_) == table_schema.table_id_ and - c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); - if (groups.size() == 1) { - table_schema.id_ = std::get<0>(groups[0]); - table_schema.state_ = std::get<1>(groups[0]); - table_schema.dimension_ = std::get<2>(groups[0]); - table_schema.created_on_ = std::get<3>(groups[0]); - table_schema.flag_ = std::get<4>(groups[0]); - table_schema.index_file_size_ = std::get<5>(groups[0]); - table_schema.engine_type_ = std::get<6>(groups[0]); - table_schema.nlist_ = std::get<7>(groups[0]); - table_schema.metric_type_ = std::get<8>(groups[0]); - } else { - return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); - } - } catch (std::exception& e) { - return HandleException("Encounter exception when describe table", e.what()); - } - - return Status::OK(); -} - -Status -SqliteMetaImpl::FilesByType(const std::string& table_id, const std::vector& file_types, - std::vector& file_ids) { - if (file_types.empty()) { - return Status(DB_ERROR, "file types array is empty"); - } - - try { - file_ids.clear(); - auto selected = ConnectorPtr->select( - columns(&TableFileSchema::file_id_, &TableFileSchema::file_type_), - where(in(&TableFileSchema::file_type_, file_types) and c(&TableFileSchema::table_id_) == table_id)); - - if (selected.size() >= 1) { - int raw_count = 0, new_count = 0, new_merge_count = 0, new_index_count = 0; - int to_index_count = 0, index_count = 0, backup_count = 0; - for (auto& file : selected) { - file_ids.push_back(std::get<0>(file)); - switch 
(std::get<1>(file)) { - case (int)TableFileSchema::RAW: - raw_count++; - break; - case (int)TableFileSchema::NEW: - new_count++; - break; - case (int)TableFileSchema::NEW_MERGE: - new_merge_count++; - break; - case (int)TableFileSchema::NEW_INDEX: - new_index_count++; - break; - case (int)TableFileSchema::TO_INDEX: - to_index_count++; - break; - case (int)TableFileSchema::INDEX: - index_count++; - break; - case (int)TableFileSchema::BACKUP: - backup_count++; - break; - default: - break; - } + std::map has_tables; + for (auto &file : files) { + if (has_tables.find(file.table_id_) != has_tables.end()) { + continue; + } + auto tables = ConnectorPtr->select(columns(&TableSchema::id_), + where(c(&TableSchema::table_id_) == file.table_id_ + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + if (tables.size() >= 1) { + has_tables[file.table_id_] = true; + } else { + has_tables[file.table_id_] = false; } - - ENGINE_LOG_DEBUG << "Table " << table_id << " currently has raw files:" << raw_count - << " new files:" << new_count << " new_merge files:" << new_merge_count - << " new_index files:" << new_index_count << " to_index files:" << to_index_count - << " index files:" << index_count << " backup files:" << backup_count; } - } catch (std::exception& e) { - return HandleException("Encounter exception when check non index files", e.what()); + + auto commited = ConnectorPtr->transaction([&]() mutable { + for (auto &file : files) { + if (!has_tables[file.table_id_]) { + file.file_type_ = TableFileSchema::TO_DELETE; + } + + file.updated_time_ = utils::GetMicroSecTimeStamp(); + ConnectorPtr->update(file); + } + return true; + }); + + if (!commited) { + return HandleException("UpdateTableFiles error: sqlite transaction failed"); + } + + ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; + } catch (std::exception &e) { + return HandleException("Encounter exception when update table files", e.what()); } return Status::OK(); } @@ -381,10 +602,17 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - auto tables = ConnectorPtr->select( - columns(&TableSchema::id_, &TableSchema::state_, &TableSchema::dimension_, &TableSchema::created_on_, - &TableSchema::flag_, &TableSchema::index_file_size_), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + auto tables = ConnectorPtr->select(columns(&TableSchema::id_, + &TableSchema::state_, + &TableSchema::dimension_, + &TableSchema::created_on_, + &TableSchema::flag_, + &TableSchema::index_file_size_, + &TableSchema::owner_table_, + &TableSchema::partition_tag_, + &TableSchema::version_), + where(c(&TableSchema::table_id_) == table_id + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); if (tables.size() > 0) { meta::TableSchema table_schema; @@ -395,6 +623,9 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& table_schema.created_on_ = std::get<3>(tables[0]); table_schema.flag_ = std::get<4>(tables[0]); table_schema.index_file_size_ = std::get<5>(tables[0]); + table_schema.owner_table_ = std::get<6>(tables[0]); + table_schema.partition_tag_ = std::get<7>(tables[0]); + table_schema.version_ = std::get<8>(tables[0]); table_schema.engine_type_ = index.engine_type_; table_schema.nlist_ = index.nlist_; table_schema.metric_type_ = index.metric_type_; @@ -404,11 +635,14 @@ 
SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); } - // set all backup file to raw - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::RAW, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::BACKUP)); + //set all backup file to raw + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::RAW, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where( + c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int) TableFileSchema::BACKUP)); ENGINE_LOG_DEBUG << "Successfully update table index, table id = " << table_id; } catch (std::exception& e) { @@ -420,16 +654,23 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& } Status -SqliteMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { +SqliteMetaImpl::UpdateTableFilesToIndex(const std::string &table_id) { try { server::MetricCollector metric; - // set all backup file to raw - ConnectorPtr->update_all(set(c(&TableSchema::flag_) = flag), where(c(&TableSchema::table_id_) == table_id)); - ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; - } catch (std::exception& e) { - std::string msg = "Encounter exception when update table flag: table_id = " + table_id; - return HandleException(msg, e.what()); + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_INDEX), + where( + c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW)); + + ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; + } catch (std::exception &e) { + return HandleException("Encounter exception when update table files to to_index", e.what()); } return Status::OK(); @@ -440,9 +681,11 @@ SqliteMetaImpl::DescribeTableIndex(const std::string& table_id, TableIndex& inde try { server::MetricCollector metric; - auto groups = ConnectorPtr->select( - columns(&TableSchema::engine_type_, &TableSchema::nlist_, &TableSchema::metric_type_), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + auto groups = ConnectorPtr->select(columns(&TableSchema::engine_type_, + &TableSchema::nlist_, + &TableSchema::metric_type_), + where(c(&TableSchema::table_id_) == table_id + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); if (groups.size() == 1) { index.engine_type_ = std::get<0>(groups[0]); @@ -466,26 +709,35 @@ SqliteMetaImpl::DropTableIndex(const std::string& table_id) { // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - // soft delete index files - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::INDEX)); - - // set all backup file to raw - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::RAW, - 
c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::BACKUP)); - - // set table index type to raw + //soft delete index files ConnectorPtr->update_all( - set(c(&TableSchema::engine_type_) = DEFAULT_ENGINE_TYPE, c(&TableSchema::nlist_) = DEFAULT_NLIST, + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where( + c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int) TableFileSchema::INDEX)); + + //set all backup file to raw + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::RAW, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where( + c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int) TableFileSchema::BACKUP)); + + //set table index type to raw + ConnectorPtr->update_all( + set( + c(&TableSchema::engine_type_) = DEFAULT_ENGINE_TYPE, + c(&TableSchema::nlist_) = DEFAULT_NLIST, c(&TableSchema::metric_type_) = DEFAULT_METRIC_TYPE), - where(c(&TableSchema::table_id_) == table_id)); + where( + c(&TableSchema::table_id_) == table_id)); ENGINE_LOG_DEBUG << "Successfully drop table index, table id = " << table_id; - } catch (std::exception& e) { + } catch (std::exception &e) { return HandleException("Encounter exception when delete table index files", e.what()); } @@ -493,158 +745,94 @@ SqliteMetaImpl::DropTableIndex(const std::string& table_id) { } Status -SqliteMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) { - has_or_not = false; +SqliteMetaImpl::CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) { + server::MetricCollector metric; - try { - server::MetricCollector metric; - auto tables = ConnectorPtr->select( - columns(&TableSchema::id_), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); - if (tables.size() == 1) { - has_or_not = true; - } else { - has_or_not = false; - } - } catch (std::exception& e) { - return HandleException("Encounter exception when lookup table", e.what()); - } - - return Status::OK(); -} - -Status -SqliteMetaImpl::AllTables(std::vector& table_schema_array) { - try { - server::MetricCollector metric; - - auto selected = - ConnectorPtr->select(columns(&TableSchema::id_, &TableSchema::table_id_, &TableSchema::dimension_, - &TableSchema::created_on_, &TableSchema::flag_, &TableSchema::index_file_size_, - &TableSchema::engine_type_, &TableSchema::nlist_, &TableSchema::metric_type_), - where(c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); - for (auto& table : selected) { - TableSchema schema; - schema.id_ = std::get<0>(table); - schema.table_id_ = std::get<1>(table); - schema.dimension_ = std::get<2>(table); - schema.created_on_ = std::get<3>(table); - schema.flag_ = std::get<4>(table); - schema.index_file_size_ = std::get<5>(table); - schema.engine_type_ = std::get<6>(table); - schema.nlist_ = std::get<7>(table); - schema.metric_type_ = std::get<8>(table); - - table_schema_array.emplace_back(schema); - } - } catch (std::exception& e) { - return HandleException("Encounter exception when lookup all tables", e.what()); - } - - return Status::OK(); -} - -Status -SqliteMetaImpl::CreateTableFile(TableFileSchema& file_schema) { - if (file_schema.date_ == EmptyDate) { - 
file_schema.date_ = utils::GetDate(); - } TableSchema table_schema; - table_schema.table_id_ = file_schema.table_id_; + table_schema.table_id_ = table_id; auto status = DescribeTable(table_schema); if (!status.ok()) { return status; } + // not allow create partition under partition + if(!table_schema.owner_table_.empty()) { + return Status(DB_ERROR, "Nested partition is not allowed"); + } + + if (partition_name == "") { + // not allow duplicated partition + std::string exist_partition; + GetPartitionName(table_id, tag, exist_partition); + if(!exist_partition.empty()) { + return Status(DB_ERROR, "Duplicated partition is not allowed"); + } + + NextTableId(table_schema.table_id_); + } else { + table_schema.table_id_ = partition_name; + } + + table_schema.id_ = -1; + table_schema.flag_ = 0; + table_schema.created_on_ = utils::GetMicroSecTimeStamp(); + table_schema.owner_table_ = table_id; + table_schema.partition_tag_ = tag; + + return CreateTable(table_schema); +} + +Status +SqliteMetaImpl::DropPartition(const std::string& partition_name) { + return DropTable(partition_name); +} + +Status +SqliteMetaImpl::ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) { try { server::MetricCollector metric; - NextFileId(file_schema.file_id_); - file_schema.dimension_ = table_schema.dimension_; - file_schema.file_size_ = 0; - file_schema.row_count_ = 0; - file_schema.created_on_ = utils::GetMicroSecTimeStamp(); - file_schema.updated_time_ = file_schema.created_on_; - file_schema.index_file_size_ = table_schema.index_file_size_; - file_schema.engine_type_ = table_schema.engine_type_; - file_schema.nlist_ = table_schema.nlist_; - file_schema.metric_type_ = table_schema.metric_type_; - - // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here - std::lock_guard<std::mutex> meta_lock(meta_mutex_); - - auto id = ConnectorPtr->insert(file_schema); - file_schema.id_ = id; - - ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_; - return utils::CreateTableFilePath(options_, file_schema); - } catch (std::exception& e) { - return HandleException("Encounter exception when create table file", e.what()); + auto partitions = ConnectorPtr->select(columns(&TableSchema::table_id_), + where(c(&TableSchema::owner_table_) == table_id + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + for(size_t i = 0; i < partitions.size(); i++) { + std::string partition_name = std::get<0>(partitions[i]); + meta::TableSchema partition_schema; + partition_schema.table_id_ = partition_name; + DescribeTable(partition_schema); + partiton_schema_array.emplace_back(partition_schema); + } + } catch (std::exception &e) { + return HandleException("Encounter exception when show partitions", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::FilesToIndex(TableFilesSchema& files) { - files.clear(); - +SqliteMetaImpl::GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) { try { server::MetricCollector metric; - auto selected = ConnectorPtr->select( - columns(&TableFileSchema::id_, &TableFileSchema::table_id_, &TableFileSchema::file_id_, - &TableFileSchema::file_type_, &TableFileSchema::file_size_, &TableFileSchema::row_count_, - &TableFileSchema::date_, &TableFileSchema::engine_type_, &TableFileSchema::created_on_), - where(c(&TableFileSchema::file_type_) == (int)TableFileSchema::TO_INDEX)); - - std::map<std::string, TableSchema> groups; - TableFileSchema table_file; - - Status ret; - for (auto& file : selected) { - 
table_file.id_ = std::get<0>(file); - table_file.table_id_ = std::get<1>(file); - table_file.file_id_ = std::get<2>(file); - table_file.file_type_ = std::get<3>(file); - table_file.file_size_ = std::get<4>(file); - table_file.row_count_ = std::get<5>(file); - table_file.date_ = std::get<6>(file); - table_file.engine_type_ = std::get<7>(file); - table_file.created_on_ = std::get<8>(file); - - auto status = utils::GetTableFilePath(options_, table_file); - if (!status.ok()) { - ret = status; - } - auto groupItr = groups.find(table_file.table_id_); - if (groupItr == groups.end()) { - TableSchema table_schema; - table_schema.table_id_ = table_file.table_id_; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; - } - groups[table_file.table_id_] = table_schema; - } - table_file.dimension_ = groups[table_file.table_id_].dimension_; - table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_; - table_file.nlist_ = groups[table_file.table_id_].nlist_; - table_file.metric_type_ = groups[table_file.table_id_].metric_type_; - files.push_back(table_file); + auto name = ConnectorPtr->select(columns(&TableSchema::table_id_), + where(c(&TableSchema::owner_table_) == table_id + and c(&TableSchema::partition_tag_) == tag)); + if (name.size() > 0) { + partition_name = std::get<0>(name[0]); + } else { + return Status(DB_NOT_FOUND, "Table " + table_id + "'s partition " + tag + " not found"); } - - if (selected.size() > 0) { - ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-index files"; - } - return ret; - } catch (std::exception& e) { - return HandleException("Encounter exception when iterate raw files", e.what()); + } catch (std::exception &e) { + return HandleException("Encounter exception when get partition name", e.what()); } + + return Status::OK(); } Status -SqliteMetaImpl::FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, +SqliteMetaImpl::FilesToSearch(const std::string& table_id, + const std::vector& ids, + const DatesT& dates, DatePartionedTableFilesSchema& files) { files.clear(); server::MetricCollector metric; @@ -824,53 +1012,120 @@ SqliteMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFile } Status -SqliteMetaImpl::GetTableFiles(const std::string& table_id, const std::vector& ids, - TableFilesSchema& table_files) { +SqliteMetaImpl::FilesToIndex(TableFilesSchema &files) { + files.clear(); + try { - table_files.clear(); - auto files = ConnectorPtr->select( - columns(&TableFileSchema::id_, &TableFileSchema::file_id_, &TableFileSchema::file_type_, - &TableFileSchema::file_size_, &TableFileSchema::row_count_, &TableFileSchema::date_, - &TableFileSchema::engine_type_, &TableFileSchema::created_on_), - where(c(&TableFileSchema::table_id_) == table_id and in(&TableFileSchema::id_, ids) and - c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); + server::MetricCollector metric; - TableSchema table_schema; - table_schema.table_id_ = table_id; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; + auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_, + &TableFileSchema::table_id_, + &TableFileSchema::file_id_, + &TableFileSchema::file_type_, + &TableFileSchema::file_size_, + &TableFileSchema::row_count_, + &TableFileSchema::date_, + &TableFileSchema::engine_type_, + &TableFileSchema::created_on_), + where(c(&TableFileSchema::file_type_) + == (int) TableFileSchema::TO_INDEX)); + + std::map groups; + TableFileSchema table_file; + + 
Status ret; + for (auto &file : selected) { + table_file.id_ = std::get<0>(file); + table_file.table_id_ = std::get<1>(file); + table_file.file_id_ = std::get<2>(file); + table_file.file_type_ = std::get<3>(file); + table_file.file_size_ = std::get<4>(file); + table_file.row_count_ = std::get<5>(file); + table_file.date_ = std::get<6>(file); + table_file.engine_type_ = std::get<7>(file); + table_file.created_on_ = std::get<8>(file); + + auto status = utils::GetTableFilePath(options_, table_file); + if (!status.ok()) { + ret = status; + } + auto groupItr = groups.find(table_file.table_id_); + if (groupItr == groups.end()) { + TableSchema table_schema; + table_schema.table_id_ = table_file.table_id_; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + groups[table_file.table_id_] = table_schema; + } + table_file.dimension_ = groups[table_file.table_id_].dimension_; + table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_; + table_file.nlist_ = groups[table_file.table_id_].nlist_; + table_file.metric_type_ = groups[table_file.table_id_].metric_type_; + files.push_back(table_file); } - Status result; - for (auto& file : files) { - TableFileSchema file_schema; - file_schema.table_id_ = table_id; - file_schema.id_ = std::get<0>(file); - file_schema.file_id_ = std::get<1>(file); - file_schema.file_type_ = std::get<2>(file); - file_schema.file_size_ = std::get<3>(file); - file_schema.row_count_ = std::get<4>(file); - file_schema.date_ = std::get<5>(file); - file_schema.engine_type_ = std::get<6>(file); - file_schema.created_on_ = std::get<7>(file); - file_schema.dimension_ = table_schema.dimension_; - file_schema.index_file_size_ = table_schema.index_file_size_; - file_schema.nlist_ = table_schema.nlist_; - file_schema.metric_type_ = table_schema.metric_type_; - - utils::GetTableFilePath(options_, file_schema); - - table_files.emplace_back(file_schema); + if (selected.size() > 0) { + ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-index files"; } - - ENGINE_LOG_DEBUG << "Get table files by id"; - return result; - } catch (std::exception& e) { - return HandleException("Encounter exception when lookup table files", e.what()); + return ret; + } catch (std::exception &e) { + return HandleException("Encounter exception when iterate raw files", e.what()); } } +Status +SqliteMetaImpl::FilesByType(const std::string &table_id, + const std::vector<int> &file_types, + std::vector<std::string> &file_ids) { + if (file_types.empty()) { + return Status(DB_ERROR, "file types array is empty"); + } + + try { + file_ids.clear(); + auto selected = ConnectorPtr->select(columns(&TableFileSchema::file_id_, + &TableFileSchema::file_type_), + where(in(&TableFileSchema::file_type_, file_types) + and c(&TableFileSchema::table_id_) == table_id)); + + if (selected.size() >= 1) { + int raw_count = 0, new_count = 0, new_merge_count = 0, new_index_count = 0; + int to_index_count = 0, index_count = 0, backup_count = 0; + for (auto &file : selected) { + file_ids.push_back(std::get<0>(file)); + switch (std::get<1>(file)) { + case (int) TableFileSchema::RAW:raw_count++; + break; + case (int) TableFileSchema::NEW:new_count++; + break; + case (int) TableFileSchema::NEW_MERGE:new_merge_count++; + break; + case (int) TableFileSchema::NEW_INDEX:new_index_count++; + break; + case (int) TableFileSchema::TO_INDEX:to_index_count++; + break; + case (int) TableFileSchema::INDEX:index_count++; + break; + case (int) TableFileSchema::BACKUP:backup_count++; + break; + default:break; + } + } + + 
ENGINE_LOG_DEBUG << "Table " << table_id << " currently has raw files:" << raw_count + << " new files:" << new_count << " new_merge files:" << new_merge_count + << " new_index files:" << new_index_count << " to_index files:" << to_index_count + << " index files:" << index_count << " backup files:" << backup_count; + } + } catch (std::exception &e) { + return HandleException("Encounter exception when check non index files", e.what()); + } + return Status::OK(); +} + + // TODO(myh): Support swap to cloud storage Status SqliteMetaImpl::Archive() { @@ -889,10 +1144,13 @@ SqliteMetaImpl::Archive() { // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE), - where(c(&TableFileSchema::created_on_) < (int64_t)(now - usecs) and - c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); - } catch (std::exception& e) { + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE), + where( + c(&TableFileSchema::created_on_) < (int64_t) (now - usecs) and + c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE)); + } catch (std::exception &e) { return HandleException("Encounter exception when update table files", e.what()); } @@ -932,152 +1190,40 @@ SqliteMetaImpl::Size(uint64_t& result) { } Status -SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) { - if (to_discard_size <= 0) { - return Status::OK(); - } - - ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size; - +SqliteMetaImpl::CleanUp() { try { server::MetricCollector metric; // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); + std::vector file_types = { + (int) TableFileSchema::NEW, + (int) TableFileSchema::NEW_INDEX, + (int) TableFileSchema::NEW_MERGE + }; + auto files = + ConnectorPtr->select(columns(&TableFileSchema::id_), where(in(&TableFileSchema::file_type_, file_types))); + auto commited = ConnectorPtr->transaction([&]() mutable { - auto selected = - ConnectorPtr->select(columns(&TableFileSchema::id_, &TableFileSchema::file_size_), - where(c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE), - order_by(&TableFileSchema::id_), limit(10)); - - std::vector ids; - TableFileSchema table_file; - - for (auto& file : selected) { - if (to_discard_size <= 0) - break; - table_file.id_ = std::get<0>(file); - table_file.file_size_ = std::get<1>(file); - ids.push_back(table_file.id_); - ENGINE_LOG_DEBUG << "Discard table_file.id=" << table_file.file_id_ - << " table_file.size=" << table_file.file_size_; - to_discard_size -= table_file.file_size_; - } - - if (ids.size() == 0) { - return true; - } - - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(in(&TableFileSchema::id_, ids))); - - return true; - }); - - if (!commited) { - return HandleException("DiscardFiles error: sqlite transaction failed"); - } - } catch (std::exception& e) { - return HandleException("Encounter exception when discard table file", e.what()); - } - - return DiscardFiles(to_discard_size); -} - -Status -SqliteMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { - file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); - try { - server::MetricCollector metric; - - // multi-threads call sqlite 
update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - auto tables = ConnectorPtr->select(columns(&TableSchema::state_), - where(c(&TableSchema::table_id_) == file_schema.table_id_)); - - // if the table has been deleted, just mark the table file as TO_DELETE - // clean thread will delete the file later - if (tables.size() < 1 || std::get<0>(tables[0]) == (int)TableSchema::TO_DELETE) { - file_schema.file_type_ = TableFileSchema::TO_DELETE; - } - - ConnectorPtr->update(file_schema); - - ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; - } catch (std::exception& e) { - std::string msg = - "Exception update table file: table_id = " + file_schema.table_id_ + " file_id = " + file_schema.file_id_; - return HandleException(msg, e.what()); - } - return Status::OK(); -} - -Status -SqliteMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { - try { - server::MetricCollector metric; - - // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_INDEX), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::RAW)); - - ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("Encounter exception when update table files to to_index", e.what()); - } - - return Status::OK(); -} - -Status -SqliteMetaImpl::UpdateTableFiles(TableFilesSchema& files) { - try { - server::MetricCollector metric; - - // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - std::map has_tables; - for (auto& file : files) { - if (has_tables.find(file.table_id_) != has_tables.end()) { - continue; - } - auto tables = ConnectorPtr->select(columns(&TableSchema::id_), - where(c(&TableSchema::table_id_) == file.table_id_ and - c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); - if (tables.size() >= 1) { - has_tables[file.table_id_] = true; - } else { - has_tables[file.table_id_] = false; - } - } - - auto commited = ConnectorPtr->transaction([&]() mutable { - for (auto& file : files) { - if (!has_tables[file.table_id_]) { - file.file_type_ = TableFileSchema::TO_DELETE; - } - - file.updated_time_ = utils::GetMicroSecTimeStamp(); - ConnectorPtr->update(file); + for (auto &file : files) { + ENGINE_LOG_DEBUG << "Remove table file type as NEW"; + ConnectorPtr->remove(std::get<0>(file)); } return true; }); if (!commited) { - return HandleException("UpdateTableFiles error: sqlite transaction failed"); + return HandleException("CleanUp error: sqlite transaction failed"); } - ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; - } catch (std::exception& e) { - return HandleException("Encounter exception when update table files", e.what()); + if (files.size() > 0) { + ENGINE_LOG_DEBUG << "Clean " << files.size() << " files"; + } + } catch (std::exception &e) { + return HandleException("Encounter exception when clean table file", e.what()); } + return Status::OK(); } @@ -1093,10 +1239,16 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - auto files = ConnectorPtr->select(columns(&TableFileSchema::id_, 
&TableFileSchema::table_id_, - &TableFileSchema::file_id_, &TableFileSchema::date_), - where(c(&TableFileSchema::file_type_) == (int)TableFileSchema::TO_DELETE and - c(&TableFileSchema::updated_time_) < now - seconds * US_PS)); + auto files = ConnectorPtr->select(columns(&TableFileSchema::id_, + &TableFileSchema::table_id_, + &TableFileSchema::file_id_, + &TableFileSchema::date_), + where( + c(&TableFileSchema::file_type_) == + (int) TableFileSchema::TO_DELETE + and + c(&TableFileSchema::updated_time_) + < now - seconds * US_PS)); auto commited = ConnectorPtr->transaction([&]() mutable { TableFileSchema table_file; @@ -1180,42 +1332,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { } Status -SqliteMetaImpl::CleanUp() { - try { - server::MetricCollector metric; - - // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - std::vector file_types = {(int)TableFileSchema::NEW, (int)TableFileSchema::NEW_INDEX, - (int)TableFileSchema::NEW_MERGE}; - auto files = - ConnectorPtr->select(columns(&TableFileSchema::id_), where(in(&TableFileSchema::file_type_, file_types))); - - auto commited = ConnectorPtr->transaction([&]() mutable { - for (auto& file : files) { - ENGINE_LOG_DEBUG << "Remove table file type as NEW"; - ConnectorPtr->remove(std::get<0>(file)); - } - return true; - }); - - if (!commited) { - return HandleException("CleanUp error: sqlite transaction failed"); - } - - if (files.size() > 0) { - ENGINE_LOG_DEBUG << "Clean " << files.size() << " files"; - } - } catch (std::exception& e) { - return HandleException("Encounter exception when clean table file", e.what()); - } - - return Status::OK(); -} - -Status -SqliteMetaImpl::Count(const std::string& table_id, uint64_t& result) { +SqliteMetaImpl::Count(const std::string &table_id, uint64_t &result) { try { server::MetricCollector metric; @@ -1257,6 +1374,66 @@ SqliteMetaImpl::DropAll() { return Status::OK(); } -} // namespace meta -} // namespace engine -} // namespace milvus +Status +SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) { + if (to_discard_size <= 0) { + return Status::OK(); + } + + ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size; + + try { + server::MetricCollector metric; + + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + auto commited = ConnectorPtr->transaction([&]() mutable { + auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_, + &TableFileSchema::file_size_), + where(c(&TableFileSchema::file_type_) + != (int) TableFileSchema::TO_DELETE), + order_by(&TableFileSchema::id_), + limit(10)); + + std::vector ids; + TableFileSchema table_file; + + for (auto &file : selected) { + if (to_discard_size <= 0) break; + table_file.id_ = std::get<0>(file); + table_file.file_size_ = std::get<1>(file); + ids.push_back(table_file.id_); + ENGINE_LOG_DEBUG << "Discard table_file.id=" << table_file.file_id_ + << " table_file.size=" << table_file.file_size_; + to_discard_size -= table_file.file_size_; + } + + if (ids.size() == 0) { + return true; + } + + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where( + in(&TableFileSchema::id_, ids))); + + return true; + }); + + if (!commited) { + return HandleException("DiscardFiles error: sqlite transaction failed"); + } + } catch (std::exception &e) { + 
return HandleException("Encounter exception when discard table file", e.what()); + } + + return DiscardFiles(to_discard_size); +} + +} // namespace meta +} // namespace engine +} // namespace milvus + diff --git a/core/src/db/meta/SqliteMetaImpl.h b/core/src/db/meta/SqliteMetaImpl.h index 0fc3f3c4ba..84d97ed49d 100644 --- a/core/src/db/meta/SqliteMetaImpl.h +++ b/core/src/db/meta/SqliteMetaImpl.h @@ -49,7 +49,7 @@ class SqliteMetaImpl : public Meta { AllTables(std::vector& table_schema_array) override; Status - DeleteTable(const std::string& table_id) override; + DropTable(const std::string& table_id) override; Status DeleteTableFiles(const std::string& table_id) override; @@ -58,21 +58,26 @@ class SqliteMetaImpl : public Meta { CreateTableFile(TableFileSchema& file_schema) override; Status - DropPartitionsByDates(const std::string& table_id, const DatesT& dates) override; + DropDataByDate(const std::string& table_id, const DatesT& dates) override; Status GetTableFiles(const std::string& table_id, const std::vector& ids, TableFilesSchema& table_files) override; - Status - FilesByType(const std::string& table_id, const std::vector& file_types, - std::vector& file_ids) override; - Status UpdateTableIndex(const std::string& table_id, const TableIndex& index) override; Status UpdateTableFlag(const std::string& table_id, int64_t flag) override; + Status + UpdateTableFile(TableFileSchema& file_schema) override; + + Status + UpdateTableFilesToIndex(const std::string& table_id) override; + + Status + UpdateTableFiles(TableFilesSchema& files) override; + Status DescribeTableIndex(const std::string& table_id, TableIndex& index) override; @@ -80,13 +85,16 @@ class SqliteMetaImpl : public Meta { DropTableIndex(const std::string& table_id) override; Status - UpdateTableFilesToIndex(const std::string& table_id) override; + CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) override; Status - UpdateTableFile(TableFileSchema& file_schema) override; + DropPartition(const std::string& partition_name) override; Status - UpdateTableFiles(TableFilesSchema& files) override; + ShowPartitions(const std::string& table_id, std::vector& partiton_schema_array) override; + + Status + GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override; Status FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, @@ -99,11 +107,15 @@ class SqliteMetaImpl : public Meta { FilesToIndex(TableFilesSchema&) override; Status - Archive() override; + FilesByType(const std::string& table_id, const std::vector& file_types, + std::vector& file_ids) override; Status Size(uint64_t& result) override; + Status + Archive() override; + Status CleanUp() override; diff --git a/core/src/grpc/gen-milvus/milvus.grpc.pb.cc b/core/src/grpc/gen-milvus/milvus.grpc.pb.cc index 82a1b99162..9cb5e70d3d 100644 --- a/core/src/grpc/gen-milvus/milvus.grpc.pb.cc +++ b/core/src/grpc/gen-milvus/milvus.grpc.pb.cc @@ -22,19 +22,22 @@ namespace grpc { static const char* MilvusService_method_names[] = { "/milvus.grpc.MilvusService/CreateTable", "/milvus.grpc.MilvusService/HasTable", - "/milvus.grpc.MilvusService/DropTable", - "/milvus.grpc.MilvusService/CreateIndex", - "/milvus.grpc.MilvusService/Insert", - "/milvus.grpc.MilvusService/Search", - "/milvus.grpc.MilvusService/SearchInFiles", "/milvus.grpc.MilvusService/DescribeTable", "/milvus.grpc.MilvusService/CountTable", "/milvus.grpc.MilvusService/ShowTables", - 
"/milvus.grpc.MilvusService/Cmd", - "/milvus.grpc.MilvusService/DeleteByRange", - "/milvus.grpc.MilvusService/PreloadTable", + "/milvus.grpc.MilvusService/DropTable", + "/milvus.grpc.MilvusService/CreateIndex", "/milvus.grpc.MilvusService/DescribeIndex", "/milvus.grpc.MilvusService/DropIndex", + "/milvus.grpc.MilvusService/CreatePartition", + "/milvus.grpc.MilvusService/ShowPartitions", + "/milvus.grpc.MilvusService/DropPartition", + "/milvus.grpc.MilvusService/Insert", + "/milvus.grpc.MilvusService/Search", + "/milvus.grpc.MilvusService/SearchInFiles", + "/milvus.grpc.MilvusService/Cmd", + "/milvus.grpc.MilvusService/DeleteByDate", + "/milvus.grpc.MilvusService/PreloadTable", }; std::unique_ptr< MilvusService::Stub> MilvusService::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) { @@ -46,19 +49,22 @@ std::unique_ptr< MilvusService::Stub> MilvusService::NewStub(const std::shared_p MilvusService::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel) : channel_(channel), rpcmethod_CreateTable_(MilvusService_method_names[0], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) , rpcmethod_HasTable_(MilvusService_method_names[1], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_DropTable_(MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_CreateIndex_(MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_Insert_(MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_Search_(MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_SearchInFiles_(MilvusService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_DescribeTable_(MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_CountTable_(MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_ShowTables_(MilvusService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_Cmd_(MilvusService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_DeleteByRange_(MilvusService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_PreloadTable_(MilvusService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_DescribeIndex_(MilvusService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_DropIndex_(MilvusService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DescribeTable_(MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_CountTable_(MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_ShowTables_(MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DropTable_(MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_CreateIndex_(MilvusService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DescribeIndex_(MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DropIndex_(MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_CreatePartition_(MilvusService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , 
rpcmethod_ShowPartitions_(MilvusService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DropPartition_(MilvusService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_Insert_(MilvusService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_Search_(MilvusService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_SearchInFiles_(MilvusService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_Cmd_(MilvusService_method_names[15], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DeleteByDate_(MilvusService_method_names[16], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_PreloadTable_(MilvusService_method_names[17], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) {} ::grpc::Status MilvusService::Stub::CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::milvus::grpc::Status* response) { @@ -117,146 +123,6 @@ void MilvusService::Stub::experimental_async::HasTable(::grpc::ClientContext* co return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::BoolReply>::Create(channel_.get(), cq, rpcmethod_HasTable_, context, request, false); } -::grpc::Status MilvusService::Stub::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropTable_, context, request, response); -} - -void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor); -} - -void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return 
::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, false); -} - -::grpc::Status MilvusService::Stub::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreateIndex_, context, request, response); -} - -void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor); -} - -void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, false); -} - -::grpc::Status MilvusService::Stub::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Insert_, context, request, response); -} - -void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f)); -} - -void 
MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor); -} - -void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, false); -} - -::grpc::Status MilvusService::Stub::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Search_, context, request, response); -} - -void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor); -} - -void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, 
rpcmethod_Search_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, false); -} - -::grpc::Status MilvusService::Stub::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SearchInFiles_, context, request, response); -} - -void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor); -} - -void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, false); -} - ::grpc::Status MilvusService::Stub::DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableSchema* response) { return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DescribeTable_, context, request, response); } @@ -341,88 +207,60 @@ void MilvusService::Stub::experimental_async::ShowTables(::grpc::ClientContext* return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< 
::milvus::grpc::TableNameList>::Create(channel_.get(), cq, rpcmethod_ShowTables_, context, request, false); } -::grpc::Status MilvusService::Stub::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Cmd_, context, request, response); +::grpc::Status MilvusService::Stub::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropTable_, context, request, response); } -void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f)); +void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f)); } -void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f)); +void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f)); } -void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor); +void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor); } -void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor); +void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor); } -::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - 
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, true); +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, true); } -::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, false); +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, false); } -::grpc::Status MilvusService::Stub::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::milvus::grpc::Status* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteByRange_, context, request, response); +::grpc::Status MilvusService::Stub::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreateIndex_, context, request, response); } -void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, std::move(f)); +void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f)); } -void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, std::move(f)); +void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f)); } -void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, reactor); +void 
MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor); } -void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, reactor); +void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor); } -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByRange_, context, request, true); +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, true); } -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByRange_, context, request, false); -} - -::grpc::Status MilvusService::Stub::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_PreloadTable_, context, request, response); -} - -void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, 
request, response, reactor); -} - -void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, false); +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, false); } ::grpc::Status MilvusService::Stub::DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::IndexParam* response) { @@ -481,6 +319,258 @@ void MilvusService::Stub::experimental_async::DropIndex(::grpc::ClientContext* c return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropIndex_, context, request, false); } +::grpc::Status MilvusService::Stub::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreatePartition_, context, request, response); +} + +void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* 
reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreatePartition_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreatePartition_, context, request, false); +} + +::grpc::Status MilvusService::Stub::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::PartitionList* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_ShowPartitions_, context, request, response); +} + +void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* MilvusService::Stub::AsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::PartitionList>::Create(channel_.get(), cq, rpcmethod_ShowPartitions_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* MilvusService::Stub::PrepareAsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::PartitionList>::Create(channel_.get(), cq, rpcmethod_ShowPartitions_, context, request, false); +} + +::grpc::Status 
MilvusService::Stub::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropPartition_, context, request, response); +} + +void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropPartition_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropPartition_, context, request, false); +} + +::grpc::Status MilvusService::Stub::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Insert_, context, request, response); +} + +void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, 
::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, false); +} + +::grpc::Status MilvusService::Stub::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Search_, context, request, response); +} + +void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* 
MilvusService::Stub::PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, false); +} + +::grpc::Status MilvusService::Stub::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SearchInFiles_, context, request, response); +} + +void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, false); +} + +::grpc::Status MilvusService::Stub::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Cmd_, context, request, response); +} + +void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f)); +} + 
+void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, false); +} + +::grpc::Status MilvusService::Stub::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteByDate_, context, request, response); +} + +void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< 
::milvus::grpc::Status>* MilvusService::Stub::AsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByDate_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByDate_, context, request, false); +} + +::grpc::Status MilvusService::Stub::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_PreloadTable_, context, request, response); +} + +void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, false); +} + MilvusService::Service::Service() { AddMethod(new ::grpc::internal::RpcServiceMethod( MilvusService_method_names[0], @@ -495,68 +585,83 @@ MilvusService::Service::Service() { AddMethod(new ::grpc::internal::RpcServiceMethod( MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< 
MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>( - std::mem_fn(&MilvusService::Service::DropTable), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[3], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::IndexParam, ::milvus::grpc::Status>( - std::mem_fn(&MilvusService::Service::CreateIndex), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[4], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>( - std::mem_fn(&MilvusService::Service::Insert), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[5], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>( - std::mem_fn(&MilvusService::Service::Search), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[6], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>( - std::mem_fn(&MilvusService::Service::SearchInFiles), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[7], - ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>( std::mem_fn(&MilvusService::Service::DescribeTable), this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[8], + MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>( std::mem_fn(&MilvusService::Service::CountTable), this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[9], + MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::TableNameList>( std::mem_fn(&MilvusService::Service::ShowTables), this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[10], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::StringReply>( - std::mem_fn(&MilvusService::Service::Cmd), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[11], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>( - std::mem_fn(&MilvusService::Service::DeleteByRange), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[12], + MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>( - std::mem_fn(&MilvusService::Service::PreloadTable), this))); + std::mem_fn(&MilvusService::Service::DropTable), this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[13], + MilvusService_method_names[6], + 
::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::IndexParam, ::milvus::grpc::Status>( + std::mem_fn(&MilvusService::Service::CreateIndex), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>( std::mem_fn(&MilvusService::Service::DescribeIndex), this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[14], + MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>( std::mem_fn(&MilvusService::Service::DropIndex), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[9], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>( + std::mem_fn(&MilvusService::Service::CreatePartition), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[10], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>( + std::mem_fn(&MilvusService::Service::ShowPartitions), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[11], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>( + std::mem_fn(&MilvusService::Service::DropPartition), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[12], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>( + std::mem_fn(&MilvusService::Service::Insert), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[13], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>( + std::mem_fn(&MilvusService::Service::Search), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[14], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>( + std::mem_fn(&MilvusService::Service::SearchInFiles), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[15], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::StringReply>( + std::mem_fn(&MilvusService::Service::Cmd), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[16], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>( + std::mem_fn(&MilvusService::Service::DeleteByDate), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[17], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new 
::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>( + std::mem_fn(&MilvusService::Service::PreloadTable), this))); } MilvusService::Service::~Service() { @@ -576,41 +681,6 @@ MilvusService::Service::~Service() { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } -::grpc::Status MilvusService::Service::DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - -::grpc::Status MilvusService::Service::CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - -::grpc::Status MilvusService::Service::Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - -::grpc::Status MilvusService::Service::Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - -::grpc::Status MilvusService::Service::SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - ::grpc::Status MilvusService::Service::DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) { (void) context; (void) request; @@ -632,21 +702,14 @@ MilvusService::Service::~Service() { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } -::grpc::Status MilvusService::Service::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) { +::grpc::Status MilvusService::Service::DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) { (void) context; (void) request; (void) response; return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } -::grpc::Status MilvusService::Service::DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - -::grpc::Status MilvusService::Service::PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) { +::grpc::Status MilvusService::Service::CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) { (void) context; (void) request; (void) response; @@ -667,6 +730,69 @@ MilvusService::Service::~Service() { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } +::grpc::Status MilvusService::Service::CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) { + (void) context; + (void) request; + 
(void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + } // namespace milvus } // namespace grpc diff --git a/core/src/grpc/gen-milvus/milvus.grpc.pb.h b/core/src/grpc/gen-milvus/milvus.grpc.pb.h index 8ea2d13c80..439984f543 100644 --- a/core/src/grpc/gen-milvus/milvus.grpc.pb.h +++ b/core/src/grpc/gen-milvus/milvus.grpc.pb.h @@ -48,12 +48,11 @@ class MilvusService final { public: virtual ~StubInterface() {} // * - // @brief Create table method + // @brief This method is used to create table // - // This method is used to create table - // - // @param param, use to provide table information to be created. + // @param TableSchema, use to provide table information to be created. 
// + // @return Status virtual ::grpc::Status CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::milvus::grpc::Status* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncCreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncCreateTableRaw(context, request, cq)); @@ -62,12 +61,11 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncCreateTableRaw(context, request, cq)); } // * - // @brief Test table existence method + // @brief This method is used to test table existence. // - // This method is used to test table existence. - // - // @param table_name, table name is going to be tested. + // @param TableName, table name is going to be tested. // + // @return BoolReply virtual ::grpc::Status HasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::BoolReply* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::BoolReply>> AsyncHasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::BoolReply>>(AsyncHasTableRaw(context, request, cq)); @@ -76,93 +74,11 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::BoolReply>>(PrepareAsyncHasTableRaw(context, request, cq)); } // * - // @brief Delete table method + // @brief This method is used to get table schema. // - // This method is used to delete table. + // @param TableName, target table name. // - // @param table_name, table name is going to be deleted. - // - virtual ::grpc::Status DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDropTableRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDropTableRaw(context, request, cq)); - } - // * - // @brief Build index by table method - // - // This method is used to build index by table in sync mode. - // - // @param table_name, table is going to be built index. 
- // - virtual ::grpc::Status CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncCreateIndexRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncCreateIndexRaw(context, request, cq)); - } - // * - // @brief Add vector array to table - // - // This method is used to add vector array to table. - // - // @param table_name, table_name is inserted. - // @param record_array, vector array is inserted. - // - // @return vector id array - virtual ::grpc::Status Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>> AsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>>(AsyncInsertRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>> PrepareAsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>>(PrepareAsyncInsertRaw(context, request, cq)); - } - // * - // @brief Query vector - // - // This method is used to query vector in table. - // - // @param table_name, table_name is queried. - // @param query_record_array, all vector are going to be queried. - // @param query_range_array, optional ranges for conditional search. If not specified, search whole table - // @param topk, how many similarity vectors will be searched. - // - // @return query result array. - virtual ::grpc::Status Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> AsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchRaw(context, request, cq)); - } - // * - // @brief Internal use query interface - // - // This method is used to query vector in specified files. - // - // @param file_id_array, specified files id array, queried. 
- // @param query_record_array, all vector are going to be queried. - // @param query_range_array, optional ranges for conditional search. If not specified, search whole table - // @param topk, how many similarity vectors will be searched. - // - // @return query result array. - virtual ::grpc::Status SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> AsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchInFilesRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchInFilesRaw(context, request, cq)); - } - // * - // @brief Get table schema - // - // This method is used to get table schema. - // - // @param table_name, target table name. - // - // @return table schema + // @return TableSchema virtual ::grpc::Status DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableSchema* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableSchema>> AsyncDescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableSchema>>(AsyncDescribeTableRaw(context, request, cq)); @@ -171,13 +87,11 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableSchema>>(PrepareAsyncDescribeTableRaw(context, request, cq)); } // * - // @brief Get table schema + // @brief This method is used to get table schema. // - // This method is used to get table schema. + // @param TableName, target table name. // - // @param table_name, target table name. - // - // @return table schema + // @return TableRowCount virtual ::grpc::Status CountTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableRowCount* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableRowCount>> AsyncCountTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableRowCount>>(AsyncCountTableRaw(context, request, cq)); @@ -186,12 +100,11 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableRowCount>>(PrepareAsyncCountTableRaw(context, request, cq)); } // * - // @brief List all tables in database + // @brief This method is used to list all tables. // - // This method is used to list all tables. + // @param Command, dummy parameter. // - // - // @return table names. 
+ // @return TableNameList virtual ::grpc::Status ShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::TableNameList* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableNameList>> AsyncShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableNameList>>(AsyncShowTablesRaw(context, request, cq)); @@ -200,50 +113,37 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableNameList>>(PrepareAsyncShowTablesRaw(context, request, cq)); } // * - // @brief Give the server status + // @brief This method is used to delete table. // - // This method is used to give the server status. + // @param TableName, table name is going to be deleted. // - // @return Server status. - virtual ::grpc::Status Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>> AsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>>(AsyncCmdRaw(context, request, cq)); + // @return TableNameList + virtual ::grpc::Status DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDropTableRaw(context, request, cq)); } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>> PrepareAsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>>(PrepareAsyncCmdRaw(context, request, cq)); + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDropTableRaw(context, request, cq)); } // * - // @brief delete table by range + // @brief This method is used to build index by table in sync mode. // - // This method is used to delete vector by range + // @param IndexParam, index paramters. // - // @return rpc status. 
- virtual ::grpc::Status DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::milvus::grpc::Status* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDeleteByRangeRaw(context, request, cq)); + // @return Status + virtual ::grpc::Status CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncCreateIndexRaw(context, request, cq)); } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDeleteByRangeRaw(context, request, cq)); + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncCreateIndexRaw(context, request, cq)); } // * - // @brief preload table + // @brief This method is used to describe index // - // This method is used to preload table + // @param TableName, target table name. // - // @return Status. - virtual ::grpc::Status PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncPreloadTableRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncPreloadTableRaw(context, request, cq)); - } - // * - // @brief describe index - // - // This method is used to describe index - // - // @return Status. 
+ // @return IndexParam virtual ::grpc::Status DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::IndexParam* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::IndexParam>> AsyncDescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::IndexParam>>(AsyncDescribeIndexRaw(context, request, cq)); @@ -252,11 +152,11 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::IndexParam>>(PrepareAsyncDescribeIndexRaw(context, request, cq)); } // * - // @brief drop index + // @brief This method is used to drop index // - // This method is used to drop index + // @param TableName, target table name. // - // @return Status. + // @return Status virtual ::grpc::Status DropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDropIndexRaw(context, request, cq)); @@ -264,181 +164,306 @@ class MilvusService final { std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDropIndexRaw(context, request, cq)); } + // * + // @brief This method is used to create partition + // + // @param PartitionParam, partition parameters. + // + // @return Status + virtual ::grpc::Status CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncCreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncCreatePartitionRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncCreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncCreatePartitionRaw(context, request, cq)); + } + // * + // @brief This method is used to show partition information + // + // @param TableName, target table name. 
+ // + // @return PartitionList + virtual ::grpc::Status ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::PartitionList* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>> AsyncShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>>(AsyncShowPartitionsRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>> PrepareAsyncShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>>(PrepareAsyncShowPartitionsRaw(context, request, cq)); + } + // * + // @brief This method is used to drop partition + // + // @param PartitionParam, target partition. + // + // @return Status + virtual ::grpc::Status DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDropPartitionRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDropPartitionRaw(context, request, cq)); + } + // * + // @brief This method is used to add vector array to table. + // + // @param InsertParam, insert parameters. + // + // @return VectorIds + virtual ::grpc::Status Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>> AsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>>(AsyncInsertRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>> PrepareAsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>>(PrepareAsyncInsertRaw(context, request, cq)); + } + // * + // @brief This method is used to query vector in table. + // + // @param SearchParam, search parameters. 
+ // + // @return TopKQueryResultList + virtual ::grpc::Status Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> AsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchRaw(context, request, cq)); + } + // * + // @brief This method is used to query vector in specified files. + // + // @param SearchInFilesParam, search in files paremeters. + // + // @return TopKQueryResultList + virtual ::grpc::Status SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> AsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchInFilesRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchInFilesRaw(context, request, cq)); + } + // * + // @brief This method is used to give the server status. + // + // @param Command, command string + // + // @return StringReply + virtual ::grpc::Status Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>> AsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>>(AsyncCmdRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>> PrepareAsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>>(PrepareAsyncCmdRaw(context, request, cq)); + } + // * + // @brief This method is used to delete vector by date range + // + // @param DeleteByDateParam, delete parameters. 
+ // + // @return status + virtual ::grpc::Status DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDeleteByDateRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDeleteByDateRaw(context, request, cq)); + } + // * + // @brief This method is used to preload table + // + // @param TableName, target table name. + // + // @return Status + virtual ::grpc::Status PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncPreloadTableRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncPreloadTableRaw(context, request, cq)); + } class experimental_async_interface { public: virtual ~experimental_async_interface() {} // * - // @brief Create table method + // @brief This method is used to create table // - // This method is used to create table - // - // @param param, use to provide table information to be created. + // @param TableSchema, use to provide table information to be created. // + // @return Status virtual void CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response, std::function) = 0; virtual void CreateTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; virtual void CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void CreateTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief Test table existence method + // @brief This method is used to test table existence. // - // This method is used to test table existence. - // - // @param table_name, table name is going to be tested. + // @param TableName, table name is going to be tested. 
// + // @return BoolReply virtual void HasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response, std::function) = 0; virtual void HasTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::BoolReply* response, std::function) = 0; virtual void HasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void HasTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::BoolReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief Delete table method + // @brief This method is used to get table schema. // - // This method is used to delete table. + // @param TableName, target table name. // - // @param table_name, table name is going to be deleted. - // - virtual void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) = 0; - virtual void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; - virtual void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief Build index by table method - // - // This method is used to build index by table in sync mode. - // - // @param table_name, table is going to be built index. - // - virtual void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function) = 0; - virtual void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; - virtual void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief Add vector array to table - // - // This method is used to add vector array to table. - // - // @param table_name, table_name is inserted. - // @param record_array, vector array is inserted. - // - // @return vector id array - virtual void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function) = 0; - virtual void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function) = 0; - virtual void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief Query vector - // - // This method is used to query vector in table. - // - // @param table_name, table_name is queried. 
- // @param query_record_array, all vector are going to be queried. - // @param query_range_array, optional ranges for conditional search. If not specified, search whole table - // @param topk, how many similarity vectors will be searched. - // - // @return query result array. - virtual void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) = 0; - virtual void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) = 0; - virtual void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief Internal use query interface - // - // This method is used to query vector in specified files. - // - // @param file_id_array, specified files id array, queried. - // @param query_record_array, all vector are going to be queried. - // @param query_range_array, optional ranges for conditional search. If not specified, search whole table - // @param topk, how many similarity vectors will be searched. - // - // @return query result array. - virtual void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) = 0; - virtual void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) = 0; - virtual void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief Get table schema - // - // This method is used to get table schema. - // - // @param table_name, target table name. - // - // @return table schema + // @return TableSchema virtual void DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response, std::function) = 0; virtual void DescribeTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableSchema* response, std::function) = 0; virtual void DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void DescribeTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableSchema* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief Get table schema + // @brief This method is used to get table schema. // - // This method is used to get table schema. + // @param TableName, target table name. // - // @param table_name, target table name. 
- // - // @return table schema + // @return TableRowCount virtual void CountTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response, std::function) = 0; virtual void CountTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableRowCount* response, std::function) = 0; virtual void CountTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void CountTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableRowCount* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief List all tables in database + // @brief This method is used to list all tables. // - // This method is used to list all tables. + // @param Command, dummy parameter. // - // - // @return table names. + // @return TableNameList virtual void ShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response, std::function) = 0; virtual void ShowTables(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableNameList* response, std::function) = 0; virtual void ShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void ShowTables(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableNameList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief Give the server status + // @brief This method is used to delete table. // - // This method is used to give the server status. + // @param TableName, table name is going to be deleted. // - // @return Server status. - virtual void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function) = 0; - virtual void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function) = 0; - virtual void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // @return Status + virtual void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief delete table by range + // @brief This method is used to build index by table in sync mode. // - // This method is used to delete vector by range + // @param IndexParam, index parameters. // - // @return rpc status. 
- virtual void DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, std::function) = 0; - virtual void DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; - virtual void DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // @return Status + virtual void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief preload table + // @brief This method is used to describe index // - // This method is used to preload table + // @param TableName, target table name. // - // @return Status. - virtual void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) = 0; - virtual void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; - virtual void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief describe index - // - // This method is used to describe index - // - // @return Status. + // @return IndexParam virtual void DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response, std::function) = 0; virtual void DescribeIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::IndexParam* response, std::function) = 0; virtual void DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void DescribeIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::IndexParam* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief drop index + // @brief This method is used to drop index // - // This method is used to drop index + // @param TableName, target table name. // - // @return Status. 
+ // @return Status virtual void DropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) = 0; virtual void DropIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; virtual void DropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void DropIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to create partition + // + // @param PartitionParam, partition parameters. + // + // @return Status + virtual void CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to show partition information + // + // @param TableName, target table name. + // + // @return PartitionList + virtual void ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, std::function) = 0; + virtual void ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, std::function) = 0; + virtual void ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to drop partition + // + // @param PartitionParam, target partition. + // + // @return Status + virtual void DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to add vector array to table. + // + // @param InsertParam, insert parameters. 
+ // + // @return VectorIds + virtual void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function) = 0; + virtual void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function) = 0; + virtual void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to query vector in table. + // + // @param SearchParam, search parameters. + // + // @return TopKQueryResultList + virtual void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) = 0; + virtual void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) = 0; + virtual void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to query vector in specified files. + // + // @param SearchInFilesParam, search in files parameters. + // + // @return TopKQueryResultList + virtual void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) = 0; + virtual void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) = 0; + virtual void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to give the server status. + // + // @param Command, command string + // + // @return StringReply + virtual void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function) = 0; + virtual void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function) = 0; + virtual void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to delete vector by date range + // + // @param DeleteByDateParam, delete parameters. 
+ // + // @return status + virtual void DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to preload table + // + // @param TableName, target table name. + // + // @return Status + virtual void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) = 0; + virtual void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; }; virtual class experimental_async_interface* experimental_async() { return nullptr; } private: @@ -446,32 +471,38 @@ class MilvusService final { virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncCreateTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::BoolReply>* AsyncHasTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::BoolReply>* PrepareAsyncHasTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>* AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>* PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, 
::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableSchema>* AsyncDescribeTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableSchema>* PrepareAsyncDescribeTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableRowCount>* AsyncCountTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableRowCount>* PrepareAsyncCountTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableNameList>* AsyncShowTablesRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableNameList>* PrepareAsyncShowTablesRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>* AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>* PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual 
::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::IndexParam>* AsyncDescribeIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::IndexParam>* PrepareAsyncDescribeIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDropIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDropIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>* AsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>* PrepareAsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>* AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>* PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* AsyncSearchRaw(::grpc::ClientContext* context, const 
::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>* AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>* PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; }; class Stub final : public StubInterface { public: @@ -490,41 +521,6 @@ class MilvusService final { std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::BoolReply>> PrepareAsyncHasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::BoolReply>>(PrepareAsyncHasTableRaw(context, request, cq)); } - ::grpc::Status DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncDropTableRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDropTableRaw(context, request, cq)); - } - ::grpc::Status CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> 
AsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncCreateIndexRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncCreateIndexRaw(context, request, cq)); - } - ::grpc::Status Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>> AsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>>(AsyncInsertRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>> PrepareAsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>>(PrepareAsyncInsertRaw(context, request, cq)); - } - ::grpc::Status Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> AsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchRaw(context, request, cq)); - } - ::grpc::Status SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> AsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchInFilesRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchInFilesRaw(context, request, cq)); - } ::grpc::Status DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableSchema* response) override; std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableSchema>> AsyncDescribeTable(::grpc::ClientContext* context, const 
::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableSchema>>(AsyncDescribeTableRaw(context, request, cq)); @@ -546,26 +542,19 @@ class MilvusService final { std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableNameList>> PrepareAsyncShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableNameList>>(PrepareAsyncShowTablesRaw(context, request, cq)); } - ::grpc::Status Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>> AsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>>(AsyncCmdRaw(context, request, cq)); + ::grpc::Status DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncDropTableRaw(context, request, cq)); } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>> PrepareAsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>>(PrepareAsyncCmdRaw(context, request, cq)); + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDropTableRaw(context, request, cq)); } - ::grpc::Status DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::milvus::grpc::Status* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncDeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncDeleteByRangeRaw(context, request, cq)); + ::grpc::Status CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncCreateIndexRaw(context, request, cq)); } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDeleteByRangeRaw(context, 
request, cq)); - } - ::grpc::Status PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncPreloadTableRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncPreloadTableRaw(context, request, cq)); + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncCreateIndexRaw(context, request, cq)); } ::grpc::Status DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::IndexParam* response) override; std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::IndexParam>> AsyncDescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { @@ -581,6 +570,69 @@ class MilvusService final { std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDropIndexRaw(context, request, cq)); } + ::grpc::Status CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncCreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncCreatePartitionRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncCreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncCreatePartitionRaw(context, request, cq)); + } + ::grpc::Status ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::PartitionList* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>> AsyncShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>>(AsyncShowPartitionsRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>> PrepareAsyncShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return 
std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>>(PrepareAsyncShowPartitionsRaw(context, request, cq)); + } + ::grpc::Status DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncDropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncDropPartitionRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDropPartitionRaw(context, request, cq)); + } + ::grpc::Status Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>> AsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>>(AsyncInsertRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>> PrepareAsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>>(PrepareAsyncInsertRaw(context, request, cq)); + } + ::grpc::Status Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> AsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchRaw(context, request, cq)); + } + ::grpc::Status SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> AsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchInFilesRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< 
::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchInFilesRaw(context, request, cq)); + } + ::grpc::Status Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>> AsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>>(AsyncCmdRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>> PrepareAsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>>(PrepareAsyncCmdRaw(context, request, cq)); + } + ::grpc::Status DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncDeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncDeleteByDateRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDeleteByDateRaw(context, request, cq)); + } + ::grpc::Status PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncPreloadTableRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncPreloadTableRaw(context, request, cq)); + } class experimental_async final : public StubInterface::experimental_async_interface { public: @@ -592,26 +644,6 @@ class MilvusService final { void HasTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::BoolReply* response, std::function) override; void HasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; void HasTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::BoolReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) override; - void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; - void 
DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function) override; - void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; - void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function) override; - void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function) override; - void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; - void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; - void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; - void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; - void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; void DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response, std::function) override; void DescribeTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableSchema* response, std::function) override; void DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, 
::milvus::grpc::TableSchema* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; @@ -624,18 +656,14 @@ class MilvusService final { void ShowTables(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableNameList* response, std::function) override; void ShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; void ShowTables(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableNameList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function) override; - void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function) override; - void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, std::function) override; - void DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; - void DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) override; - void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; - void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) override; + void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function) override; + void CreateIndex(::grpc::ClientContext* context, const 
::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; void DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response, std::function) override; void DescribeIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::IndexParam* response, std::function) override; void DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; @@ -644,6 +672,42 @@ class MilvusService final { void DropIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; void DropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; void DropIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function) override; + void CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, std::function) override; + void ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, std::function) override; + void ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function) override; + void DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, 
::grpc::experimental::ClientUnaryReactor* reactor) override; + void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function) override; + void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function) override; + void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; + void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; + void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; + void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; + void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function) override; + void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function) override; + void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, std::function) override; + void DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void PreloadTable(::grpc::ClientContext* 
context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) override; + void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; private: friend class Stub; explicit experimental_async(Stub* stub): stub_(stub) { } @@ -659,47 +723,56 @@ class MilvusService final { ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncCreateTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::BoolReply>* AsyncHasTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::BoolReply>* PrepareAsyncHasTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableSchema>* AsyncDescribeTableRaw(::grpc::ClientContext* context, const 
::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableSchema>* PrepareAsyncDescribeTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableRowCount>* AsyncCountTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableRowCount>* PrepareAsyncCountTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableNameList>* AsyncShowTablesRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableNameList>* PrepareAsyncShowTablesRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::IndexParam>* AsyncDescribeIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::IndexParam>* PrepareAsyncDescribeIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* 
AsyncDropIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDropIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* AsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* PrepareAsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDeleteByDateRaw(::grpc::ClientContext* context, const 
::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; const ::grpc::internal::RpcMethod rpcmethod_CreateTable_; const ::grpc::internal::RpcMethod rpcmethod_HasTable_; - const ::grpc::internal::RpcMethod rpcmethod_DropTable_; - const ::grpc::internal::RpcMethod rpcmethod_CreateIndex_; - const ::grpc::internal::RpcMethod rpcmethod_Insert_; - const ::grpc::internal::RpcMethod rpcmethod_Search_; - const ::grpc::internal::RpcMethod rpcmethod_SearchInFiles_; const ::grpc::internal::RpcMethod rpcmethod_DescribeTable_; const ::grpc::internal::RpcMethod rpcmethod_CountTable_; const ::grpc::internal::RpcMethod rpcmethod_ShowTables_; - const ::grpc::internal::RpcMethod rpcmethod_Cmd_; - const ::grpc::internal::RpcMethod rpcmethod_DeleteByRange_; - const ::grpc::internal::RpcMethod rpcmethod_PreloadTable_; + const ::grpc::internal::RpcMethod rpcmethod_DropTable_; + const ::grpc::internal::RpcMethod rpcmethod_CreateIndex_; const ::grpc::internal::RpcMethod rpcmethod_DescribeIndex_; const ::grpc::internal::RpcMethod rpcmethod_DropIndex_; + const ::grpc::internal::RpcMethod rpcmethod_CreatePartition_; + const ::grpc::internal::RpcMethod rpcmethod_ShowPartitions_; + const ::grpc::internal::RpcMethod rpcmethod_DropPartition_; + const ::grpc::internal::RpcMethod rpcmethod_Insert_; + const ::grpc::internal::RpcMethod rpcmethod_Search_; + const ::grpc::internal::RpcMethod rpcmethod_SearchInFiles_; + const ::grpc::internal::RpcMethod rpcmethod_Cmd_; + const ::grpc::internal::RpcMethod rpcmethod_DeleteByDate_; + const ::grpc::internal::RpcMethod rpcmethod_PreloadTable_; }; static std::unique_ptr NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions()); @@ -708,137 +781,136 @@ class MilvusService final { Service(); virtual ~Service(); // * - // @brief Create table method + // @brief This method is used to create table // - // This method is used to create table - // - // @param param, use to provide table information to be created. + // @param TableSchema, use to provide table information to be created. // + // @return Status virtual ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response); // * - // @brief Test table existence method + // @brief This method is used to test table existence. // - // This method is used to test table existence. - // - // @param table_name, table name is going to be tested. + // @param TableName, table name is going to be tested. // + // @return BoolReply virtual ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response); // * - // @brief Delete table method + // @brief This method is used to get table schema. // - // This method is used to delete table. + // @param TableName, target table name. // - // @param table_name, table name is going to be deleted. 
- // - virtual ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response); - // * - // @brief Build index by table method - // - // This method is used to build index by table in sync mode. - // - // @param table_name, table is going to be built index. - // - virtual ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response); - // * - // @brief Add vector array to table - // - // This method is used to add vector array to table. - // - // @param table_name, table_name is inserted. - // @param record_array, vector array is inserted. - // - // @return vector id array - virtual ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response); - // * - // @brief Query vector - // - // This method is used to query vector in table. - // - // @param table_name, table_name is queried. - // @param query_record_array, all vector are going to be queried. - // @param query_range_array, optional ranges for conditional search. If not specified, search whole table - // @param topk, how many similarity vectors will be searched. - // - // @return query result array. - virtual ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response); - // * - // @brief Internal use query interface - // - // This method is used to query vector in specified files. - // - // @param file_id_array, specified files id array, queried. - // @param query_record_array, all vector are going to be queried. - // @param query_range_array, optional ranges for conditional search. If not specified, search whole table - // @param topk, how many similarity vectors will be searched. - // - // @return query result array. - virtual ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response); - // * - // @brief Get table schema - // - // This method is used to get table schema. - // - // @param table_name, target table name. - // - // @return table schema + // @return TableSchema virtual ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response); // * - // @brief Get table schema + // @brief This method is used to get table row count. // - // This method is used to get table schema. + // @param TableName, target table name. // - // @param table_name, target table name. - // - // @return table schema + // @return TableRowCount virtual ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response); // * - // @brief List all tables in database + // @brief This method is used to list all tables. // - // This method is used to list all tables. + // @param Command, dummy parameter. // - // - // @return table names. + // @return TableNameList virtual ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response); // * - // @brief Give the server status + // @brief This method is used to delete table. // - // This method is used to give the server status. + // @param TableName, table name is going to be deleted. // - // @return Server status. 
- virtual ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response); + // @return Status + virtual ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response); // * - // @brief delete table by range + // @brief This method is used to build index by table in sync mode. // - // This method is used to delete vector by range + // @param IndexParam, index parameters. // - // @return rpc status. - virtual ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response); + // @return Status + virtual ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response); // * - // @brief preload table + // @brief This method is used to describe index // - // This method is used to preload table + // @param TableName, target table name. // - // @return Status. - virtual ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response); - // * - // @brief describe index - // - // This method is used to describe index - // - // @return Status. + // @return IndexParam virtual ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response); // * - // @brief drop index + // @brief This method is used to drop index // - // This method is used to drop index + // @param TableName, target table name. // - // @return Status. + // @return Status virtual ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response); + // * + // @brief This method is used to create partition + // + // @param PartitionParam, partition parameters. + // + // @return Status + virtual ::grpc::Status CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response); + // * + // @brief This method is used to show partition information + // + // @param TableName, target table name. + // + // @return PartitionList + virtual ::grpc::Status ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response); + // * + // @brief This method is used to drop partition + // + // @param PartitionParam, target partition. + // + // @return Status + virtual ::grpc::Status DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response); + // * + // @brief This method is used to add vector array to table. + // + // @param InsertParam, insert parameters. + // + // @return VectorIds + virtual ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response); + // * + // @brief This method is used to query vector in table. + // + // @param SearchParam, search parameters. + // + // @return TopKQueryResultList + virtual ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response); + // * + // @brief This method is used to query vector in specified files. + // + // @param SearchInFilesParam, search in files parameters. 
+ // + // @return TopKQueryResultList + virtual ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response); + // * + // @brief This method is used to give the server status. + // + // @param Command, command string + // + // @return StringReply + virtual ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response); + // * + // @brief This method is used to delete vector by date range + // + // @param DeleteByDateParam, delete parameters. + // + // @return status + virtual ::grpc::Status DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response); + // * + // @brief This method is used to preload table + // + // @param TableName, target table name. + // + // @return Status + virtual ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response); }; template class WithAsyncMethod_CreateTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_CreateTable() { ::grpc::Service::MarkMethodAsync(0); @@ -847,7 +919,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -858,7 +930,7 @@ class MilvusService final { template class WithAsyncMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_HasTable() { ::grpc::Service::MarkMethodAsync(1); @@ -867,7 +939,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -876,270 +948,330 @@ class MilvusService final { } }; template - class WithAsyncMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_DropTable() { - ::grpc::Service::MarkMethodAsync(2); - } - ~WithAsyncMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestDropTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, 
::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithAsyncMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_CreateIndex() { - ::grpc::Service::MarkMethodAsync(3); - } - ~WithAsyncMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestCreateIndex(::grpc::ServerContext* context, ::milvus::grpc::IndexParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithAsyncMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_Insert() { - ::grpc::Service::MarkMethodAsync(4); - } - ~WithAsyncMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestInsert(::grpc::ServerContext* context, ::milvus::grpc::InsertParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::VectorIds>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithAsyncMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_Search() { - ::grpc::Service::MarkMethodAsync(5); - } - ~WithAsyncMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestSearch(::grpc::ServerContext* context, ::milvus::grpc::SearchParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TopKQueryResultList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithAsyncMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_SearchInFiles() { - ::grpc::Service::MarkMethodAsync(6); - } - ~WithAsyncMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status 
SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestSearchInFiles(::grpc::ServerContext* context, ::milvus::grpc::SearchInFilesParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TopKQueryResultList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template class WithAsyncMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_DescribeTable() { - ::grpc::Service::MarkMethodAsync(7); + ::grpc::Service::MarkMethodAsync(2); } ~WithAsyncMethod_DescribeTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDescribeTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TableSchema>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithAsyncMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_CountTable() { - ::grpc::Service::MarkMethodAsync(8); + ::grpc::Service::MarkMethodAsync(3); } ~WithAsyncMethod_CountTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestCountTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TableRowCount>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(8, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithAsyncMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: 
WithAsyncMethod_ShowTables() { - ::grpc::Service::MarkMethodAsync(9); + ::grpc::Service::MarkMethodAsync(4); } ~WithAsyncMethod_ShowTables() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestShowTables(::grpc::ServerContext* context, ::milvus::grpc::Command* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TableNameList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(9, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); } }; template - class WithAsyncMethod_Cmd : public BaseClass { + class WithAsyncMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithAsyncMethod_Cmd() { - ::grpc::Service::MarkMethodAsync(10); + WithAsyncMethod_DropTable() { + ::grpc::Service::MarkMethodAsync(5); } - ~WithAsyncMethod_Cmd() override { + ~WithAsyncMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - void RequestCmd(::grpc::ServerContext* context, ::milvus::grpc::Command* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::StringReply>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(10, context, request, response, new_call_cq, notification_cq, tag); + void RequestDropTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); } }; template - class WithAsyncMethod_DeleteByRange : public BaseClass { + class WithAsyncMethod_CreateIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithAsyncMethod_DeleteByRange() { - ::grpc::Service::MarkMethodAsync(11); + WithAsyncMethod_CreateIndex() { + ::grpc::Service::MarkMethodAsync(6); } - ~WithAsyncMethod_DeleteByRange() override { + ~WithAsyncMethod_CreateIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) 
override { + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - void RequestDeleteByRange(::grpc::ServerContext* context, ::milvus::grpc::DeleteByRangeParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(11, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithAsyncMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_PreloadTable() { - ::grpc::Service::MarkMethodAsync(12); - } - ~WithAsyncMethod_PreloadTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestPreloadTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(12, context, request, response, new_call_cq, notification_cq, tag); + void RequestCreateIndex(::grpc::ServerContext* context, ::milvus::grpc::IndexParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithAsyncMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_DescribeIndex() { - ::grpc::Service::MarkMethodAsync(13); + ::grpc::Service::MarkMethodAsync(7); } ~WithAsyncMethod_DescribeIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDescribeIndex(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::IndexParam>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(13, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithAsyncMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: 
WithAsyncMethod_DropIndex() { - ::grpc::Service::MarkMethodAsync(14); + ::grpc::Service::MarkMethodAsync(8); } ~WithAsyncMethod_DropIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDropIndex(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(8, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_CreatePartition() { + ::grpc::Service::MarkMethodAsync(9); + } + ~WithAsyncMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestCreatePartition(::grpc::ServerContext* context, ::milvus::grpc::PartitionParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(9, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_ShowPartitions() { + ::grpc::Service::MarkMethodAsync(10); + } + ~WithAsyncMethod_ShowPartitions() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestShowPartitions(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::PartitionList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(10, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_DropPartition() { + ::grpc::Service::MarkMethodAsync(11); + } + ~WithAsyncMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, 
::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestDropPartition(::grpc::ServerContext* context, ::milvus::grpc::PartitionParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(11, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_Insert() { + ::grpc::Service::MarkMethodAsync(12); + } + ~WithAsyncMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestInsert(::grpc::ServerContext* context, ::milvus::grpc::InsertParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::VectorIds>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(12, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_Search() { + ::grpc::Service::MarkMethodAsync(13); + } + ~WithAsyncMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestSearch(::grpc::ServerContext* context, ::milvus::grpc::SearchParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TopKQueryResultList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(13, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_SearchInFiles() { + ::grpc::Service::MarkMethodAsync(14); + } + ~WithAsyncMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestSearchInFiles(::grpc::ServerContext* context, ::milvus::grpc::SearchInFilesParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TopKQueryResultList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { ::grpc::Service::RequestAsyncUnary(14, context, request, response, new_call_cq, notification_cq, tag); } }; - typedef 
WithAsyncMethod_CreateTable > > > > > > > > > > > > > > AsyncService; + template + class WithAsyncMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_Cmd() { + ::grpc::Service::MarkMethodAsync(15); + } + ~WithAsyncMethod_Cmd() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestCmd(::grpc::ServerContext* context, ::milvus::grpc::Command* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::StringReply>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(15, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_DeleteByDate() { + ::grpc::Service::MarkMethodAsync(16); + } + ~WithAsyncMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestDeleteByDate(::grpc::ServerContext* context, ::milvus::grpc::DeleteByDateParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(16, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_PreloadTable() { + ::grpc::Service::MarkMethodAsync(17); + } + ~WithAsyncMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestPreloadTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(17, context, request, response, new_call_cq, notification_cq, tag); + } + }; + typedef WithAsyncMethod_CreateTable > > > > > > > > > > > > > > > > > AsyncService; template class ExperimentalWithCallbackMethod_CreateTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_CreateTable() { ::grpc::Service::experimental().MarkMethodCallback(0, @@ -1161,16 +1293,16 @@ class MilvusService final { 
BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_HasTable() { ::grpc::Service::experimental().MarkMethodCallback(1, @@ -1192,174 +1324,19 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_DropTable() { - ::grpc::Service::experimental().MarkMethodCallback(2, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::TableName* request, - ::milvus::grpc::Status* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->DropTable(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_DropTable( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::Status>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>*>( - ::grpc::Service::experimental().GetHandler(2)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - 
virtual void DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_CreateIndex() { - ::grpc::Service::experimental().MarkMethodCallback(3, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::IndexParam* request, - ::milvus::grpc::Status* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->CreateIndex(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_CreateIndex( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>*>( - ::grpc::Service::experimental().GetHandler(3)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_Insert() { - ::grpc::Service::experimental().MarkMethodCallback(4, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::InsertParam* request, - ::milvus::grpc::VectorIds* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->Insert(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_Insert( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>*>( - ::grpc::Service::experimental().GetHandler(4)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ServerCallbackRpcController* controller) { 
controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_Search() { - ::grpc::Service::experimental().MarkMethodCallback(5, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::SearchParam* request, - ::milvus::grpc::TopKQueryResultList* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->Search(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_Search( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>*>( - ::grpc::Service::experimental().GetHandler(5)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_SearchInFiles() { - ::grpc::Service::experimental().MarkMethodCallback(6, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::SearchInFilesParam* request, - ::milvus::grpc::TopKQueryResultList* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->SearchInFiles(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_SearchInFiles( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>*>( - ::grpc::Service::experimental().GetHandler(6)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ServerCallbackRpcController* controller) { 
controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_DescribeTable() { - ::grpc::Service::experimental().MarkMethodCallback(7, + ::grpc::Service::experimental().MarkMethodCallback(2, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>( [this](::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, @@ -1371,26 +1348,26 @@ class MilvusService final { void SetMessageAllocatorFor_DescribeTable( ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>*>( - ::grpc::Service::experimental().GetHandler(7)) + ::grpc::Service::experimental().GetHandler(2)) ->SetMessageAllocator(allocator); } ~ExperimentalWithCallbackMethod_DescribeTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_CountTable() { - ::grpc::Service::experimental().MarkMethodCallback(8, + ::grpc::Service::experimental().MarkMethodCallback(3, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>( [this](::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, @@ -1402,26 +1379,26 @@ class MilvusService final { void SetMessageAllocatorFor_CountTable( ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>*>( - ::grpc::Service::experimental().GetHandler(8)) + ::grpc::Service::experimental().GetHandler(3)) 
->SetMessageAllocator(allocator); } ~ExperimentalWithCallbackMethod_CountTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_ShowTables() { - ::grpc::Service::experimental().MarkMethodCallback(9, + ::grpc::Service::experimental().MarkMethodCallback(4, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::TableNameList>( [this](::grpc::ServerContext* context, const ::milvus::grpc::Command* request, @@ -1433,119 +1410,88 @@ class MilvusService final { void SetMessageAllocatorFor_ShowTables( ::grpc::experimental::MessageAllocator< ::milvus::grpc::Command, ::milvus::grpc::TableNameList>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::TableNameList>*>( - ::grpc::Service::experimental().GetHandler(9)) + ::grpc::Service::experimental().GetHandler(4)) ->SetMessageAllocator(allocator); } ~ExperimentalWithCallbackMethod_ShowTables() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template - class ExperimentalWithCallbackMethod_Cmd : public BaseClass { + class ExperimentalWithCallbackMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void 
BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - ExperimentalWithCallbackMethod_Cmd() { - ::grpc::Service::experimental().MarkMethodCallback(10, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::Command* request, - ::milvus::grpc::StringReply* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->Cmd(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_Cmd( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::Command, ::milvus::grpc::StringReply>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>*>( - ::grpc::Service::experimental().GetHandler(10)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_Cmd() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_DeleteByRange : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_DeleteByRange() { - ::grpc::Service::experimental().MarkMethodCallback(11, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::DeleteByRangeParam* request, - ::milvus::grpc::Status* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->DeleteByRange(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_DeleteByRange( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>*>( - ::grpc::Service::experimental().GetHandler(11)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_DeleteByRange() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_PreloadTable() { - ::grpc::Service::experimental().MarkMethodCallback(12, + 
ExperimentalWithCallbackMethod_DropTable() { + ::grpc::Service::experimental().MarkMethodCallback(5, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>( [this](::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->PreloadTable(context, request, response, controller); + return this->DropTable(context, request, response, controller); })); } - void SetMessageAllocatorFor_PreloadTable( + void SetMessageAllocatorFor_DropTable( ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::Status>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>*>( - ::grpc::Service::experimental().GetHandler(12)) + ::grpc::Service::experimental().GetHandler(5)) ->SetMessageAllocator(allocator); } - ~ExperimentalWithCallbackMethod_PreloadTable() override { + ~ExperimentalWithCallbackMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_CreateIndex : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_CreateIndex() { + ::grpc::Service::experimental().MarkMethodCallback(6, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::IndexParam* request, + ::milvus::grpc::Status* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->CreateIndex(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_CreateIndex( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>*>( + ::grpc::Service::experimental().GetHandler(6)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_CreateIndex() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void 
CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_DescribeIndex() { - ::grpc::Service::experimental().MarkMethodCallback(13, + ::grpc::Service::experimental().MarkMethodCallback(7, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>( [this](::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, @@ -1557,26 +1503,26 @@ class MilvusService final { void SetMessageAllocatorFor_DescribeIndex( ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>*>( - ::grpc::Service::experimental().GetHandler(13)) + ::grpc::Service::experimental().GetHandler(7)) ->SetMessageAllocator(allocator); } ~ExperimentalWithCallbackMethod_DescribeIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_DropIndex() { - ::grpc::Service::experimental().MarkMethodCallback(14, + ::grpc::Service::experimental().MarkMethodCallback(8, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>( [this](::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, @@ -1588,24 +1534,303 @@ class MilvusService final { void SetMessageAllocatorFor_DropIndex( ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::Status>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>*>( - ::grpc::Service::experimental().GetHandler(14)) + ::grpc::Service::experimental().GetHandler(8)) ->SetMessageAllocator(allocator); } ~ExperimentalWithCallbackMethod_DropIndex() override { BaseClassMustBeDerivedFromService(this); } 
// disable synchronous version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; - typedef ExperimentalWithCallbackMethod_CreateTable > > > > > > > > > > > > > > ExperimentalCallbackService; + template + class ExperimentalWithCallbackMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_CreatePartition() { + ::grpc::Service::experimental().MarkMethodCallback(9, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->CreatePartition(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_CreatePartition( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>*>( + ::grpc::Service::experimental().GetHandler(9)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_ShowPartitions() { + ::grpc::Service::experimental().MarkMethodCallback(10, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::TableName* request, + ::milvus::grpc::PartitionList* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->ShowPartitions(context, request, response, controller); + })); + } + void 
SetMessageAllocatorFor_ShowPartitions( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>*>( + ::grpc::Service::experimental().GetHandler(10)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_ShowPartitions() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_DropPartition() { + ::grpc::Service::experimental().MarkMethodCallback(11, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->DropPartition(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_DropPartition( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>*>( + ::grpc::Service::experimental().GetHandler(11)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_Insert() { + ::grpc::Service::experimental().MarkMethodCallback(12, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::InsertParam* request, + ::milvus::grpc::VectorIds* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->Insert(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_Insert( + ::grpc::experimental::MessageAllocator< 
::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>*>( + ::grpc::Service::experimental().GetHandler(12)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_Search() { + ::grpc::Service::experimental().MarkMethodCallback(13, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::SearchParam* request, + ::milvus::grpc::TopKQueryResultList* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->Search(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_Search( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>*>( + ::grpc::Service::experimental().GetHandler(13)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_SearchInFiles() { + ::grpc::Service::experimental().MarkMethodCallback(14, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::SearchInFilesParam* request, + ::milvus::grpc::TopKQueryResultList* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->SearchInFiles(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_SearchInFiles( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::SearchInFilesParam, 
::milvus::grpc::TopKQueryResultList>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>*>( + ::grpc::Service::experimental().GetHandler(14)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_Cmd() { + ::grpc::Service::experimental().MarkMethodCallback(15, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::Command* request, + ::milvus::grpc::StringReply* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->Cmd(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_Cmd( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::Command, ::milvus::grpc::StringReply>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>*>( + ::grpc::Service::experimental().GetHandler(15)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_Cmd() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_DeleteByDate() { + ::grpc::Service::experimental().MarkMethodCallback(16, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::DeleteByDateParam* request, + ::milvus::grpc::Status* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->DeleteByDate(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_DeleteByDate( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>* allocator) { + 
static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>*>( + ::grpc::Service::experimental().GetHandler(16)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_PreloadTable() { + ::grpc::Service::experimental().MarkMethodCallback(17, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::TableName* request, + ::milvus::grpc::Status* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->PreloadTable(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_PreloadTable( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::Status>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>*>( + ::grpc::Service::experimental().GetHandler(17)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + typedef ExperimentalWithCallbackMethod_CreateTable > > > > > > > > > > > > > > > > > ExperimentalCallbackService; template class WithGenericMethod_CreateTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_CreateTable() { ::grpc::Service::MarkMethodGeneric(0); @@ -1614,7 +1839,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ 
-1622,7 +1847,7 @@ class MilvusService final { template class WithGenericMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_HasTable() { ::grpc::Service::MarkMethodGeneric(1); @@ -1631,92 +1856,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_DropTable() { - ::grpc::Service::MarkMethodGeneric(2); - } - ~WithGenericMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_CreateIndex() { - ::grpc::Service::MarkMethodGeneric(3); - } - ~WithGenericMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_Insert() { - ::grpc::Service::MarkMethodGeneric(4); - } - ~WithGenericMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_Search() { - ::grpc::Service::MarkMethodGeneric(5); - } - ~WithGenericMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_SearchInFiles() { - ::grpc::Service::MarkMethodGeneric(6); - } - ~WithGenericMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status 
SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1724,16 +1864,16 @@ class MilvusService final { template class WithGenericMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_DescribeTable() { - ::grpc::Service::MarkMethodGeneric(7); + ::grpc::Service::MarkMethodGeneric(2); } ~WithGenericMethod_DescribeTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1741,16 +1881,16 @@ class MilvusService final { template class WithGenericMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_CountTable() { - ::grpc::Service::MarkMethodGeneric(8); + ::grpc::Service::MarkMethodGeneric(3); } ~WithGenericMethod_CountTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1758,67 +1898,50 @@ class MilvusService final { template class WithGenericMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_ShowTables() { - ::grpc::Service::MarkMethodGeneric(9); + ::grpc::Service::MarkMethodGeneric(4); } ~WithGenericMethod_ShowTables() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } }; template - class WithGenericMethod_Cmd : public BaseClass { + class WithGenericMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithGenericMethod_Cmd() { - ::grpc::Service::MarkMethodGeneric(10); + WithGenericMethod_DropTable() { + 
::grpc::Service::MarkMethodGeneric(5); } - ~WithGenericMethod_Cmd() override { + ~WithGenericMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } }; template - class WithGenericMethod_DeleteByRange : public BaseClass { + class WithGenericMethod_CreateIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithGenericMethod_DeleteByRange() { - ::grpc::Service::MarkMethodGeneric(11); + WithGenericMethod_CreateIndex() { + ::grpc::Service::MarkMethodGeneric(6); } - ~WithGenericMethod_DeleteByRange() override { + ~WithGenericMethod_CreateIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_PreloadTable() { - ::grpc::Service::MarkMethodGeneric(12); - } - ~WithGenericMethod_PreloadTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1826,16 +1949,16 @@ class MilvusService final { template class WithGenericMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_DescribeIndex() { - ::grpc::Service::MarkMethodGeneric(13); + ::grpc::Service::MarkMethodGeneric(7); } ~WithGenericMethod_DescribeIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1843,16 +1966,169 @@ class MilvusService final { template class WithGenericMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_DropIndex() { - ::grpc::Service::MarkMethodGeneric(14); + ::grpc::Service::MarkMethodGeneric(8); } 
~WithGenericMethod_DropIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_CreatePartition() { + ::grpc::Service::MarkMethodGeneric(9); + } + ~WithGenericMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_ShowPartitions() { + ::grpc::Service::MarkMethodGeneric(10); + } + ~WithGenericMethod_ShowPartitions() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_DropPartition() { + ::grpc::Service::MarkMethodGeneric(11); + } + ~WithGenericMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_Insert() { + ::grpc::Service::MarkMethodGeneric(12); + } + ~WithGenericMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_Search() { + ::grpc::Service::MarkMethodGeneric(13); + } + ~WithGenericMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, 
::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_SearchInFiles() { + ::grpc::Service::MarkMethodGeneric(14); + } + ~WithGenericMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_Cmd() { + ::grpc::Service::MarkMethodGeneric(15); + } + ~WithGenericMethod_Cmd() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_DeleteByDate() { + ::grpc::Service::MarkMethodGeneric(16); + } + ~WithGenericMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_PreloadTable() { + ::grpc::Service::MarkMethodGeneric(17); + } + ~WithGenericMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1860,7 +2136,7 @@ class MilvusService final { template class WithRawMethod_CreateTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_CreateTable() { ::grpc::Service::MarkMethodRaw(0); @@ -1869,7 +2145,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1880,7 +2156,7 @@ 
class MilvusService final { template class WithRawMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_HasTable() { ::grpc::Service::MarkMethodRaw(1); @@ -1889,7 +2165,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1898,269 +2174,329 @@ class MilvusService final { } }; template - class WithRawMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_DropTable() { - ::grpc::Service::MarkMethodRaw(2); - } - ~WithRawMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestDropTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithRawMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_CreateIndex() { - ::grpc::Service::MarkMethodRaw(3); - } - ~WithRawMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestCreateIndex(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithRawMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_Insert() { - ::grpc::Service::MarkMethodRaw(4); - } - ~WithRawMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestInsert(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, 
::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithRawMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_Search() { - ::grpc::Service::MarkMethodRaw(5); - } - ~WithRawMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestSearch(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithRawMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_SearchInFiles() { - ::grpc::Service::MarkMethodRaw(6); - } - ~WithRawMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestSearchInFiles(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template class WithRawMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_DescribeTable() { - ::grpc::Service::MarkMethodRaw(7); + ::grpc::Service::MarkMethodRaw(2); } ~WithRawMethod_DescribeTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDescribeTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithRawMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const 
Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_CountTable() { - ::grpc::Service::MarkMethodRaw(8); + ::grpc::Service::MarkMethodRaw(3); } ~WithRawMethod_CountTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestCountTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(8, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithRawMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_ShowTables() { - ::grpc::Service::MarkMethodRaw(9); + ::grpc::Service::MarkMethodRaw(4); } ~WithRawMethod_ShowTables() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestShowTables(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(9, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); } }; template - class WithRawMethod_Cmd : public BaseClass { + class WithRawMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithRawMethod_Cmd() { - ::grpc::Service::MarkMethodRaw(10); + WithRawMethod_DropTable() { + ::grpc::Service::MarkMethodRaw(5); } - ~WithRawMethod_Cmd() override { + ~WithRawMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - void RequestCmd(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< 
::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(10, context, request, response, new_call_cq, notification_cq, tag); + void RequestDropTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); } }; template - class WithRawMethod_DeleteByRange : public BaseClass { + class WithRawMethod_CreateIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithRawMethod_DeleteByRange() { - ::grpc::Service::MarkMethodRaw(11); + WithRawMethod_CreateIndex() { + ::grpc::Service::MarkMethodRaw(6); } - ~WithRawMethod_DeleteByRange() override { + ~WithRawMethod_CreateIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - void RequestDeleteByRange(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(11, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithRawMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_PreloadTable() { - ::grpc::Service::MarkMethodRaw(12); - } - ~WithRawMethod_PreloadTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestPreloadTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(12, context, request, response, new_call_cq, notification_cq, tag); + void RequestCreateIndex(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithRawMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_DescribeIndex() { - 
::grpc::Service::MarkMethodRaw(13); + ::grpc::Service::MarkMethodRaw(7); } ~WithRawMethod_DescribeIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDescribeIndex(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(13, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithRawMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_DropIndex() { - ::grpc::Service::MarkMethodRaw(14); + ::grpc::Service::MarkMethodRaw(8); } ~WithRawMethod_DropIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDropIndex(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(8, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_CreatePartition() { + ::grpc::Service::MarkMethodRaw(9); + } + ~WithRawMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestCreatePartition(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(9, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_ShowPartitions() { + ::grpc::Service::MarkMethodRaw(10); + } + ~WithRawMethod_ShowPartitions() override { + 
BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestShowPartitions(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(10, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_DropPartition() { + ::grpc::Service::MarkMethodRaw(11); + } + ~WithRawMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestDropPartition(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(11, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_Insert() { + ::grpc::Service::MarkMethodRaw(12); + } + ~WithRawMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestInsert(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(12, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_Search() { + ::grpc::Service::MarkMethodRaw(13); + } + ~WithRawMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestSearch(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(13, 
context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_SearchInFiles() { + ::grpc::Service::MarkMethodRaw(14); + } + ~WithRawMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestSearchInFiles(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { ::grpc::Service::RequestAsyncUnary(14, context, request, response, new_call_cq, notification_cq, tag); } }; template + class WithRawMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_Cmd() { + ::grpc::Service::MarkMethodRaw(15); + } + ~WithRawMethod_Cmd() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestCmd(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(15, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_DeleteByDate() { + ::grpc::Service::MarkMethodRaw(16); + } + ~WithRawMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestDeleteByDate(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(16, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_PreloadTable() { + ::grpc::Service::MarkMethodRaw(17); + } + ~WithRawMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestPreloadTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(17, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template class ExperimentalWithRawCallbackMethod_CreateTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_CreateTable() { ::grpc::Service::experimental().MarkMethodRawCallback(0, @@ -2176,16 +2512,16 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void CreateTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void CreateTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_HasTable() { ::grpc::Service::experimental().MarkMethodRawCallback(1, @@ -2201,144 +2537,19 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void HasTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_DropTable() { - ::grpc::Service::experimental().MarkMethodRawCallback(2, - new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - 
this->DropTable(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void DropTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_CreateIndex() { - ::grpc::Service::experimental().MarkMethodRawCallback(3, - new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - this->CreateIndex(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void CreateIndex(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_Insert() { - ::grpc::Service::experimental().MarkMethodRawCallback(4, - new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - this->Insert(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void Insert(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_Search() { - ::grpc::Service::experimental().MarkMethodRawCallback(5, - new 
::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - this->Search(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void Search(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_SearchInFiles() { - ::grpc::Service::experimental().MarkMethodRawCallback(6, - new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - this->SearchInFiles(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void SearchInFiles(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void HasTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_DescribeTable() { - ::grpc::Service::experimental().MarkMethodRawCallback(7, + ::grpc::Service::experimental().MarkMethodRawCallback(2, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, @@ -2351,19 +2562,19 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* 
/*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DescribeTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DescribeTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_CountTable() { - ::grpc::Service::experimental().MarkMethodRawCallback(8, + ::grpc::Service::experimental().MarkMethodRawCallback(3, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, @@ -2376,19 +2587,19 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void CountTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void CountTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_ShowTables() { - ::grpc::Service::experimental().MarkMethodRawCallback(9, + ::grpc::Service::experimental().MarkMethodRawCallback(4, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, @@ -2401,94 +2612,69 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void ShowTables(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* 
response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void ShowTables(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template - class ExperimentalWithRawCallbackMethod_Cmd : public BaseClass { + class ExperimentalWithRawCallbackMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - ExperimentalWithRawCallbackMethod_Cmd() { - ::grpc::Service::experimental().MarkMethodRawCallback(10, + ExperimentalWithRawCallbackMethod_DropTable() { + ::grpc::Service::experimental().MarkMethodRawCallback(5, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { - this->Cmd(context, request, response, controller); + this->DropTable(context, request, response, controller); })); } - ~ExperimentalWithRawCallbackMethod_Cmd() override { + ~ExperimentalWithRawCallbackMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void Cmd(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DropTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template - class ExperimentalWithRawCallbackMethod_DeleteByRange : public BaseClass { + class ExperimentalWithRawCallbackMethod_CreateIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - ExperimentalWithRawCallbackMethod_DeleteByRange() { - ::grpc::Service::experimental().MarkMethodRawCallback(11, + ExperimentalWithRawCallbackMethod_CreateIndex() { + ::grpc::Service::experimental().MarkMethodRawCallback(6, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { - this->DeleteByRange(context, request, response, controller); + this->CreateIndex(context, request, response, controller); })); } - ~ExperimentalWithRawCallbackMethod_DeleteByRange() override { + ~ExperimentalWithRawCallbackMethod_CreateIndex() override { BaseClassMustBeDerivedFromService(this); } // 
disable synchronous version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DeleteByRange(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_PreloadTable() { - ::grpc::Service::experimental().MarkMethodRawCallback(12, - new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - this->PreloadTable(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_PreloadTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void PreloadTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void CreateIndex(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_DescribeIndex() { - ::grpc::Service::experimental().MarkMethodRawCallback(13, + ::grpc::Service::experimental().MarkMethodRawCallback(7, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, @@ -2501,19 +2687,19 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DescribeIndex(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, 
::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DescribeIndex(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_DropIndex() { - ::grpc::Service::experimental().MarkMethodRawCallback(14, + ::grpc::Service::experimental().MarkMethodRawCallback(8, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, @@ -2526,16 +2712,241 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DropIndex(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DropIndex(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_CreatePartition() { + ::grpc::Service::experimental().MarkMethodRawCallback(9, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->CreatePartition(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void CreatePartition(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* 
/*service*/) {} + public: + ExperimentalWithRawCallbackMethod_ShowPartitions() { + ::grpc::Service::experimental().MarkMethodRawCallback(10, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->ShowPartitions(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_ShowPartitions() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void ShowPartitions(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_DropPartition() { + ::grpc::Service::experimental().MarkMethodRawCallback(11, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->DropPartition(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void DropPartition(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_Insert() { + ::grpc::Service::experimental().MarkMethodRawCallback(12, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->Insert(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void 
Insert(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_Search() { + ::grpc::Service::experimental().MarkMethodRawCallback(13, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->Search(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void Search(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_SearchInFiles() { + ::grpc::Service::experimental().MarkMethodRawCallback(14, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->SearchInFiles(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void SearchInFiles(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_Cmd() { + ::grpc::Service::experimental().MarkMethodRawCallback(15, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->Cmd(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_Cmd() 
override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void Cmd(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_DeleteByDate() { + ::grpc::Service::experimental().MarkMethodRawCallback(16, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->DeleteByDate(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void DeleteByDate(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_PreloadTable() { + ::grpc::Service::experimental().MarkMethodRawCallback(17, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->PreloadTable(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void PreloadTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class WithStreamedUnaryMethod_CreateTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_CreateTable() { 
::grpc::Service::MarkMethodStreamed(0, @@ -2545,7 +2956,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2555,7 +2966,7 @@ class MilvusService final { template class WithStreamedUnaryMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_HasTable() { ::grpc::Service::MarkMethodStreamed(1, @@ -2565,7 +2976,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2573,119 +2984,19 @@ class MilvusService final { virtual ::grpc::Status StreamedHasTable(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::BoolReply>* server_unary_streamer) = 0; }; template - class WithStreamedUnaryMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_DropTable() { - ::grpc::Service::MarkMethodStreamed(2, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DropTable::StreamedDropTable, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedDropTable(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::Status>* server_unary_streamer) = 0; - }; - template - class WithStreamedUnaryMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_CreateIndex() { - ::grpc::Service::MarkMethodStreamed(3, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_CreateIndex::StreamedCreateIndex, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedCreateIndex(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::IndexParam,::milvus::grpc::Status>* server_unary_streamer) = 0; - }; - template - class WithStreamedUnaryMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_Insert() { - ::grpc::Service::MarkMethodStreamed(4, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(std::bind(&WithStreamedUnaryMethod_Insert::StreamedInsert, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedInsert(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::InsertParam,::milvus::grpc::VectorIds>* server_unary_streamer) = 0; - }; - template - class WithStreamedUnaryMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_Search() { - ::grpc::Service::MarkMethodStreamed(5, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>(std::bind(&WithStreamedUnaryMethod_Search::StreamedSearch, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedSearch(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::SearchParam,::milvus::grpc::TopKQueryResultList>* server_unary_streamer) = 0; - }; - template - class WithStreamedUnaryMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_SearchInFiles() { - ::grpc::Service::MarkMethodStreamed(6, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>(std::bind(&WithStreamedUnaryMethod_SearchInFiles::StreamedSearchInFiles, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedSearchInFiles(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< 
::milvus::grpc::SearchInFilesParam,::milvus::grpc::TopKQueryResultList>* server_unary_streamer) = 0; - }; - template class WithStreamedUnaryMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_DescribeTable() { - ::grpc::Service::MarkMethodStreamed(7, + ::grpc::Service::MarkMethodStreamed(2, new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>(std::bind(&WithStreamedUnaryMethod_DescribeTable::StreamedDescribeTable, this, std::placeholders::_1, std::placeholders::_2))); } ~WithStreamedUnaryMethod_DescribeTable() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2695,17 +3006,17 @@ class MilvusService final { template class WithStreamedUnaryMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_CountTable() { - ::grpc::Service::MarkMethodStreamed(8, + ::grpc::Service::MarkMethodStreamed(3, new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>(std::bind(&WithStreamedUnaryMethod_CountTable::StreamedCountTable, this, std::placeholders::_1, std::placeholders::_2))); } ~WithStreamedUnaryMethod_CountTable() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2715,17 +3026,17 @@ class MilvusService final { template class WithStreamedUnaryMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_ShowTables() { - ::grpc::Service::MarkMethodStreamed(9, + ::grpc::Service::MarkMethodStreamed(4, new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::TableNameList>(std::bind(&WithStreamedUnaryMethod_ShowTables::StreamedShowTables, this, std::placeholders::_1, std::placeholders::_2))); } ~WithStreamedUnaryMethod_ShowTables() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2733,79 +3044,59 @@ 
class MilvusService final { virtual ::grpc::Status StreamedShowTables(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::Command,::milvus::grpc::TableNameList>* server_unary_streamer) = 0; }; template - class WithStreamedUnaryMethod_Cmd : public BaseClass { + class WithStreamedUnaryMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithStreamedUnaryMethod_Cmd() { - ::grpc::Service::MarkMethodStreamed(10, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>(std::bind(&WithStreamedUnaryMethod_Cmd::StreamedCmd, this, std::placeholders::_1, std::placeholders::_2))); + WithStreamedUnaryMethod_DropTable() { + ::grpc::Service::MarkMethodStreamed(5, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DropTable::StreamedDropTable, this, std::placeholders::_1, std::placeholders::_2))); } - ~WithStreamedUnaryMethod_Cmd() override { + ~WithStreamedUnaryMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } // replace default version of method with streamed unary - virtual ::grpc::Status StreamedCmd(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::Command,::milvus::grpc::StringReply>* server_unary_streamer) = 0; + virtual ::grpc::Status StreamedDropTable(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::Status>* server_unary_streamer) = 0; }; template - class WithStreamedUnaryMethod_DeleteByRange : public BaseClass { + class WithStreamedUnaryMethod_CreateIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithStreamedUnaryMethod_DeleteByRange() { - ::grpc::Service::MarkMethodStreamed(11, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DeleteByRange::StreamedDeleteByRange, this, std::placeholders::_1, std::placeholders::_2))); + WithStreamedUnaryMethod_CreateIndex() { + ::grpc::Service::MarkMethodStreamed(6, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_CreateIndex::StreamedCreateIndex, this, std::placeholders::_1, std::placeholders::_2))); } - ~WithStreamedUnaryMethod_DeleteByRange() override { + ~WithStreamedUnaryMethod_CreateIndex() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } // replace default version of method with streamed unary - virtual ::grpc::Status StreamedDeleteByRange(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::DeleteByRangeParam,::milvus::grpc::Status>* server_unary_streamer) = 0; - }; - template - class WithStreamedUnaryMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_PreloadTable() { - ::grpc::Service::MarkMethodStreamed(12, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_PreloadTable::StreamedPreloadTable, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_PreloadTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedPreloadTable(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::Status>* server_unary_streamer) = 0; + virtual ::grpc::Status StreamedCreateIndex(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::IndexParam,::milvus::grpc::Status>* server_unary_streamer) = 0; }; template class WithStreamedUnaryMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_DescribeIndex() { - ::grpc::Service::MarkMethodStreamed(13, + ::grpc::Service::MarkMethodStreamed(7, new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>(std::bind(&WithStreamedUnaryMethod_DescribeIndex::StreamedDescribeIndex, this, std::placeholders::_1, std::placeholders::_2))); } ~WithStreamedUnaryMethod_DescribeIndex() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2815,26 +3106,206 @@ class MilvusService final { template class WithStreamedUnaryMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_DropIndex() { - ::grpc::Service::MarkMethodStreamed(14, + ::grpc::Service::MarkMethodStreamed(8, new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DropIndex::StreamedDropIndex, this, std::placeholders::_1, std::placeholders::_2))); } ~WithStreamedUnaryMethod_DropIndex() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const 
::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } // replace default version of method with streamed unary virtual ::grpc::Status StreamedDropIndex(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::Status>* server_unary_streamer) = 0; }; - typedef WithStreamedUnaryMethod_CreateTable > > > > > > > > > > > > > > StreamedUnaryService; + template + class WithStreamedUnaryMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_CreatePartition() { + ::grpc::Service::MarkMethodStreamed(9, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_CreatePartition::StreamedCreatePartition, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedCreatePartition(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::PartitionParam,::milvus::grpc::Status>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_ShowPartitions() { + ::grpc::Service::MarkMethodStreamed(10, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>(std::bind(&WithStreamedUnaryMethod_ShowPartitions::StreamedShowPartitions, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_ShowPartitions() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedShowPartitions(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::PartitionList>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_DropPartition() { + ::grpc::Service::MarkMethodStreamed(11, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DropPartition::StreamedDropPartition, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // 
disable regular version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedDropPartition(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::PartitionParam,::milvus::grpc::Status>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_Insert() { + ::grpc::Service::MarkMethodStreamed(12, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(std::bind(&WithStreamedUnaryMethod_Insert::StreamedInsert, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedInsert(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::InsertParam,::milvus::grpc::VectorIds>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_Search() { + ::grpc::Service::MarkMethodStreamed(13, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>(std::bind(&WithStreamedUnaryMethod_Search::StreamedSearch, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedSearch(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::SearchParam,::milvus::grpc::TopKQueryResultList>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_SearchInFiles() { + ::grpc::Service::MarkMethodStreamed(14, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>(std::bind(&WithStreamedUnaryMethod_SearchInFiles::StreamedSearchInFiles, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, 
::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedSearchInFiles(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::SearchInFilesParam,::milvus::grpc::TopKQueryResultList>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_Cmd() { + ::grpc::Service::MarkMethodStreamed(15, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>(std::bind(&WithStreamedUnaryMethod_Cmd::StreamedCmd, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_Cmd() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedCmd(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::Command,::milvus::grpc::StringReply>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_DeleteByDate() { + ::grpc::Service::MarkMethodStreamed(16, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DeleteByDate::StreamedDeleteByDate, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedDeleteByDate(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::DeleteByDateParam,::milvus::grpc::Status>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_PreloadTable() { + ::grpc::Service::MarkMethodStreamed(17, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_PreloadTable::StreamedPreloadTable, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual 
::grpc::Status StreamedPreloadTable(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::Status>* server_unary_streamer) = 0; + }; + typedef WithStreamedUnaryMethod_CreateTable > > > > > > > > > > > > > > > > > StreamedUnaryService; typedef Service SplitStreamedService; - typedef WithStreamedUnaryMethod_CreateTable > > > > > > > > > > > > > > StreamedService; + typedef WithStreamedUnaryMethod_CreateTable > > > > > > > > > > > > > > > > > StreamedService; }; } // namespace grpc diff --git a/core/src/grpc/gen-milvus/milvus.pb.cc b/core/src/grpc/gen-milvus/milvus.pb.cc index fe416a4773..c381c4f4db 100644 --- a/core/src/grpc/gen-milvus/milvus.pb.cc +++ b/core/src/grpc/gen-milvus/milvus.pb.cc @@ -16,6 +16,7 @@ // @@protoc_insertion_point(includes) #include extern PROTOBUF_INTERNAL_EXPORT_milvus_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Index_milvus_2eproto; +extern PROTOBUF_INTERNAL_EXPORT_milvus_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_PartitionParam_milvus_2eproto; extern PROTOBUF_INTERNAL_EXPORT_milvus_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_QueryResult_milvus_2eproto; extern PROTOBUF_INTERNAL_EXPORT_milvus_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Range_milvus_2eproto; extern PROTOBUF_INTERNAL_EXPORT_milvus_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_RowRecord_milvus_2eproto; @@ -28,6 +29,10 @@ class TableNameDefaultTypeInternal { public: ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; } _TableName_default_instance_; +class PartitionNameDefaultTypeInternal { + public: + ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; +} _PartitionName_default_instance_; class TableNameListDefaultTypeInternal { public: ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; @@ -36,6 +41,14 @@ class TableSchemaDefaultTypeInternal { public: ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; } _TableSchema_default_instance_; +class PartitionParamDefaultTypeInternal { + public: + ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; +} _PartitionParam_default_instance_; +class PartitionListDefaultTypeInternal { + public: + ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; +} _PartitionList_default_instance_; class RangeDefaultTypeInternal { public: ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; @@ -96,10 +109,10 @@ class IndexParamDefaultTypeInternal { public: ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; } _IndexParam_default_instance_; -class DeleteByRangeParamDefaultTypeInternal { +class DeleteByDateParamDefaultTypeInternal { public: - ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; -} _DeleteByRangeParam_default_instance_; + ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; +} _DeleteByDateParam_default_instance_; } // namespace grpc } // namespace milvus static void InitDefaultsscc_info_BoolReply_milvus_2eproto() { @@ -131,19 +144,19 @@ static void InitDefaultsscc_info_Command_milvus_2eproto() { ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Command_milvus_2eproto = {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_Command_milvus_2eproto}, {}}; -static void InitDefaultsscc_info_DeleteByRangeParam_milvus_2eproto() { +static void InitDefaultsscc_info_DeleteByDateParam_milvus_2eproto() { 
GOOGLE_PROTOBUF_VERIFY_VERSION; { - void* ptr = &::milvus::grpc::_DeleteByRangeParam_default_instance_; - new (ptr) ::milvus::grpc::DeleteByRangeParam(); + void* ptr = &::milvus::grpc::_DeleteByDateParam_default_instance_; + new (ptr) ::milvus::grpc::DeleteByDateParam(); ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr); } - ::milvus::grpc::DeleteByRangeParam::InitAsDefaultInstance(); + ::milvus::grpc::DeleteByDateParam::InitAsDefaultInstance(); } -::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_DeleteByRangeParam_milvus_2eproto = - {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_DeleteByRangeParam_milvus_2eproto}, { +::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_DeleteByDateParam_milvus_2eproto = + {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_DeleteByDateParam_milvus_2eproto}, { &scc_info_Range_milvus_2eproto.base,}}; static void InitDefaultsscc_info_Index_milvus_2eproto() { @@ -191,6 +204,50 @@ static void InitDefaultsscc_info_InsertParam_milvus_2eproto() { {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_InsertParam_milvus_2eproto}, { &scc_info_RowRecord_milvus_2eproto.base,}}; +static void InitDefaultsscc_info_PartitionList_milvus_2eproto() { + GOOGLE_PROTOBUF_VERIFY_VERSION; + + { + void* ptr = &::milvus::grpc::_PartitionList_default_instance_; + new (ptr) ::milvus::grpc::PartitionList(); + ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr); + } + ::milvus::grpc::PartitionList::InitAsDefaultInstance(); +} + +::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_PartitionList_milvus_2eproto = + {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 2, InitDefaultsscc_info_PartitionList_milvus_2eproto}, { + &scc_info_Status_status_2eproto.base, + &scc_info_PartitionParam_milvus_2eproto.base,}}; + +static void InitDefaultsscc_info_PartitionName_milvus_2eproto() { + GOOGLE_PROTOBUF_VERIFY_VERSION; + + { + void* ptr = &::milvus::grpc::_PartitionName_default_instance_; + new (ptr) ::milvus::grpc::PartitionName(); + ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr); + } + ::milvus::grpc::PartitionName::InitAsDefaultInstance(); +} + +::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_PartitionName_milvus_2eproto = + {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_PartitionName_milvus_2eproto}, {}}; + +static void InitDefaultsscc_info_PartitionParam_milvus_2eproto() { + GOOGLE_PROTOBUF_VERIFY_VERSION; + + { + void* ptr = &::milvus::grpc::_PartitionParam_default_instance_; + new (ptr) ::milvus::grpc::PartitionParam(); + ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr); + } + ::milvus::grpc::PartitionParam::InitAsDefaultInstance(); +} + +::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_PartitionParam_milvus_2eproto = + {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_PartitionParam_milvus_2eproto}, {}}; + static void InitDefaultsscc_info_QueryResult_milvus_2eproto() { GOOGLE_PROTOBUF_VERIFY_VERSION; @@ -384,7 +441,7 @@ static void InitDefaultsscc_info_VectorIds_milvus_2eproto() { {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_VectorIds_milvus_2eproto}, { &scc_info_Status_status_2eproto.base,}}; -static 
::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_milvus_2eproto[19]; +static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_milvus_2eproto[22]; static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_milvus_2eproto = nullptr; static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_milvus_2eproto = nullptr; @@ -396,6 +453,12 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT ~0u, // no _weak_field_map_ PROTOBUF_FIELD_OFFSET(::milvus::grpc::TableName, table_name_), ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionName, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionName, partition_name_), + ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::milvus::grpc::TableNameList, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ @@ -413,6 +476,21 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT PROTOBUF_FIELD_OFFSET(::milvus::grpc::TableSchema, index_file_size_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::TableSchema, metric_type_), ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionParam, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionParam, table_name_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionParam, partition_name_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionParam, tag_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionList, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionList, status_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionList, partition_array_), + ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::milvus::grpc::Range, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ @@ -433,6 +511,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT PROTOBUF_FIELD_OFFSET(::milvus::grpc::InsertParam, table_name_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::InsertParam, row_record_array_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::InsertParam, row_id_array_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::InsertParam, partition_tag_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::milvus::grpc::VectorIds, _internal_metadata_), ~0u, // no _extensions_ @@ -450,6 +529,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT PROTOBUF_FIELD_OFFSET(::milvus::grpc::SearchParam, query_range_array_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::SearchParam, topk_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::SearchParam, nprobe_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::SearchParam, partition_tag_array_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::milvus::grpc::SearchInFilesParam, _internal_metadata_), ~0u, // no _extensions_ @@ -520,39 +600,45 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT PROTOBUF_FIELD_OFFSET(::milvus::grpc::IndexParam, table_name_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::IndexParam, index_), ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByRangeParam, _internal_metadata_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByDateParam, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ 
- PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByRangeParam, range_), - PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByRangeParam, table_name_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByDateParam, range_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByDateParam, table_name_), }; static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, sizeof(::milvus::grpc::TableName)}, - { 6, -1, sizeof(::milvus::grpc::TableNameList)}, - { 13, -1, sizeof(::milvus::grpc::TableSchema)}, - { 23, -1, sizeof(::milvus::grpc::Range)}, - { 30, -1, sizeof(::milvus::grpc::RowRecord)}, - { 36, -1, sizeof(::milvus::grpc::InsertParam)}, - { 44, -1, sizeof(::milvus::grpc::VectorIds)}, - { 51, -1, sizeof(::milvus::grpc::SearchParam)}, - { 61, -1, sizeof(::milvus::grpc::SearchInFilesParam)}, - { 68, -1, sizeof(::milvus::grpc::QueryResult)}, - { 75, -1, sizeof(::milvus::grpc::TopKQueryResult)}, - { 81, -1, sizeof(::milvus::grpc::TopKQueryResultList)}, - { 88, -1, sizeof(::milvus::grpc::StringReply)}, - { 95, -1, sizeof(::milvus::grpc::BoolReply)}, - { 102, -1, sizeof(::milvus::grpc::TableRowCount)}, - { 109, -1, sizeof(::milvus::grpc::Command)}, - { 115, -1, sizeof(::milvus::grpc::Index)}, - { 122, -1, sizeof(::milvus::grpc::IndexParam)}, - { 130, -1, sizeof(::milvus::grpc::DeleteByRangeParam)}, + { 6, -1, sizeof(::milvus::grpc::PartitionName)}, + { 12, -1, sizeof(::milvus::grpc::TableNameList)}, + { 19, -1, sizeof(::milvus::grpc::TableSchema)}, + { 29, -1, sizeof(::milvus::grpc::PartitionParam)}, + { 37, -1, sizeof(::milvus::grpc::PartitionList)}, + { 44, -1, sizeof(::milvus::grpc::Range)}, + { 51, -1, sizeof(::milvus::grpc::RowRecord)}, + { 57, -1, sizeof(::milvus::grpc::InsertParam)}, + { 66, -1, sizeof(::milvus::grpc::VectorIds)}, + { 73, -1, sizeof(::milvus::grpc::SearchParam)}, + { 84, -1, sizeof(::milvus::grpc::SearchInFilesParam)}, + { 91, -1, sizeof(::milvus::grpc::QueryResult)}, + { 98, -1, sizeof(::milvus::grpc::TopKQueryResult)}, + { 104, -1, sizeof(::milvus::grpc::TopKQueryResultList)}, + { 111, -1, sizeof(::milvus::grpc::StringReply)}, + { 118, -1, sizeof(::milvus::grpc::BoolReply)}, + { 125, -1, sizeof(::milvus::grpc::TableRowCount)}, + { 132, -1, sizeof(::milvus::grpc::Command)}, + { 138, -1, sizeof(::milvus::grpc::Index)}, + { 145, -1, sizeof(::milvus::grpc::IndexParam)}, + { 153, -1, sizeof(::milvus::grpc::DeleteByDateParam)}, }; static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { reinterpret_cast(&::milvus::grpc::_TableName_default_instance_), + reinterpret_cast(&::milvus::grpc::_PartitionName_default_instance_), reinterpret_cast(&::milvus::grpc::_TableNameList_default_instance_), reinterpret_cast(&::milvus::grpc::_TableSchema_default_instance_), + reinterpret_cast(&::milvus::grpc::_PartitionParam_default_instance_), + reinterpret_cast(&::milvus::grpc::_PartitionList_default_instance_), reinterpret_cast(&::milvus::grpc::_Range_default_instance_), reinterpret_cast(&::milvus::grpc::_RowRecord_default_instance_), reinterpret_cast(&::milvus::grpc::_InsertParam_default_instance_), @@ -568,85 +654,100 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = reinterpret_cast(&::milvus::grpc::_Command_default_instance_), reinterpret_cast(&::milvus::grpc::_Index_default_instance_), reinterpret_cast(&::milvus::grpc::_IndexParam_default_instance_), - reinterpret_cast(&::milvus::grpc::_DeleteByRangeParam_default_instance_), + 
reinterpret_cast(&::milvus::grpc::_DeleteByDateParam_default_instance_), }; const char descriptor_table_protodef_milvus_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = "\n\014milvus.proto\022\013milvus.grpc\032\014status.prot" - "o\"\037\n\tTableName\022\022\n\ntable_name\030\001 \001(\t\"I\n\rTa" - "bleNameList\022#\n\006status\030\001 \001(\0132\023.milvus.grp" - "c.Status\022\023\n\013table_names\030\002 \003(\t\"\207\001\n\013TableS" - "chema\022#\n\006status\030\001 \001(\0132\023.milvus.grpc.Stat" - "us\022\022\n\ntable_name\030\002 \001(\t\022\021\n\tdimension\030\003 \001(" - "\003\022\027\n\017index_file_size\030\004 \001(\003\022\023\n\013metric_typ" - "e\030\005 \001(\005\"/\n\005Range\022\023\n\013start_value\030\001 \001(\t\022\021\n" - "\tend_value\030\002 \001(\t\" \n\tRowRecord\022\023\n\013vector_" - "data\030\001 \003(\002\"i\n\013InsertParam\022\022\n\ntable_name\030" - "\001 \001(\t\0220\n\020row_record_array\030\002 \003(\0132\026.milvus" - ".grpc.RowRecord\022\024\n\014row_id_array\030\003 \003(\003\"I\n" - "\tVectorIds\022#\n\006status\030\001 \001(\0132\023.milvus.grpc" - ".Status\022\027\n\017vector_id_array\030\002 \003(\003\"\242\001\n\013Sea" - "rchParam\022\022\n\ntable_name\030\001 \001(\t\0222\n\022query_re" + "o\"\037\n\tTableName\022\022\n\ntable_name\030\001 \001(\t\"\'\n\rPa" + "rtitionName\022\026\n\016partition_name\030\001 \001(\t\"I\n\rT" + "ableNameList\022#\n\006status\030\001 \001(\0132\023.milvus.gr" + "pc.Status\022\023\n\013table_names\030\002 \003(\t\"\207\001\n\013Table" + "Schema\022#\n\006status\030\001 \001(\0132\023.milvus.grpc.Sta" + "tus\022\022\n\ntable_name\030\002 \001(\t\022\021\n\tdimension\030\003 \001" + "(\003\022\027\n\017index_file_size\030\004 \001(\003\022\023\n\013metric_ty" + "pe\030\005 \001(\005\"I\n\016PartitionParam\022\022\n\ntable_name" + "\030\001 \001(\t\022\026\n\016partition_name\030\002 \001(\t\022\013\n\003tag\030\003 " + "\001(\t\"j\n\rPartitionList\022#\n\006status\030\001 \001(\0132\023.m" + "ilvus.grpc.Status\0224\n\017partition_array\030\002 \003" + "(\0132\033.milvus.grpc.PartitionParam\"/\n\005Range" + "\022\023\n\013start_value\030\001 \001(\t\022\021\n\tend_value\030\002 \001(\t" + "\" \n\tRowRecord\022\023\n\013vector_data\030\001 \003(\002\"\200\001\n\013I" + "nsertParam\022\022\n\ntable_name\030\001 \001(\t\0220\n\020row_re" "cord_array\030\002 \003(\0132\026.milvus.grpc.RowRecord" - "\022-\n\021query_range_array\030\003 \003(\0132\022.milvus.grp" - "c.Range\022\014\n\004topk\030\004 \001(\003\022\016\n\006nprobe\030\005 \001(\003\"[\n" - "\022SearchInFilesParam\022\025\n\rfile_id_array\030\001 \003" - "(\t\022.\n\014search_param\030\002 \001(\0132\030.milvus.grpc.S" - "earchParam\"+\n\013QueryResult\022\n\n\002id\030\001 \001(\003\022\020\n" - "\010distance\030\002 \001(\001\"H\n\017TopKQueryResult\0225\n\023qu" - "ery_result_arrays\030\001 \003(\0132\030.milvus.grpc.Qu" - "eryResult\"s\n\023TopKQueryResultList\022#\n\006stat" - "us\030\001 \001(\0132\023.milvus.grpc.Status\0227\n\021topk_qu" - "ery_result\030\002 \003(\0132\034.milvus.grpc.TopKQuery" - "Result\"H\n\013StringReply\022#\n\006status\030\001 \001(\0132\023." 
- "milvus.grpc.Status\022\024\n\014string_reply\030\002 \001(\t" - "\"D\n\tBoolReply\022#\n\006status\030\001 \001(\0132\023.milvus.g" - "rpc.Status\022\022\n\nbool_reply\030\002 \001(\010\"M\n\rTableR" - "owCount\022#\n\006status\030\001 \001(\0132\023.milvus.grpc.St" - "atus\022\027\n\017table_row_count\030\002 \001(\003\"\026\n\007Command" - "\022\013\n\003cmd\030\001 \001(\t\"*\n\005Index\022\022\n\nindex_type\030\001 \001" - "(\005\022\r\n\005nlist\030\002 \001(\005\"h\n\nIndexParam\022#\n\006statu" - "s\030\001 \001(\0132\023.milvus.grpc.Status\022\022\n\ntable_na" - "me\030\002 \001(\t\022!\n\005index\030\003 \001(\0132\022.milvus.grpc.In" - "dex\"K\n\022DeleteByRangeParam\022!\n\005range\030\001 \001(\013" - "2\022.milvus.grpc.Range\022\022\n\ntable_name\030\002 \001(\t" - "2\360\007\n\rMilvusService\022>\n\013CreateTable\022\030.milv" - "us.grpc.TableSchema\032\023.milvus.grpc.Status" - "\"\000\022<\n\010HasTable\022\026.milvus.grpc.TableName\032\026" - ".milvus.grpc.BoolReply\"\000\022:\n\tDropTable\022\026." - "milvus.grpc.TableName\032\023.milvus.grpc.Stat" - "us\"\000\022=\n\013CreateIndex\022\027.milvus.grpc.IndexP" - "aram\032\023.milvus.grpc.Status\"\000\022<\n\006Insert\022\030." - "milvus.grpc.InsertParam\032\026.milvus.grpc.Ve" - "ctorIds\"\000\022F\n\006Search\022\030.milvus.grpc.Search" - "Param\032 .milvus.grpc.TopKQueryResultList\"" - "\000\022T\n\rSearchInFiles\022\037.milvus.grpc.SearchI" - "nFilesParam\032 .milvus.grpc.TopKQueryResul" - "tList\"\000\022C\n\rDescribeTable\022\026.milvus.grpc.T" - "ableName\032\030.milvus.grpc.TableSchema\"\000\022B\n\n" - "CountTable\022\026.milvus.grpc.TableName\032\032.mil" - "vus.grpc.TableRowCount\"\000\022@\n\nShowTables\022\024" - ".milvus.grpc.Command\032\032.milvus.grpc.Table" - "NameList\"\000\0227\n\003Cmd\022\024.milvus.grpc.Command\032" - "\030.milvus.grpc.StringReply\"\000\022G\n\rDeleteByR" - "ange\022\037.milvus.grpc.DeleteByRangeParam\032\023." 
- "milvus.grpc.Status\"\000\022=\n\014PreloadTable\022\026.m" + "\022\024\n\014row_id_array\030\003 \003(\003\022\025\n\rpartition_tag\030" + "\004 \001(\t\"I\n\tVectorIds\022#\n\006status\030\001 \001(\0132\023.mil" + "vus.grpc.Status\022\027\n\017vector_id_array\030\002 \003(\003" + "\"\277\001\n\013SearchParam\022\022\n\ntable_name\030\001 \001(\t\0222\n\022" + "query_record_array\030\002 \003(\0132\026.milvus.grpc.R" + "owRecord\022-\n\021query_range_array\030\003 \003(\0132\022.mi" + "lvus.grpc.Range\022\014\n\004topk\030\004 \001(\003\022\016\n\006nprobe\030" + "\005 \001(\003\022\033\n\023partition_tag_array\030\006 \003(\t\"[\n\022Se" + "archInFilesParam\022\025\n\rfile_id_array\030\001 \003(\t\022" + ".\n\014search_param\030\002 \001(\0132\030.milvus.grpc.Sear" + "chParam\"+\n\013QueryResult\022\n\n\002id\030\001 \001(\003\022\020\n\010di" + "stance\030\002 \001(\001\"H\n\017TopKQueryResult\0225\n\023query" + "_result_arrays\030\001 \003(\0132\030.milvus.grpc.Query" + "Result\"s\n\023TopKQueryResultList\022#\n\006status\030" + "\001 \001(\0132\023.milvus.grpc.Status\0227\n\021topk_query" + "_result\030\002 \003(\0132\034.milvus.grpc.TopKQueryRes" + "ult\"H\n\013StringReply\022#\n\006status\030\001 \001(\0132\023.mil" + "vus.grpc.Status\022\024\n\014string_reply\030\002 \001(\t\"D\n" + "\tBoolReply\022#\n\006status\030\001 \001(\0132\023.milvus.grpc" + ".Status\022\022\n\nbool_reply\030\002 \001(\010\"M\n\rTableRowC" + "ount\022#\n\006status\030\001 \001(\0132\023.milvus.grpc.Statu" + "s\022\027\n\017table_row_count\030\002 \001(\003\"\026\n\007Command\022\013\n" + "\003cmd\030\001 \001(\t\"*\n\005Index\022\022\n\nindex_type\030\001 \001(\005\022" + "\r\n\005nlist\030\002 \001(\005\"h\n\nIndexParam\022#\n\006status\030\001" + " \001(\0132\023.milvus.grpc.Status\022\022\n\ntable_name\030" + "\002 \001(\t\022!\n\005index\030\003 \001(\0132\022.milvus.grpc.Index" + "\"J\n\021DeleteByDateParam\022!\n\005range\030\001 \001(\0132\022.m" + "ilvus.grpc.Range\022\022\n\ntable_name\030\002 \001(\t2\302\t\n" + "\rMilvusService\022>\n\013CreateTable\022\030.milvus.g" + "rpc.TableSchema\032\023.milvus.grpc.Status\"\000\022<" + "\n\010HasTable\022\026.milvus.grpc.TableName\032\026.mil" + "vus.grpc.BoolReply\"\000\022C\n\rDescribeTable\022\026." 
+ "milvus.grpc.TableName\032\030.milvus.grpc.Tabl" + "eSchema\"\000\022B\n\nCountTable\022\026.milvus.grpc.Ta" + "bleName\032\032.milvus.grpc.TableRowCount\"\000\022@\n" + "\nShowTables\022\024.milvus.grpc.Command\032\032.milv" + "us.grpc.TableNameList\"\000\022:\n\tDropTable\022\026.m" "ilvus.grpc.TableName\032\023.milvus.grpc.Statu" - "s\"\000\022B\n\rDescribeIndex\022\026.milvus.grpc.Table" - "Name\032\027.milvus.grpc.IndexParam\"\000\022:\n\tDropI" - "ndex\022\026.milvus.grpc.TableName\032\023.milvus.gr" - "pc.Status\"\000b\006proto3" + "s\"\000\022=\n\013CreateIndex\022\027.milvus.grpc.IndexPa" + "ram\032\023.milvus.grpc.Status\"\000\022B\n\rDescribeIn" + "dex\022\026.milvus.grpc.TableName\032\027.milvus.grp" + "c.IndexParam\"\000\022:\n\tDropIndex\022\026.milvus.grp" + "c.TableName\032\023.milvus.grpc.Status\"\000\022E\n\017Cr" + "eatePartition\022\033.milvus.grpc.PartitionPar" + "am\032\023.milvus.grpc.Status\"\000\022F\n\016ShowPartiti" + "ons\022\026.milvus.grpc.TableName\032\032.milvus.grp" + "c.PartitionList\"\000\022C\n\rDropPartition\022\033.mil" + "vus.grpc.PartitionParam\032\023.milvus.grpc.St" + "atus\"\000\022<\n\006Insert\022\030.milvus.grpc.InsertPar" + "am\032\026.milvus.grpc.VectorIds\"\000\022F\n\006Search\022\030" + ".milvus.grpc.SearchParam\032 .milvus.grpc.T" + "opKQueryResultList\"\000\022T\n\rSearchInFiles\022\037." + "milvus.grpc.SearchInFilesParam\032 .milvus." + "grpc.TopKQueryResultList\"\000\0227\n\003Cmd\022\024.milv" + "us.grpc.Command\032\030.milvus.grpc.StringRepl" + "y\"\000\022E\n\014DeleteByDate\022\036.milvus.grpc.Delete" + "ByDateParam\032\023.milvus.grpc.Status\"\000\022=\n\014Pr" + "eloadTable\022\026.milvus.grpc.TableName\032\023.mil" + "vus.grpc.Status\"\000b\006proto3" ; static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_milvus_2eproto_deps[1] = { &::descriptor_table_status_2eproto, }; -static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_milvus_2eproto_sccs[19] = { +static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_milvus_2eproto_sccs[22] = { &scc_info_BoolReply_milvus_2eproto.base, &scc_info_Command_milvus_2eproto.base, - &scc_info_DeleteByRangeParam_milvus_2eproto.base, + &scc_info_DeleteByDateParam_milvus_2eproto.base, &scc_info_Index_milvus_2eproto.base, &scc_info_IndexParam_milvus_2eproto.base, &scc_info_InsertParam_milvus_2eproto.base, + &scc_info_PartitionList_milvus_2eproto.base, + &scc_info_PartitionName_milvus_2eproto.base, + &scc_info_PartitionParam_milvus_2eproto.base, &scc_info_QueryResult_milvus_2eproto.base, &scc_info_Range_milvus_2eproto.base, &scc_info_RowRecord_milvus_2eproto.base, @@ -664,10 +765,10 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_mil static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_milvus_2eproto_once; static bool descriptor_table_milvus_2eproto_initialized = false; const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_milvus_2eproto = { - &descriptor_table_milvus_2eproto_initialized, descriptor_table_protodef_milvus_2eproto, "milvus.proto", 2539, - &descriptor_table_milvus_2eproto_once, descriptor_table_milvus_2eproto_sccs, descriptor_table_milvus_2eproto_deps, 19, 1, + &descriptor_table_milvus_2eproto_initialized, descriptor_table_protodef_milvus_2eproto, "milvus.proto", 3025, + &descriptor_table_milvus_2eproto_once, descriptor_table_milvus_2eproto_sccs, descriptor_table_milvus_2eproto_deps, 22, 1, schemas, file_default_instances, 
TableStruct_milvus_2eproto::offsets, - file_level_metadata_milvus_2eproto, 19, file_level_enum_descriptors_milvus_2eproto, file_level_service_descriptors_milvus_2eproto, + file_level_metadata_milvus_2eproto, 22, file_level_enum_descriptors_milvus_2eproto, file_level_service_descriptors_milvus_2eproto, }; // Force running AddDescriptors() at dynamic initialization time. @@ -944,6 +1045,275 @@ void TableName::InternalSwap(TableName* other) { } +// =================================================================== + +void PartitionName::InitAsDefaultInstance() { +} +class PartitionName::_Internal { + public: +}; + +PartitionName::PartitionName() + : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) { + SharedCtor(); + // @@protoc_insertion_point(constructor:milvus.grpc.PartitionName) +} +PartitionName::PartitionName(const PartitionName& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + _internal_metadata_(nullptr) { + _internal_metadata_.MergeFrom(from._internal_metadata_); + partition_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from.partition_name().empty()) { + partition_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_name_); + } + // @@protoc_insertion_point(copy_constructor:milvus.grpc.PartitionName) +} + +void PartitionName::SharedCtor() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_PartitionName_milvus_2eproto.base); + partition_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +PartitionName::~PartitionName() { + // @@protoc_insertion_point(destructor:milvus.grpc.PartitionName) + SharedDtor(); +} + +void PartitionName::SharedDtor() { + partition_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +void PartitionName::SetCachedSize(int size) const { + _cached_size_.Set(size); +} +const PartitionName& PartitionName::default_instance() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_PartitionName_milvus_2eproto.base); + return *internal_default_instance(); +} + + +void PartitionName::Clear() { +// @@protoc_insertion_point(message_clear_start:milvus.grpc.PartitionName) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + partition_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + _internal_metadata_.Clear(); +} + +#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +const char* PartitionName::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + CHK_(ptr); + switch (tag >> 3) { + // string partition_name = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_partition_name(), ptr, ctx, "milvus.grpc.PartitionName.partition_name"); + CHK_(ptr); + } else goto handle_unusual; + continue; + default: { + handle_unusual: + if ((tag & 7) == 4 || tag == 0) { + ctx->SetLastTag(tag); + goto success; + } + ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx); + CHK_(ptr != nullptr); + continue; + } + } // switch + } // while +success: + return ptr; +failure: + ptr = nullptr; 
+ goto success; +#undef CHK_ +} +#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +bool PartitionName::MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + // @@protoc_insertion_point(parse_start:milvus.grpc.PartitionName) + for (;;) { + ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // string partition_name = 1; + case 1: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->mutable_partition_name())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.PartitionName.partition_name")); + } else { + goto handle_unusual; + } + break; + } + + default: { + handle_unusual: + if (tag == 0) { + goto success; + } + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField( + input, tag, _internal_metadata_.mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:milvus.grpc.PartitionName) + return true; +failure: + // @@protoc_insertion_point(parse_failure:milvus.grpc.PartitionName) + return false; +#undef DO_ +} +#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + +void PartitionName::SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:milvus.grpc.PartitionName) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // string partition_name = 1; + if (this->partition_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionName.partition_name"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( + 1, this->partition_name(), output); + } + + if (_internal_metadata_.have_unknown_fields()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( + _internal_metadata_.unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:milvus.grpc.PartitionName) +} + +::PROTOBUF_NAMESPACE_ID::uint8* PartitionName::InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:milvus.grpc.PartitionName) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // string partition_name = 1; + if (this->partition_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionName.partition_name"); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( + 1, this->partition_name(), target); + } + + if (_internal_metadata_.have_unknown_fields()) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( + 
_internal_metadata_.unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:milvus.grpc.PartitionName) + return target; +} + +size_t PartitionName::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:milvus.grpc.PartitionName) + size_t total_size = 0; + + if (_internal_metadata_.have_unknown_fields()) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize( + _internal_metadata_.unknown_fields()); + } + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // string partition_name = 1; + if (this->partition_name().size() > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->partition_name()); + } + + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); + SetCachedSize(cached_size); + return total_size; +} + +void PartitionName::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_merge_from_start:milvus.grpc.PartitionName) + GOOGLE_DCHECK_NE(&from, this); + const PartitionName* source = + ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated( + &from); + if (source == nullptr) { + // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.grpc.PartitionName) + ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this); + } else { + // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.grpc.PartitionName) + MergeFrom(*source); + } +} + +void PartitionName::MergeFrom(const PartitionName& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:milvus.grpc.PartitionName) + GOOGLE_DCHECK_NE(&from, this); + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (from.partition_name().size() > 0) { + + partition_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_name_); + } +} + +void PartitionName::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:milvus.grpc.PartitionName) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void PartitionName::CopyFrom(const PartitionName& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:milvus.grpc.PartitionName) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PartitionName::IsInitialized() const { + return true; +} + +void PartitionName::InternalSwap(PartitionName* other) { + using std::swap; + _internal_metadata_.Swap(&other->_internal_metadata_); + partition_name_.Swap(&other->partition_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PartitionName::GetMetadata() const { + return GetMetadataStatic(); +} + + // =================================================================== void TableNameList::InitAsDefaultInstance() { @@ -1742,6 +2112,728 @@ void TableSchema::InternalSwap(TableSchema* other) { } +// =================================================================== + +void PartitionParam::InitAsDefaultInstance() { +} +class PartitionParam::_Internal { + public: +}; + +PartitionParam::PartitionParam() + : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) { + SharedCtor(); + // @@protoc_insertion_point(constructor:milvus.grpc.PartitionParam) +} 
+PartitionParam::PartitionParam(const PartitionParam& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + _internal_metadata_(nullptr) { + _internal_metadata_.MergeFrom(from._internal_metadata_); + table_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from.table_name().empty()) { + table_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.table_name_); + } + partition_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from.partition_name().empty()) { + partition_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_name_); + } + tag_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from.tag().empty()) { + tag_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.tag_); + } + // @@protoc_insertion_point(copy_constructor:milvus.grpc.PartitionParam) +} + +void PartitionParam::SharedCtor() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_PartitionParam_milvus_2eproto.base); + table_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + tag_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +PartitionParam::~PartitionParam() { + // @@protoc_insertion_point(destructor:milvus.grpc.PartitionParam) + SharedDtor(); +} + +void PartitionParam::SharedDtor() { + table_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + tag_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +void PartitionParam::SetCachedSize(int size) const { + _cached_size_.Set(size); +} +const PartitionParam& PartitionParam::default_instance() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_PartitionParam_milvus_2eproto.base); + return *internal_default_instance(); +} + + +void PartitionParam::Clear() { +// @@protoc_insertion_point(message_clear_start:milvus.grpc.PartitionParam) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + table_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + tag_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + _internal_metadata_.Clear(); +} + +#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +const char* PartitionParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + CHK_(ptr); + switch (tag >> 3) { + // string table_name = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_table_name(), ptr, ctx, "milvus.grpc.PartitionParam.table_name"); + CHK_(ptr); + } else goto handle_unusual; + continue; + // string partition_name = 2; + 
case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_partition_name(), ptr, ctx, "milvus.grpc.PartitionParam.partition_name"); + CHK_(ptr); + } else goto handle_unusual; + continue; + // string tag = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_tag(), ptr, ctx, "milvus.grpc.PartitionParam.tag"); + CHK_(ptr); + } else goto handle_unusual; + continue; + default: { + handle_unusual: + if ((tag & 7) == 4 || tag == 0) { + ctx->SetLastTag(tag); + goto success; + } + ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx); + CHK_(ptr != nullptr); + continue; + } + } // switch + } // while +success: + return ptr; +failure: + ptr = nullptr; + goto success; +#undef CHK_ +} +#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +bool PartitionParam::MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + // @@protoc_insertion_point(parse_start:milvus.grpc.PartitionParam) + for (;;) { + ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // string table_name = 1; + case 1: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->mutable_table_name())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->table_name().data(), static_cast(this->table_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.PartitionParam.table_name")); + } else { + goto handle_unusual; + } + break; + } + + // string partition_name = 2; + case 2: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->mutable_partition_name())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.PartitionParam.partition_name")); + } else { + goto handle_unusual; + } + break; + } + + // string tag = 3; + case 3: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->mutable_tag())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->tag().data(), static_cast(this->tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.PartitionParam.tag")); + } else { + goto handle_unusual; + } + break; + } + + default: { + handle_unusual: + if (tag == 0) { + goto success; + } + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField( + input, tag, _internal_metadata_.mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:milvus.grpc.PartitionParam) + return true; +failure: + // @@protoc_insertion_point(parse_failure:milvus.grpc.PartitionParam) + return false; +#undef DO_ +} +#endif // 
GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + +void PartitionParam::SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:milvus.grpc.PartitionParam) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // string table_name = 1; + if (this->table_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->table_name().data(), static_cast(this->table_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.table_name"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( + 1, this->table_name(), output); + } + + // string partition_name = 2; + if (this->partition_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.partition_name"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( + 2, this->partition_name(), output); + } + + // string tag = 3; + if (this->tag().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->tag().data(), static_cast(this->tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.tag"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( + 3, this->tag(), output); + } + + if (_internal_metadata_.have_unknown_fields()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( + _internal_metadata_.unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:milvus.grpc.PartitionParam) +} + +::PROTOBUF_NAMESPACE_ID::uint8* PartitionParam::InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:milvus.grpc.PartitionParam) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // string table_name = 1; + if (this->table_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->table_name().data(), static_cast(this->table_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.table_name"); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( + 1, this->table_name(), target); + } + + // string partition_name = 2; + if (this->partition_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.partition_name"); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( + 2, this->partition_name(), target); + } + + // string tag = 3; + if (this->tag().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->tag().data(), static_cast(this->tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.tag"); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( + 3, this->tag(), target); + } + + if (_internal_metadata_.have_unknown_fields()) { + target = 
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:milvus.grpc.PartitionParam) + return target; +} + +size_t PartitionParam::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:milvus.grpc.PartitionParam) + size_t total_size = 0; + + if (_internal_metadata_.have_unknown_fields()) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize( + _internal_metadata_.unknown_fields()); + } + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // string table_name = 1; + if (this->table_name().size() > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->table_name()); + } + + // string partition_name = 2; + if (this->partition_name().size() > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->partition_name()); + } + + // string tag = 3; + if (this->tag().size() > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->tag()); + } + + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); + SetCachedSize(cached_size); + return total_size; +} + +void PartitionParam::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_merge_from_start:milvus.grpc.PartitionParam) + GOOGLE_DCHECK_NE(&from, this); + const PartitionParam* source = + ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated( + &from); + if (source == nullptr) { + // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.grpc.PartitionParam) + ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this); + } else { + // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.grpc.PartitionParam) + MergeFrom(*source); + } +} + +void PartitionParam::MergeFrom(const PartitionParam& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:milvus.grpc.PartitionParam) + GOOGLE_DCHECK_NE(&from, this); + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (from.table_name().size() > 0) { + + table_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.table_name_); + } + if (from.partition_name().size() > 0) { + + partition_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_name_); + } + if (from.tag().size() > 0) { + + tag_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.tag_); + } +} + +void PartitionParam::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:milvus.grpc.PartitionParam) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void PartitionParam::CopyFrom(const PartitionParam& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:milvus.grpc.PartitionParam) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PartitionParam::IsInitialized() const { + return true; +} + +void PartitionParam::InternalSwap(PartitionParam* other) { + using std::swap; + _internal_metadata_.Swap(&other->_internal_metadata_); + table_name_.Swap(&other->table_name_, 
&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); + partition_name_.Swap(&other->partition_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); + tag_.Swap(&other->tag_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PartitionParam::GetMetadata() const { + return GetMetadataStatic(); +} + + +// =================================================================== + +void PartitionList::InitAsDefaultInstance() { + ::milvus::grpc::_PartitionList_default_instance_._instance.get_mutable()->status_ = const_cast< ::milvus::grpc::Status*>( + ::milvus::grpc::Status::internal_default_instance()); +} +class PartitionList::_Internal { + public: + static const ::milvus::grpc::Status& status(const PartitionList* msg); +}; + +const ::milvus::grpc::Status& +PartitionList::_Internal::status(const PartitionList* msg) { + return *msg->status_; +} +void PartitionList::clear_status() { + if (GetArenaNoVirtual() == nullptr && status_ != nullptr) { + delete status_; + } + status_ = nullptr; +} +PartitionList::PartitionList() + : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) { + SharedCtor(); + // @@protoc_insertion_point(constructor:milvus.grpc.PartitionList) +} +PartitionList::PartitionList(const PartitionList& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + _internal_metadata_(nullptr), + partition_array_(from.partition_array_) { + _internal_metadata_.MergeFrom(from._internal_metadata_); + if (from.has_status()) { + status_ = new ::milvus::grpc::Status(*from.status_); + } else { + status_ = nullptr; + } + // @@protoc_insertion_point(copy_constructor:milvus.grpc.PartitionList) +} + +void PartitionList::SharedCtor() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_PartitionList_milvus_2eproto.base); + status_ = nullptr; +} + +PartitionList::~PartitionList() { + // @@protoc_insertion_point(destructor:milvus.grpc.PartitionList) + SharedDtor(); +} + +void PartitionList::SharedDtor() { + if (this != internal_default_instance()) delete status_; +} + +void PartitionList::SetCachedSize(int size) const { + _cached_size_.Set(size); +} +const PartitionList& PartitionList::default_instance() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_PartitionList_milvus_2eproto.base); + return *internal_default_instance(); +} + + +void PartitionList::Clear() { +// @@protoc_insertion_point(message_clear_start:milvus.grpc.PartitionList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + partition_array_.Clear(); + if (GetArenaNoVirtual() == nullptr && status_ != nullptr) { + delete status_; + } + status_ = nullptr; + _internal_metadata_.Clear(); +} + +#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +const char* PartitionList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + CHK_(ptr); + switch (tag >> 3) { + // .milvus.grpc.Status status = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ctx->ParseMessage(mutable_status(), ptr); + CHK_(ptr); + } else goto handle_unusual; + continue; + // repeated .milvus.grpc.PartitionParam partition_array = 2; + 
case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(add_partition_array(), ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 18); + } else goto handle_unusual; + continue; + default: { + handle_unusual: + if ((tag & 7) == 4 || tag == 0) { + ctx->SetLastTag(tag); + goto success; + } + ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx); + CHK_(ptr != nullptr); + continue; + } + } // switch + } // while +success: + return ptr; +failure: + ptr = nullptr; + goto success; +#undef CHK_ +} +#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +bool PartitionList::MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + // @@protoc_insertion_point(parse_start:milvus.grpc.PartitionList) + for (;;) { + ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // .milvus.grpc.Status status = 1; + case 1: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage( + input, mutable_status())); + } else { + goto handle_unusual; + } + break; + } + + // repeated .milvus.grpc.PartitionParam partition_array = 2; + case 2: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage( + input, add_partition_array())); + } else { + goto handle_unusual; + } + break; + } + + default: { + handle_unusual: + if (tag == 0) { + goto success; + } + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField( + input, tag, _internal_metadata_.mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:milvus.grpc.PartitionList) + return true; +failure: + // @@protoc_insertion_point(parse_failure:milvus.grpc.PartitionList) + return false; +#undef DO_ +} +#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + +void PartitionList::SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:milvus.grpc.PartitionList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // .milvus.grpc.Status status = 1; + if (this->has_status()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray( + 1, _Internal::status(this), output); + } + + // repeated .milvus.grpc.PartitionParam partition_array = 2; + for (unsigned int i = 0, + n = static_cast(this->partition_array_size()); i < n; i++) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray( + 2, + this->partition_array(static_cast(i)), + output); + } + + if (_internal_metadata_.have_unknown_fields()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( + _internal_metadata_.unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:milvus.grpc.PartitionList) +} + +::PROTOBUF_NAMESPACE_ID::uint8* PartitionList::InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const { + // 
@@protoc_insertion_point(serialize_to_array_start:milvus.grpc.PartitionList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // .milvus.grpc.Status status = 1; + if (this->has_status()) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessageToArray( + 1, _Internal::status(this), target); + } + + // repeated .milvus.grpc.PartitionParam partition_array = 2; + for (unsigned int i = 0, + n = static_cast(this->partition_array_size()); i < n; i++) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessageToArray( + 2, this->partition_array(static_cast(i)), target); + } + + if (_internal_metadata_.have_unknown_fields()) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:milvus.grpc.PartitionList) + return target; +} + +size_t PartitionList::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:milvus.grpc.PartitionList) + size_t total_size = 0; + + if (_internal_metadata_.have_unknown_fields()) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize( + _internal_metadata_.unknown_fields()); + } + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated .milvus.grpc.PartitionParam partition_array = 2; + { + unsigned int count = static_cast(this->partition_array_size()); + total_size += 1UL * count; + for (unsigned int i = 0; i < count; i++) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + this->partition_array(static_cast(i))); + } + } + + // .milvus.grpc.Status status = 1; + if (this->has_status()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *status_); + } + + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); + SetCachedSize(cached_size); + return total_size; +} + +void PartitionList::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_merge_from_start:milvus.grpc.PartitionList) + GOOGLE_DCHECK_NE(&from, this); + const PartitionList* source = + ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated( + &from); + if (source == nullptr) { + // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.grpc.PartitionList) + ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this); + } else { + // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.grpc.PartitionList) + MergeFrom(*source); + } +} + +void PartitionList::MergeFrom(const PartitionList& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:milvus.grpc.PartitionList) + GOOGLE_DCHECK_NE(&from, this); + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + partition_array_.MergeFrom(from.partition_array_); + if (from.has_status()) { + mutable_status()->::milvus::grpc::Status::MergeFrom(from.status()); + } +} + +void PartitionList::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:milvus.grpc.PartitionList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void PartitionList::CopyFrom(const PartitionList& from) { +// 
@@protoc_insertion_point(class_specific_copy_from_start:milvus.grpc.PartitionList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PartitionList::IsInitialized() const { + return true; +} + +void PartitionList::InternalSwap(PartitionList* other) { + using std::swap; + _internal_metadata_.Swap(&other->_internal_metadata_); + CastToBase(&partition_array_)->InternalSwap(CastToBase(&other->partition_array_)); + swap(status_, other->status_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PartitionList::GetMetadata() const { + return GetMetadataStatic(); +} + + // =================================================================== void Range::InitAsDefaultInstance() { @@ -2369,12 +3461,17 @@ InsertParam::InsertParam(const InsertParam& from) if (!from.table_name().empty()) { table_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.table_name_); } + partition_tag_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from.partition_tag().empty()) { + partition_tag_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_tag_); + } // @@protoc_insertion_point(copy_constructor:milvus.grpc.InsertParam) } void InsertParam::SharedCtor() { ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_InsertParam_milvus_2eproto.base); table_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_tag_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); } InsertParam::~InsertParam() { @@ -2384,6 +3481,7 @@ InsertParam::~InsertParam() { void InsertParam::SharedDtor() { table_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_tag_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); } void InsertParam::SetCachedSize(int size) const { @@ -2404,6 +3502,7 @@ void InsertParam::Clear() { row_record_array_.Clear(); row_id_array_.Clear(); table_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_tag_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); _internal_metadata_.Clear(); } @@ -2444,6 +3543,13 @@ const char* InsertParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID CHK_(ptr); } else goto handle_unusual; continue; + // string partition_tag = 4; + case 4: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_partition_tag(), ptr, ctx, "milvus.grpc.InsertParam.partition_tag"); + CHK_(ptr); + } else goto handle_unusual; + continue; default: { handle_unusual: if ((tag & 7) == 4 || tag == 0) { @@ -2516,6 +3622,21 @@ bool InsertParam::MergePartialFromCodedStream( break; } + // string partition_tag = 4; + case 4: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (34 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->mutable_partition_tag())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag().data(), static_cast(this->partition_tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.InsertParam.partition_tag")); + } else { + goto handle_unusual; + } + break; + } + default: { handle_unusual: if (tag == 0) { @@ -2573,6 +3694,16 @@ void InsertParam::SerializeWithCachedSizes( 
this->row_id_array(i), output); } + // string partition_tag = 4; + if (this->partition_tag().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag().data(), static_cast(this->partition_tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.InsertParam.partition_tag"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( + 4, this->partition_tag(), output); + } + if (_internal_metadata_.have_unknown_fields()) { ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( _internal_metadata_.unknown_fields(), output); @@ -2618,6 +3749,17 @@ void InsertParam::SerializeWithCachedSizes( WriteInt64NoTagToArray(this->row_id_array_, target); } + // string partition_tag = 4; + if (this->partition_tag().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag().data(), static_cast(this->partition_tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.InsertParam.partition_tag"); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( + 4, this->partition_tag(), target); + } + if (_internal_metadata_.have_unknown_fields()) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields(), target); @@ -2672,6 +3814,13 @@ size_t InsertParam::ByteSizeLong() const { this->table_name()); } + // string partition_tag = 4; + if (this->partition_tag().size() > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->partition_tag()); + } + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); SetCachedSize(cached_size); return total_size; @@ -2705,6 +3854,10 @@ void InsertParam::MergeFrom(const InsertParam& from) { table_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.table_name_); } + if (from.partition_tag().size() > 0) { + + partition_tag_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_tag_); + } } void InsertParam::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { @@ -2732,6 +3885,8 @@ void InsertParam::InternalSwap(InsertParam* other) { row_id_array_.InternalSwap(&other->row_id_array_); table_name_.Swap(&other->table_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); + partition_tag_.Swap(&other->partition_tag_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); } ::PROTOBUF_NAMESPACE_ID::Metadata InsertParam::GetMetadata() const { @@ -3097,7 +4252,8 @@ SearchParam::SearchParam(const SearchParam& from) : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr), query_record_array_(from.query_record_array_), - query_range_array_(from.query_range_array_) { + query_range_array_(from.query_range_array_), + partition_tag_array_(from.partition_tag_array_) { _internal_metadata_.MergeFrom(from._internal_metadata_); table_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); if (!from.table_name().empty()) { @@ -3143,6 +4299,7 @@ void SearchParam::Clear() { query_record_array_.Clear(); query_range_array_.Clear(); + partition_tag_array_.Clear(); table_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); ::memset(&topk_, 0, static_cast( reinterpret_cast(&nprobe_) - @@ 
-3203,6 +4360,18 @@ const char* SearchParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID CHK_(ptr); } else goto handle_unusual; continue; + // repeated string partition_tag_array = 6; + case 6: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) { + ptr -= 1; + do { + ptr += 1; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_partition_tag_array(), ptr, ctx, "milvus.grpc.SearchParam.partition_tag_array"); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 50); + } else goto handle_unusual; + continue; default: { handle_unusual: if ((tag & 7) == 4 || tag == 0) { @@ -3296,6 +4465,22 @@ bool SearchParam::MergePartialFromCodedStream( break; } + // repeated string partition_tag_array = 6; + case 6: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->add_partition_tag_array())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag_array(this->partition_tag_array_size() - 1).data(), + static_cast(this->partition_tag_array(this->partition_tag_array_size() - 1).length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.SearchParam.partition_tag_array")); + } else { + goto handle_unusual; + } + break; + } + default: { handle_unusual: if (tag == 0) { @@ -3361,6 +4546,16 @@ void SearchParam::SerializeWithCachedSizes( ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(5, this->nprobe(), output); } + // repeated string partition_tag_array = 6; + for (int i = 0, n = this->partition_tag_array_size(); i < n; i++) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag_array(i).data(), static_cast(this->partition_tag_array(i).length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.SearchParam.partition_tag_array"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString( + 6, this->partition_tag_array(i), output); + } + if (_internal_metadata_.have_unknown_fields()) { ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( _internal_metadata_.unknown_fields(), output); @@ -3411,6 +4606,16 @@ void SearchParam::SerializeWithCachedSizes( target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(5, this->nprobe(), target); } + // repeated string partition_tag_array = 6; + for (int i = 0, n = this->partition_tag_array_size(); i < n; i++) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag_array(i).data(), static_cast(this->partition_tag_array(i).length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.SearchParam.partition_tag_array"); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + WriteStringToArray(6, this->partition_tag_array(i), target); + } + if (_internal_metadata_.have_unknown_fields()) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields(), target); @@ -3454,6 +4659,14 @@ size_t SearchParam::ByteSizeLong() const { } } + // repeated string partition_tag_array = 6; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->partition_tag_array_size()); + for (int i = 0, n = this->partition_tag_array_size(); i < n; i++) { + total_size += 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->partition_tag_array(i)); + } + // string table_name = 1; if (this->table_name().size() > 0) { total_size += 1 + @@ -3504,6 +4717,7 @@ void SearchParam::MergeFrom(const SearchParam& from) { query_record_array_.MergeFrom(from.query_record_array_); query_range_array_.MergeFrom(from.query_range_array_); + partition_tag_array_.MergeFrom(from.partition_tag_array_); if (from.table_name().size() > 0) { table_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.table_name_); @@ -3539,6 +4753,7 @@ void SearchParam::InternalSwap(SearchParam* other) { _internal_metadata_.Swap(&other->_internal_metadata_); CastToBase(&query_record_array_)->InternalSwap(CastToBase(&other->query_record_array_)); CastToBase(&query_range_array_)->InternalSwap(CastToBase(&other->query_range_array_)); + partition_tag_array_.InternalSwap(CastToBase(&other->partition_tag_array_)); table_name_.Swap(&other->table_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); swap(topk_, other->topk_); @@ -6688,25 +7903,25 @@ void IndexParam::InternalSwap(IndexParam* other) { // =================================================================== -void DeleteByRangeParam::InitAsDefaultInstance() { - ::milvus::grpc::_DeleteByRangeParam_default_instance_._instance.get_mutable()->range_ = const_cast< ::milvus::grpc::Range*>( +void DeleteByDateParam::InitAsDefaultInstance() { + ::milvus::grpc::_DeleteByDateParam_default_instance_._instance.get_mutable()->range_ = const_cast< ::milvus::grpc::Range*>( ::milvus::grpc::Range::internal_default_instance()); } -class DeleteByRangeParam::_Internal { +class DeleteByDateParam::_Internal { public: - static const ::milvus::grpc::Range& range(const DeleteByRangeParam* msg); + static const ::milvus::grpc::Range& range(const DeleteByDateParam* msg); }; const ::milvus::grpc::Range& -DeleteByRangeParam::_Internal::range(const DeleteByRangeParam* msg) { +DeleteByDateParam::_Internal::range(const DeleteByDateParam* msg) { return *msg->range_; } -DeleteByRangeParam::DeleteByRangeParam() +DeleteByDateParam::DeleteByDateParam() : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) { SharedCtor(); - // @@protoc_insertion_point(constructor:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(constructor:milvus.grpc.DeleteByDateParam) } -DeleteByRangeParam::DeleteByRangeParam(const DeleteByRangeParam& from) +DeleteByDateParam::DeleteByDateParam(const DeleteByDateParam& from) : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) { _internal_metadata_.MergeFrom(from._internal_metadata_); @@ -6719,36 +7934,36 @@ DeleteByRangeParam::DeleteByRangeParam(const DeleteByRangeParam& from) } else { range_ = nullptr; } - // @@protoc_insertion_point(copy_constructor:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(copy_constructor:milvus.grpc.DeleteByDateParam) } -void DeleteByRangeParam::SharedCtor() { - ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_DeleteByRangeParam_milvus_2eproto.base); +void DeleteByDateParam::SharedCtor() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_DeleteByDateParam_milvus_2eproto.base); table_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); range_ = nullptr; } -DeleteByRangeParam::~DeleteByRangeParam() { - // @@protoc_insertion_point(destructor:milvus.grpc.DeleteByRangeParam) +DeleteByDateParam::~DeleteByDateParam() { + // 
@@protoc_insertion_point(destructor:milvus.grpc.DeleteByDateParam) SharedDtor(); } -void DeleteByRangeParam::SharedDtor() { +void DeleteByDateParam::SharedDtor() { table_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); if (this != internal_default_instance()) delete range_; } -void DeleteByRangeParam::SetCachedSize(int size) const { +void DeleteByDateParam::SetCachedSize(int size) const { _cached_size_.Set(size); } -const DeleteByRangeParam& DeleteByRangeParam::default_instance() { - ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_DeleteByRangeParam_milvus_2eproto.base); +const DeleteByDateParam& DeleteByDateParam::default_instance() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_DeleteByDateParam_milvus_2eproto.base); return *internal_default_instance(); } -void DeleteByRangeParam::Clear() { -// @@protoc_insertion_point(message_clear_start:milvus.grpc.DeleteByRangeParam) +void DeleteByDateParam::Clear() { +// @@protoc_insertion_point(message_clear_start:milvus.grpc.DeleteByDateParam) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; @@ -6762,7 +7977,7 @@ void DeleteByRangeParam::Clear() { } #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER -const char* DeleteByRangeParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +const char* DeleteByDateParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; @@ -6779,7 +7994,7 @@ const char* DeleteByRangeParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMES // string table_name = 2; case 2: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_table_name(), ptr, ctx, "milvus.grpc.DeleteByRangeParam.table_name"); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_table_name(), ptr, ctx, "milvus.grpc.DeleteByDateParam.table_name"); CHK_(ptr); } else goto handle_unusual; continue; @@ -6803,11 +8018,11 @@ failure: #undef CHK_ } #else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER -bool DeleteByRangeParam::MergePartialFromCodedStream( +bool DeleteByDateParam::MergePartialFromCodedStream( ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) { #define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure ::PROTOBUF_NAMESPACE_ID::uint32 tag; - // @@protoc_insertion_point(parse_start:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(parse_start:milvus.grpc.DeleteByDateParam) for (;;) { ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); tag = p.first; @@ -6832,7 +8047,7 @@ bool DeleteByRangeParam::MergePartialFromCodedStream( DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( this->table_name().data(), static_cast(this->table_name().length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, - "milvus.grpc.DeleteByRangeParam.table_name")); + "milvus.grpc.DeleteByDateParam.table_name")); } else { goto handle_unusual; } @@ -6851,18 +8066,18 @@ bool DeleteByRangeParam::MergePartialFromCodedStream( } } success: - // @@protoc_insertion_point(parse_success:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(parse_success:milvus.grpc.DeleteByDateParam) return true; 
failure: - // @@protoc_insertion_point(parse_failure:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(parse_failure:milvus.grpc.DeleteByDateParam) return false; #undef DO_ } #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER -void DeleteByRangeParam::SerializeWithCachedSizes( +void DeleteByDateParam::SerializeWithCachedSizes( ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const { - // @@protoc_insertion_point(serialize_start:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(serialize_start:milvus.grpc.DeleteByDateParam) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; @@ -6877,7 +8092,7 @@ void DeleteByRangeParam::SerializeWithCachedSizes( ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( this->table_name().data(), static_cast(this->table_name().length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "milvus.grpc.DeleteByRangeParam.table_name"); + "milvus.grpc.DeleteByDateParam.table_name"); ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( 2, this->table_name(), output); } @@ -6886,12 +8101,12 @@ void DeleteByRangeParam::SerializeWithCachedSizes( ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( _internal_metadata_.unknown_fields(), output); } - // @@protoc_insertion_point(serialize_end:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(serialize_end:milvus.grpc.DeleteByDateParam) } -::PROTOBUF_NAMESPACE_ID::uint8* DeleteByRangeParam::InternalSerializeWithCachedSizesToArray( +::PROTOBUF_NAMESPACE_ID::uint8* DeleteByDateParam::InternalSerializeWithCachedSizesToArray( ::PROTOBUF_NAMESPACE_ID::uint8* target) const { - // @@protoc_insertion_point(serialize_to_array_start:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(serialize_to_array_start:milvus.grpc.DeleteByDateParam) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; @@ -6907,7 +8122,7 @@ void DeleteByRangeParam::SerializeWithCachedSizes( ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( this->table_name().data(), static_cast(this->table_name().length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "milvus.grpc.DeleteByRangeParam.table_name"); + "milvus.grpc.DeleteByDateParam.table_name"); target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( 2, this->table_name(), target); @@ -6917,12 +8132,12 @@ void DeleteByRangeParam::SerializeWithCachedSizes( target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields(), target); } - // @@protoc_insertion_point(serialize_to_array_end:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(serialize_to_array_end:milvus.grpc.DeleteByDateParam) return target; } -size_t DeleteByRangeParam::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:milvus.grpc.DeleteByRangeParam) +size_t DeleteByDateParam::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:milvus.grpc.DeleteByDateParam) size_t total_size = 0; if (_internal_metadata_.have_unknown_fields()) { @@ -6953,23 +8168,23 @@ size_t DeleteByRangeParam::ByteSizeLong() const { return total_size; } -void DeleteByRangeParam::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { -// @@protoc_insertion_point(generalized_merge_from_start:milvus.grpc.DeleteByRangeParam) +void DeleteByDateParam::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// 
@@protoc_insertion_point(generalized_merge_from_start:milvus.grpc.DeleteByDateParam) GOOGLE_DCHECK_NE(&from, this); - const DeleteByRangeParam* source = - ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated( + const DeleteByDateParam* source = + ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated( &from); if (source == nullptr) { - // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.grpc.DeleteByDateParam) ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this); } else { - // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.grpc.DeleteByDateParam) MergeFrom(*source); } } -void DeleteByRangeParam::MergeFrom(const DeleteByRangeParam& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:milvus.grpc.DeleteByRangeParam) +void DeleteByDateParam::MergeFrom(const DeleteByDateParam& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:milvus.grpc.DeleteByDateParam) GOOGLE_DCHECK_NE(&from, this); _internal_metadata_.MergeFrom(from._internal_metadata_); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; @@ -6984,25 +8199,25 @@ void DeleteByRangeParam::MergeFrom(const DeleteByRangeParam& from) { } } -void DeleteByRangeParam::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { -// @@protoc_insertion_point(generalized_copy_from_start:milvus.grpc.DeleteByRangeParam) +void DeleteByDateParam::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:milvus.grpc.DeleteByDateParam) if (&from == this) return; Clear(); MergeFrom(from); } -void DeleteByRangeParam::CopyFrom(const DeleteByRangeParam& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:milvus.grpc.DeleteByRangeParam) +void DeleteByDateParam::CopyFrom(const DeleteByDateParam& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:milvus.grpc.DeleteByDateParam) if (&from == this) return; Clear(); MergeFrom(from); } -bool DeleteByRangeParam::IsInitialized() const { +bool DeleteByDateParam::IsInitialized() const { return true; } -void DeleteByRangeParam::InternalSwap(DeleteByRangeParam* other) { +void DeleteByDateParam::InternalSwap(DeleteByDateParam* other) { using std::swap; _internal_metadata_.Swap(&other->_internal_metadata_); table_name_.Swap(&other->table_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), @@ -7010,7 +8225,7 @@ void DeleteByRangeParam::InternalSwap(DeleteByRangeParam* other) { swap(range_, other->range_); } -::PROTOBUF_NAMESPACE_ID::Metadata DeleteByRangeParam::GetMetadata() const { +::PROTOBUF_NAMESPACE_ID::Metadata DeleteByDateParam::GetMetadata() const { return GetMetadataStatic(); } @@ -7022,12 +8237,21 @@ PROTOBUF_NAMESPACE_OPEN template<> PROTOBUF_NOINLINE ::milvus::grpc::TableName* Arena::CreateMaybeMessage< ::milvus::grpc::TableName >(Arena* arena) { return Arena::CreateInternal< ::milvus::grpc::TableName >(arena); } +template<> PROTOBUF_NOINLINE ::milvus::grpc::PartitionName* Arena::CreateMaybeMessage< ::milvus::grpc::PartitionName >(Arena* arena) { + return Arena::CreateInternal< ::milvus::grpc::PartitionName >(arena); +} template<> PROTOBUF_NOINLINE ::milvus::grpc::TableNameList* Arena::CreateMaybeMessage< ::milvus::grpc::TableNameList >(Arena* arena) { return Arena::CreateInternal< ::milvus::grpc::TableNameList >(arena); } template<> 
PROTOBUF_NOINLINE ::milvus::grpc::TableSchema* Arena::CreateMaybeMessage< ::milvus::grpc::TableSchema >(Arena* arena) { return Arena::CreateInternal< ::milvus::grpc::TableSchema >(arena); } +template<> PROTOBUF_NOINLINE ::milvus::grpc::PartitionParam* Arena::CreateMaybeMessage< ::milvus::grpc::PartitionParam >(Arena* arena) { + return Arena::CreateInternal< ::milvus::grpc::PartitionParam >(arena); +} +template<> PROTOBUF_NOINLINE ::milvus::grpc::PartitionList* Arena::CreateMaybeMessage< ::milvus::grpc::PartitionList >(Arena* arena) { + return Arena::CreateInternal< ::milvus::grpc::PartitionList >(arena); +} template<> PROTOBUF_NOINLINE ::milvus::grpc::Range* Arena::CreateMaybeMessage< ::milvus::grpc::Range >(Arena* arena) { return Arena::CreateInternal< ::milvus::grpc::Range >(arena); } @@ -7073,8 +8297,8 @@ template<> PROTOBUF_NOINLINE ::milvus::grpc::Index* Arena::CreateMaybeMessage< : template<> PROTOBUF_NOINLINE ::milvus::grpc::IndexParam* Arena::CreateMaybeMessage< ::milvus::grpc::IndexParam >(Arena* arena) { return Arena::CreateInternal< ::milvus::grpc::IndexParam >(arena); } -template<> PROTOBUF_NOINLINE ::milvus::grpc::DeleteByRangeParam* Arena::CreateMaybeMessage< ::milvus::grpc::DeleteByRangeParam >(Arena* arena) { - return Arena::CreateInternal< ::milvus::grpc::DeleteByRangeParam >(arena); +template<> PROTOBUF_NOINLINE ::milvus::grpc::DeleteByDateParam* Arena::CreateMaybeMessage< ::milvus::grpc::DeleteByDateParam >(Arena* arena) { + return Arena::CreateInternal< ::milvus::grpc::DeleteByDateParam >(arena); } PROTOBUF_NAMESPACE_CLOSE diff --git a/core/src/grpc/gen-milvus/milvus.pb.h b/core/src/grpc/gen-milvus/milvus.pb.h index 5ac3fda023..f41ca2c8c4 100644 --- a/core/src/grpc/gen-milvus/milvus.pb.h +++ b/core/src/grpc/gen-milvus/milvus.pb.h @@ -48,7 +48,7 @@ struct TableStruct_milvus_2eproto { PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::AuxillaryParseTableField aux[] PROTOBUF_SECTION_VARIABLE(protodesc_cold); - static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[19] + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[22] PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[]; static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[]; @@ -63,9 +63,9 @@ extern BoolReplyDefaultTypeInternal _BoolReply_default_instance_; class Command; class CommandDefaultTypeInternal; extern CommandDefaultTypeInternal _Command_default_instance_; -class DeleteByRangeParam; -class DeleteByRangeParamDefaultTypeInternal; -extern DeleteByRangeParamDefaultTypeInternal _DeleteByRangeParam_default_instance_; +class DeleteByDateParam; +class DeleteByDateParamDefaultTypeInternal; +extern DeleteByDateParamDefaultTypeInternal _DeleteByDateParam_default_instance_; class Index; class IndexDefaultTypeInternal; extern IndexDefaultTypeInternal _Index_default_instance_; @@ -75,6 +75,15 @@ extern IndexParamDefaultTypeInternal _IndexParam_default_instance_; class InsertParam; class InsertParamDefaultTypeInternal; extern InsertParamDefaultTypeInternal _InsertParam_default_instance_; +class PartitionList; +class PartitionListDefaultTypeInternal; +extern PartitionListDefaultTypeInternal _PartitionList_default_instance_; +class PartitionName; +class PartitionNameDefaultTypeInternal; +extern PartitionNameDefaultTypeInternal _PartitionName_default_instance_; +class PartitionParam; +class PartitionParamDefaultTypeInternal; +extern 
PartitionParamDefaultTypeInternal _PartitionParam_default_instance_; class QueryResult; class QueryResultDefaultTypeInternal; extern QueryResultDefaultTypeInternal _QueryResult_default_instance_; @@ -119,10 +128,13 @@ extern VectorIdsDefaultTypeInternal _VectorIds_default_instance_; PROTOBUF_NAMESPACE_OPEN template<> ::milvus::grpc::BoolReply* Arena::CreateMaybeMessage<::milvus::grpc::BoolReply>(Arena*); template<> ::milvus::grpc::Command* Arena::CreateMaybeMessage<::milvus::grpc::Command>(Arena*); -template<> ::milvus::grpc::DeleteByRangeParam* Arena::CreateMaybeMessage<::milvus::grpc::DeleteByRangeParam>(Arena*); +template<> ::milvus::grpc::DeleteByDateParam* Arena::CreateMaybeMessage<::milvus::grpc::DeleteByDateParam>(Arena*); template<> ::milvus::grpc::Index* Arena::CreateMaybeMessage<::milvus::grpc::Index>(Arena*); template<> ::milvus::grpc::IndexParam* Arena::CreateMaybeMessage<::milvus::grpc::IndexParam>(Arena*); template<> ::milvus::grpc::InsertParam* Arena::CreateMaybeMessage<::milvus::grpc::InsertParam>(Arena*); +template<> ::milvus::grpc::PartitionList* Arena::CreateMaybeMessage<::milvus::grpc::PartitionList>(Arena*); +template<> ::milvus::grpc::PartitionName* Arena::CreateMaybeMessage<::milvus::grpc::PartitionName>(Arena*); +template<> ::milvus::grpc::PartitionParam* Arena::CreateMaybeMessage<::milvus::grpc::PartitionParam>(Arena*); template<> ::milvus::grpc::QueryResult* Arena::CreateMaybeMessage<::milvus::grpc::QueryResult>(Arena*); template<> ::milvus::grpc::Range* Arena::CreateMaybeMessage<::milvus::grpc::Range>(Arena*); template<> ::milvus::grpc::RowRecord* Arena::CreateMaybeMessage<::milvus::grpc::RowRecord>(Arena*); @@ -279,6 +291,143 @@ class TableName : }; // ------------------------------------------------------------------- +class PartitionName : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.PartitionName) */ { + public: + PartitionName(); + virtual ~PartitionName(); + + PartitionName(const PartitionName& from); + PartitionName(PartitionName&& from) noexcept + : PartitionName() { + *this = ::std::move(from); + } + + inline PartitionName& operator=(const PartitionName& from) { + CopyFrom(from); + return *this; + } + inline PartitionName& operator=(PartitionName&& from) noexcept { + if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { + if (this != &from) InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return GetMetadataStatic().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return GetMetadataStatic().reflection; + } + static const PartitionName& default_instance(); + + static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY + static inline const PartitionName* internal_default_instance() { + return reinterpret_cast( + &_PartitionName_default_instance_); + } + static constexpr int kIndexInFileMessages = + 1; + + friend void swap(PartitionName& a, PartitionName& b) { + a.Swap(&b); + } + inline void Swap(PartitionName* other) { + if (other == this) return; + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline PartitionName* New() const final { + return CreateMaybeMessage(nullptr); + } + + PartitionName* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + void CopyFrom(const 
::PROTOBUF_NAMESPACE_ID::Message& from) final; + void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; + void CopyFrom(const PartitionName& from); + void MergeFrom(const PartitionName& from); + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + #else + bool MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final; + #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + void SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final; + ::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + inline void SharedCtor(); + inline void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PartitionName* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "milvus.grpc.PartitionName"; + } + private: + inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const { + return nullptr; + } + inline void* MaybeArenaPtr() const { + return nullptr; + } + public: + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + private: + static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() { + ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_milvus_2eproto); + return ::descriptor_table_milvus_2eproto.file_level_metadata[kIndexInFileMessages]; + } + + public: + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kPartitionNameFieldNumber = 1, + }; + // string partition_name = 1; + void clear_partition_name(); + const std::string& partition_name() const; + void set_partition_name(const std::string& value); + void set_partition_name(std::string&& value); + void set_partition_name(const char* value); + void set_partition_name(const char* value, size_t size); + std::string* mutable_partition_name(); + std::string* release_partition_name(); + void set_allocated_partition_name(std::string* partition_name); + + // @@protoc_insertion_point(class_scope:milvus.grpc.PartitionName) + private: + class _Internal; + + ::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr partition_name_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_milvus_2eproto; +}; +// ------------------------------------------------------------------- + class TableNameList : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.TableNameList) */ { public: @@ -321,7 +470,7 @@ class TableNameList : &_TableNameList_default_instance_); } static constexpr int kIndexInFileMessages = - 1; + 2; friend void swap(TableNameList& a, TableNameList& b) { a.Swap(&b); @@ -474,7 +623,7 @@ class TableSchema : &_TableSchema_default_instance_); } static constexpr int kIndexInFileMessages = - 2; + 3; friend void swap(TableSchema& a, TableSchema& b) { a.Swap(&b); @@ -600,6 +749,316 @@ class TableSchema : }; // 
------------------------------------------------------------------- +class PartitionParam : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.PartitionParam) */ { + public: + PartitionParam(); + virtual ~PartitionParam(); + + PartitionParam(const PartitionParam& from); + PartitionParam(PartitionParam&& from) noexcept + : PartitionParam() { + *this = ::std::move(from); + } + + inline PartitionParam& operator=(const PartitionParam& from) { + CopyFrom(from); + return *this; + } + inline PartitionParam& operator=(PartitionParam&& from) noexcept { + if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { + if (this != &from) InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return GetMetadataStatic().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return GetMetadataStatic().reflection; + } + static const PartitionParam& default_instance(); + + static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY + static inline const PartitionParam* internal_default_instance() { + return reinterpret_cast( + &_PartitionParam_default_instance_); + } + static constexpr int kIndexInFileMessages = + 4; + + friend void swap(PartitionParam& a, PartitionParam& b) { + a.Swap(&b); + } + inline void Swap(PartitionParam* other) { + if (other == this) return; + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline PartitionParam* New() const final { + return CreateMaybeMessage(nullptr); + } + + PartitionParam* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; + void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; + void CopyFrom(const PartitionParam& from); + void MergeFrom(const PartitionParam& from); + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + #else + bool MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final; + #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + void SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final; + ::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + inline void SharedCtor(); + inline void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PartitionParam* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "milvus.grpc.PartitionParam"; + } + private: + inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const { + return nullptr; + } + inline void* MaybeArenaPtr() const { + return nullptr; + } + public: + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + private: + static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() { + 
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_milvus_2eproto); + return ::descriptor_table_milvus_2eproto.file_level_metadata[kIndexInFileMessages]; + } + + public: + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kTableNameFieldNumber = 1, + kPartitionNameFieldNumber = 2, + kTagFieldNumber = 3, + }; + // string table_name = 1; + void clear_table_name(); + const std::string& table_name() const; + void set_table_name(const std::string& value); + void set_table_name(std::string&& value); + void set_table_name(const char* value); + void set_table_name(const char* value, size_t size); + std::string* mutable_table_name(); + std::string* release_table_name(); + void set_allocated_table_name(std::string* table_name); + + // string partition_name = 2; + void clear_partition_name(); + const std::string& partition_name() const; + void set_partition_name(const std::string& value); + void set_partition_name(std::string&& value); + void set_partition_name(const char* value); + void set_partition_name(const char* value, size_t size); + std::string* mutable_partition_name(); + std::string* release_partition_name(); + void set_allocated_partition_name(std::string* partition_name); + + // string tag = 3; + void clear_tag(); + const std::string& tag() const; + void set_tag(const std::string& value); + void set_tag(std::string&& value); + void set_tag(const char* value); + void set_tag(const char* value, size_t size); + std::string* mutable_tag(); + std::string* release_tag(); + void set_allocated_tag(std::string* tag); + + // @@protoc_insertion_point(class_scope:milvus.grpc.PartitionParam) + private: + class _Internal; + + ::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr table_name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr partition_name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr tag_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_milvus_2eproto; +}; +// ------------------------------------------------------------------- + +class PartitionList : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.PartitionList) */ { + public: + PartitionList(); + virtual ~PartitionList(); + + PartitionList(const PartitionList& from); + PartitionList(PartitionList&& from) noexcept + : PartitionList() { + *this = ::std::move(from); + } + + inline PartitionList& operator=(const PartitionList& from) { + CopyFrom(from); + return *this; + } + inline PartitionList& operator=(PartitionList&& from) noexcept { + if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { + if (this != &from) InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return GetMetadataStatic().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return GetMetadataStatic().reflection; + } + static const PartitionList& default_instance(); + + static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY + static inline const PartitionList* internal_default_instance() { + return reinterpret_cast( + &_PartitionList_default_instance_); + } + static constexpr int kIndexInFileMessages = + 5; + + 
friend void swap(PartitionList& a, PartitionList& b) { + a.Swap(&b); + } + inline void Swap(PartitionList* other) { + if (other == this) return; + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline PartitionList* New() const final { + return CreateMaybeMessage(nullptr); + } + + PartitionList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; + void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; + void CopyFrom(const PartitionList& from); + void MergeFrom(const PartitionList& from); + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + #else + bool MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final; + #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + void SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final; + ::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + inline void SharedCtor(); + inline void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PartitionList* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "milvus.grpc.PartitionList"; + } + private: + inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const { + return nullptr; + } + inline void* MaybeArenaPtr() const { + return nullptr; + } + public: + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + private: + static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() { + ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_milvus_2eproto); + return ::descriptor_table_milvus_2eproto.file_level_metadata[kIndexInFileMessages]; + } + + public: + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kPartitionArrayFieldNumber = 2, + kStatusFieldNumber = 1, + }; + // repeated .milvus.grpc.PartitionParam partition_array = 2; + int partition_array_size() const; + void clear_partition_array(); + ::milvus::grpc::PartitionParam* mutable_partition_array(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::PartitionParam >* + mutable_partition_array(); + const ::milvus::grpc::PartitionParam& partition_array(int index) const; + ::milvus::grpc::PartitionParam* add_partition_array(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::PartitionParam >& + partition_array() const; + + // .milvus.grpc.Status status = 1; + bool has_status() const; + void clear_status(); + const ::milvus::grpc::Status& status() const; + ::milvus::grpc::Status* release_status(); + ::milvus::grpc::Status* mutable_status(); + void set_allocated_status(::milvus::grpc::Status* status); + + // @@protoc_insertion_point(class_scope:milvus.grpc.PartitionList) + private: + class _Internal; + + ::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_; + 
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::PartitionParam > partition_array_; + ::milvus::grpc::Status* status_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_milvus_2eproto; +}; +// ------------------------------------------------------------------- + class Range : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.Range) */ { public: @@ -642,7 +1101,7 @@ class Range : &_Range_default_instance_); } static constexpr int kIndexInFileMessages = - 3; + 6; friend void swap(Range& a, Range& b) { a.Swap(&b); @@ -792,7 +1251,7 @@ class RowRecord : &_RowRecord_default_instance_); } static constexpr int kIndexInFileMessages = - 4; + 7; friend void swap(RowRecord& a, RowRecord& b) { a.Swap(&b); @@ -930,7 +1389,7 @@ class InsertParam : &_InsertParam_default_instance_); } static constexpr int kIndexInFileMessages = - 5; + 8; friend void swap(InsertParam& a, InsertParam& b) { a.Swap(&b); @@ -1004,6 +1463,7 @@ class InsertParam : kRowRecordArrayFieldNumber = 2, kRowIdArrayFieldNumber = 3, kTableNameFieldNumber = 1, + kPartitionTagFieldNumber = 4, }; // repeated .milvus.grpc.RowRecord row_record_array = 2; int row_record_array_size() const; @@ -1038,6 +1498,17 @@ class InsertParam : std::string* release_table_name(); void set_allocated_table_name(std::string* table_name); + // string partition_tag = 4; + void clear_partition_tag(); + const std::string& partition_tag() const; + void set_partition_tag(const std::string& value); + void set_partition_tag(std::string&& value); + void set_partition_tag(const char* value); + void set_partition_tag(const char* value, size_t size); + std::string* mutable_partition_tag(); + std::string* release_partition_tag(); + void set_allocated_partition_tag(std::string* partition_tag); + // @@protoc_insertion_point(class_scope:milvus.grpc.InsertParam) private: class _Internal; @@ -1047,6 +1518,7 @@ class InsertParam : ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 > row_id_array_; mutable std::atomic _row_id_array_cached_byte_size_; ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr table_name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr partition_tag_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_milvus_2eproto; }; @@ -1094,7 +1566,7 @@ class VectorIds : &_VectorIds_default_instance_); } static constexpr int kIndexInFileMessages = - 6; + 9; friend void swap(VectorIds& a, VectorIds& b) { a.Swap(&b); @@ -1242,7 +1714,7 @@ class SearchParam : &_SearchParam_default_instance_); } static constexpr int kIndexInFileMessages = - 7; + 10; friend void swap(SearchParam& a, SearchParam& b) { a.Swap(&b); @@ -1315,6 +1787,7 @@ class SearchParam : enum : int { kQueryRecordArrayFieldNumber = 2, kQueryRangeArrayFieldNumber = 3, + kPartitionTagArrayFieldNumber = 6, kTableNameFieldNumber = 1, kTopkFieldNumber = 4, kNprobeFieldNumber = 5, @@ -1341,6 +1814,23 @@ class SearchParam : const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::Range >& query_range_array() const; + // repeated string partition_tag_array = 6; + int partition_tag_array_size() const; + void clear_partition_tag_array(); + const std::string& partition_tag_array(int index) const; + std::string* mutable_partition_tag_array(int index); + void set_partition_tag_array(int index, const std::string& value); + void set_partition_tag_array(int index, std::string&& value); + void set_partition_tag_array(int index, const char* 
value); + void set_partition_tag_array(int index, const char* value, size_t size); + std::string* add_partition_tag_array(); + void add_partition_tag_array(const std::string& value); + void add_partition_tag_array(std::string&& value); + void add_partition_tag_array(const char* value); + void add_partition_tag_array(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& partition_tag_array() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_partition_tag_array(); + // string table_name = 1; void clear_table_name(); const std::string& table_name() const; @@ -1369,6 +1859,7 @@ class SearchParam : ::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_; ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::RowRecord > query_record_array_; ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::Range > query_range_array_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField partition_tag_array_; ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr table_name_; ::PROTOBUF_NAMESPACE_ID::int64 topk_; ::PROTOBUF_NAMESPACE_ID::int64 nprobe_; @@ -1419,7 +1910,7 @@ class SearchInFilesParam : &_SearchInFilesParam_default_instance_); } static constexpr int kIndexInFileMessages = - 8; + 11; friend void swap(SearchInFilesParam& a, SearchInFilesParam& b) { a.Swap(&b); @@ -1572,7 +2063,7 @@ class QueryResult : &_QueryResult_default_instance_); } static constexpr int kIndexInFileMessages = - 9; + 12; friend void swap(QueryResult& a, QueryResult& b) { a.Swap(&b); @@ -1710,7 +2201,7 @@ class TopKQueryResult : &_TopKQueryResult_default_instance_); } static constexpr int kIndexInFileMessages = - 10; + 13; friend void swap(TopKQueryResult& a, TopKQueryResult& b) { a.Swap(&b); @@ -1847,7 +2338,7 @@ class TopKQueryResultList : &_TopKQueryResultList_default_instance_); } static constexpr int kIndexInFileMessages = - 11; + 14; friend void swap(TopKQueryResultList& a, TopKQueryResultList& b) { a.Swap(&b); @@ -1994,7 +2485,7 @@ class StringReply : &_StringReply_default_instance_); } static constexpr int kIndexInFileMessages = - 12; + 15; friend void swap(StringReply& a, StringReply& b) { a.Swap(&b); @@ -2141,7 +2632,7 @@ class BoolReply : &_BoolReply_default_instance_); } static constexpr int kIndexInFileMessages = - 13; + 16; friend void swap(BoolReply& a, BoolReply& b) { a.Swap(&b); @@ -2282,7 +2773,7 @@ class TableRowCount : &_TableRowCount_default_instance_); } static constexpr int kIndexInFileMessages = - 14; + 17; friend void swap(TableRowCount& a, TableRowCount& b) { a.Swap(&b); @@ -2423,7 +2914,7 @@ class Command : &_Command_default_instance_); } static constexpr int kIndexInFileMessages = - 15; + 18; friend void swap(Command& a, Command& b) { a.Swap(&b); @@ -2560,7 +3051,7 @@ class Index : &_Index_default_instance_); } static constexpr int kIndexInFileMessages = - 16; + 19; friend void swap(Index& a, Index& b) { a.Swap(&b); @@ -2698,7 +3189,7 @@ class IndexParam : &_IndexParam_default_instance_); } static constexpr int kIndexInFileMessages = - 17; + 20; friend void swap(IndexParam& a, IndexParam& b) { a.Swap(&b); @@ -2813,23 +3304,23 @@ class IndexParam : }; // ------------------------------------------------------------------- -class DeleteByRangeParam : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.DeleteByRangeParam) */ { +class DeleteByDateParam : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.DeleteByDateParam) */ { public: - 
DeleteByRangeParam(); - virtual ~DeleteByRangeParam(); + DeleteByDateParam(); + virtual ~DeleteByDateParam(); - DeleteByRangeParam(const DeleteByRangeParam& from); - DeleteByRangeParam(DeleteByRangeParam&& from) noexcept - : DeleteByRangeParam() { + DeleteByDateParam(const DeleteByDateParam& from); + DeleteByDateParam(DeleteByDateParam&& from) noexcept + : DeleteByDateParam() { *this = ::std::move(from); } - inline DeleteByRangeParam& operator=(const DeleteByRangeParam& from) { + inline DeleteByDateParam& operator=(const DeleteByDateParam& from) { CopyFrom(from); return *this; } - inline DeleteByRangeParam& operator=(DeleteByRangeParam&& from) noexcept { + inline DeleteByDateParam& operator=(DeleteByDateParam&& from) noexcept { if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { if (this != &from) InternalSwap(&from); } else { @@ -2847,37 +3338,37 @@ class DeleteByRangeParam : static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { return GetMetadataStatic().reflection; } - static const DeleteByRangeParam& default_instance(); + static const DeleteByDateParam& default_instance(); static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY - static inline const DeleteByRangeParam* internal_default_instance() { - return reinterpret_cast( - &_DeleteByRangeParam_default_instance_); + static inline const DeleteByDateParam* internal_default_instance() { + return reinterpret_cast( + &_DeleteByDateParam_default_instance_); } static constexpr int kIndexInFileMessages = - 18; + 21; - friend void swap(DeleteByRangeParam& a, DeleteByRangeParam& b) { + friend void swap(DeleteByDateParam& a, DeleteByDateParam& b) { a.Swap(&b); } - inline void Swap(DeleteByRangeParam* other) { + inline void Swap(DeleteByDateParam* other) { if (other == this) return; InternalSwap(other); } // implements Message ---------------------------------------------- - inline DeleteByRangeParam* New() const final { - return CreateMaybeMessage(nullptr); + inline DeleteByDateParam* New() const final { + return CreateMaybeMessage(nullptr); } - DeleteByRangeParam* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + DeleteByDateParam* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); } void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; - void CopyFrom(const DeleteByRangeParam& from); - void MergeFrom(const DeleteByRangeParam& from); + void CopyFrom(const DeleteByDateParam& from); + void MergeFrom(const DeleteByDateParam& from); PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; @@ -2898,10 +3389,10 @@ class DeleteByRangeParam : inline void SharedCtor(); inline void SharedDtor(); void SetCachedSize(int size) const final; - void InternalSwap(DeleteByRangeParam* other); + void InternalSwap(DeleteByDateParam* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "milvus.grpc.DeleteByRangeParam"; + return "milvus.grpc.DeleteByDateParam"; } private: inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const { @@ -2948,7 +3439,7 @@ class DeleteByRangeParam : ::milvus::grpc::Range* mutable_range(); void set_allocated_range(::milvus::grpc::Range* range); - // @@protoc_insertion_point(class_scope:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(class_scope:milvus.grpc.DeleteByDateParam) private: class _Internal; @@ -3022,6 
+3513,61 @@ inline void TableName::set_allocated_table_name(std::string* table_name) { // ------------------------------------------------------------------- +// PartitionName + +// string partition_name = 1; +inline void PartitionName::clear_partition_name() { + partition_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline const std::string& PartitionName::partition_name() const { + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionName.partition_name) + return partition_name_.GetNoArena(); +} +inline void PartitionName::set_partition_name(const std::string& value) { + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:milvus.grpc.PartitionName.partition_name) +} +inline void PartitionName::set_partition_name(std::string&& value) { + + partition_name_.SetNoArena( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.PartitionName.partition_name) +} +inline void PartitionName::set_partition_name(const char* value) { + GOOGLE_DCHECK(value != nullptr); + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:milvus.grpc.PartitionName.partition_name) +} +inline void PartitionName::set_partition_name(const char* value, size_t size) { + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast(value), size)); + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.PartitionName.partition_name) +} +inline std::string* PartitionName::mutable_partition_name() { + + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionName.partition_name) + return partition_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline std::string* PartitionName::release_partition_name() { + // @@protoc_insertion_point(field_release:milvus.grpc.PartitionName.partition_name) + + return partition_name_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline void PartitionName::set_allocated_partition_name(std::string* partition_name) { + if (partition_name != nullptr) { + + } else { + + } + partition_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), partition_name); + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.PartitionName.partition_name) +} + +// ------------------------------------------------------------------- + // TableNameList // .milvus.grpc.Status status = 1; @@ -3278,6 +3824,242 @@ inline void TableSchema::set_metric_type(::PROTOBUF_NAMESPACE_ID::int32 value) { // ------------------------------------------------------------------- +// PartitionParam + +// string table_name = 1; +inline void PartitionParam::clear_table_name() { + table_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline const std::string& PartitionParam::table_name() const { + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionParam.table_name) + return table_name_.GetNoArena(); +} +inline void PartitionParam::set_table_name(const std::string& value) { + + table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); + // 
@@protoc_insertion_point(field_set:milvus.grpc.PartitionParam.table_name) +} +inline void PartitionParam::set_table_name(std::string&& value) { + + table_name_.SetNoArena( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.PartitionParam.table_name) +} +inline void PartitionParam::set_table_name(const char* value) { + GOOGLE_DCHECK(value != nullptr); + + table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:milvus.grpc.PartitionParam.table_name) +} +inline void PartitionParam::set_table_name(const char* value, size_t size) { + + table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast(value), size)); + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.PartitionParam.table_name) +} +inline std::string* PartitionParam::mutable_table_name() { + + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionParam.table_name) + return table_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline std::string* PartitionParam::release_table_name() { + // @@protoc_insertion_point(field_release:milvus.grpc.PartitionParam.table_name) + + return table_name_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline void PartitionParam::set_allocated_table_name(std::string* table_name) { + if (table_name != nullptr) { + + } else { + + } + table_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), table_name); + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.PartitionParam.table_name) +} + +// string partition_name = 2; +inline void PartitionParam::clear_partition_name() { + partition_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline const std::string& PartitionParam::partition_name() const { + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionParam.partition_name) + return partition_name_.GetNoArena(); +} +inline void PartitionParam::set_partition_name(const std::string& value) { + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:milvus.grpc.PartitionParam.partition_name) +} +inline void PartitionParam::set_partition_name(std::string&& value) { + + partition_name_.SetNoArena( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.PartitionParam.partition_name) +} +inline void PartitionParam::set_partition_name(const char* value) { + GOOGLE_DCHECK(value != nullptr); + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:milvus.grpc.PartitionParam.partition_name) +} +inline void PartitionParam::set_partition_name(const char* value, size_t size) { + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast(value), size)); + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.PartitionParam.partition_name) +} +inline std::string* PartitionParam::mutable_partition_name() { + + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionParam.partition_name) + return 
partition_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline std::string* PartitionParam::release_partition_name() { + // @@protoc_insertion_point(field_release:milvus.grpc.PartitionParam.partition_name) + + return partition_name_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline void PartitionParam::set_allocated_partition_name(std::string* partition_name) { + if (partition_name != nullptr) { + + } else { + + } + partition_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), partition_name); + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.PartitionParam.partition_name) +} + +// string tag = 3; +inline void PartitionParam::clear_tag() { + tag_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline const std::string& PartitionParam::tag() const { + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionParam.tag) + return tag_.GetNoArena(); +} +inline void PartitionParam::set_tag(const std::string& value) { + + tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:milvus.grpc.PartitionParam.tag) +} +inline void PartitionParam::set_tag(std::string&& value) { + + tag_.SetNoArena( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.PartitionParam.tag) +} +inline void PartitionParam::set_tag(const char* value) { + GOOGLE_DCHECK(value != nullptr); + + tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:milvus.grpc.PartitionParam.tag) +} +inline void PartitionParam::set_tag(const char* value, size_t size) { + + tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast(value), size)); + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.PartitionParam.tag) +} +inline std::string* PartitionParam::mutable_tag() { + + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionParam.tag) + return tag_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline std::string* PartitionParam::release_tag() { + // @@protoc_insertion_point(field_release:milvus.grpc.PartitionParam.tag) + + return tag_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline void PartitionParam::set_allocated_tag(std::string* tag) { + if (tag != nullptr) { + + } else { + + } + tag_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), tag); + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.PartitionParam.tag) +} + +// ------------------------------------------------------------------- + +// PartitionList + +// .milvus.grpc.Status status = 1; +inline bool PartitionList::has_status() const { + return this != internal_default_instance() && status_ != nullptr; +} +inline const ::milvus::grpc::Status& PartitionList::status() const { + const ::milvus::grpc::Status* p = status_; + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionList.status) + return p != nullptr ? 
*p : *reinterpret_cast( + &::milvus::grpc::_Status_default_instance_); +} +inline ::milvus::grpc::Status* PartitionList::release_status() { + // @@protoc_insertion_point(field_release:milvus.grpc.PartitionList.status) + + ::milvus::grpc::Status* temp = status_; + status_ = nullptr; + return temp; +} +inline ::milvus::grpc::Status* PartitionList::mutable_status() { + + if (status_ == nullptr) { + auto* p = CreateMaybeMessage<::milvus::grpc::Status>(GetArenaNoVirtual()); + status_ = p; + } + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionList.status) + return status_; +} +inline void PartitionList::set_allocated_status(::milvus::grpc::Status* status) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaNoVirtual(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(status_); + } + if (status) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = nullptr; + if (message_arena != submessage_arena) { + status = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, status, submessage_arena); + } + + } else { + + } + status_ = status; + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.PartitionList.status) +} + +// repeated .milvus.grpc.PartitionParam partition_array = 2; +inline int PartitionList::partition_array_size() const { + return partition_array_.size(); +} +inline void PartitionList::clear_partition_array() { + partition_array_.Clear(); +} +inline ::milvus::grpc::PartitionParam* PartitionList::mutable_partition_array(int index) { + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionList.partition_array) + return partition_array_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::PartitionParam >* +PartitionList::mutable_partition_array() { + // @@protoc_insertion_point(field_mutable_list:milvus.grpc.PartitionList.partition_array) + return &partition_array_; +} +inline const ::milvus::grpc::PartitionParam& PartitionList::partition_array(int index) const { + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionList.partition_array) + return partition_array_.Get(index); +} +inline ::milvus::grpc::PartitionParam* PartitionList::add_partition_array() { + // @@protoc_insertion_point(field_add:milvus.grpc.PartitionList.partition_array) + return partition_array_.Add(); +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::PartitionParam >& +PartitionList::partition_array() const { + // @@protoc_insertion_point(field_list:milvus.grpc.PartitionList.partition_array) + return partition_array_; +} + +// ------------------------------------------------------------------- + // Range // string start_value = 1; @@ -3531,6 +4313,57 @@ InsertParam::mutable_row_id_array() { return &row_id_array_; } +// string partition_tag = 4; +inline void InsertParam::clear_partition_tag() { + partition_tag_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline const std::string& InsertParam::partition_tag() const { + // @@protoc_insertion_point(field_get:milvus.grpc.InsertParam.partition_tag) + return partition_tag_.GetNoArena(); +} +inline void InsertParam::set_partition_tag(const std::string& value) { + + partition_tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:milvus.grpc.InsertParam.partition_tag) +} +inline void InsertParam::set_partition_tag(std::string&& value) { + + partition_tag_.SetNoArena( + 
&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.InsertParam.partition_tag) +} +inline void InsertParam::set_partition_tag(const char* value) { + GOOGLE_DCHECK(value != nullptr); + + partition_tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:milvus.grpc.InsertParam.partition_tag) +} +inline void InsertParam::set_partition_tag(const char* value, size_t size) { + + partition_tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast(value), size)); + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.InsertParam.partition_tag) +} +inline std::string* InsertParam::mutable_partition_tag() { + + // @@protoc_insertion_point(field_mutable:milvus.grpc.InsertParam.partition_tag) + return partition_tag_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline std::string* InsertParam::release_partition_tag() { + // @@protoc_insertion_point(field_release:milvus.grpc.InsertParam.partition_tag) + + return partition_tag_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline void InsertParam::set_allocated_partition_tag(std::string* partition_tag) { + if (partition_tag != nullptr) { + + } else { + + } + partition_tag_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), partition_tag); + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.InsertParam.partition_tag) +} + // ------------------------------------------------------------------- // VectorIds @@ -3753,6 +4586,71 @@ inline void SearchParam::set_nprobe(::PROTOBUF_NAMESPACE_ID::int64 value) { // @@protoc_insertion_point(field_set:milvus.grpc.SearchParam.nprobe) } +// repeated string partition_tag_array = 6; +inline int SearchParam::partition_tag_array_size() const { + return partition_tag_array_.size(); +} +inline void SearchParam::clear_partition_tag_array() { + partition_tag_array_.Clear(); +} +inline const std::string& SearchParam::partition_tag_array(int index) const { + // @@protoc_insertion_point(field_get:milvus.grpc.SearchParam.partition_tag_array) + return partition_tag_array_.Get(index); +} +inline std::string* SearchParam::mutable_partition_tag_array(int index) { + // @@protoc_insertion_point(field_mutable:milvus.grpc.SearchParam.partition_tag_array) + return partition_tag_array_.Mutable(index); +} +inline void SearchParam::set_partition_tag_array(int index, const std::string& value) { + // @@protoc_insertion_point(field_set:milvus.grpc.SearchParam.partition_tag_array) + partition_tag_array_.Mutable(index)->assign(value); +} +inline void SearchParam::set_partition_tag_array(int index, std::string&& value) { + // @@protoc_insertion_point(field_set:milvus.grpc.SearchParam.partition_tag_array) + partition_tag_array_.Mutable(index)->assign(std::move(value)); +} +inline void SearchParam::set_partition_tag_array(int index, const char* value) { + GOOGLE_DCHECK(value != nullptr); + partition_tag_array_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:milvus.grpc.SearchParam.partition_tag_array) +} +inline void SearchParam::set_partition_tag_array(int index, const char* value, size_t size) { + partition_tag_array_.Mutable(index)->assign( + reinterpret_cast(value), size); + // 
@@protoc_insertion_point(field_set_pointer:milvus.grpc.SearchParam.partition_tag_array) +} +inline std::string* SearchParam::add_partition_tag_array() { + // @@protoc_insertion_point(field_add_mutable:milvus.grpc.SearchParam.partition_tag_array) + return partition_tag_array_.Add(); +} +inline void SearchParam::add_partition_tag_array(const std::string& value) { + partition_tag_array_.Add()->assign(value); + // @@protoc_insertion_point(field_add:milvus.grpc.SearchParam.partition_tag_array) +} +inline void SearchParam::add_partition_tag_array(std::string&& value) { + partition_tag_array_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:milvus.grpc.SearchParam.partition_tag_array) +} +inline void SearchParam::add_partition_tag_array(const char* value) { + GOOGLE_DCHECK(value != nullptr); + partition_tag_array_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:milvus.grpc.SearchParam.partition_tag_array) +} +inline void SearchParam::add_partition_tag_array(const char* value, size_t size) { + partition_tag_array_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:milvus.grpc.SearchParam.partition_tag_array) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +SearchParam::partition_tag_array() const { + // @@protoc_insertion_point(field_list:milvus.grpc.SearchParam.partition_tag_array) + return partition_tag_array_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +SearchParam::mutable_partition_tag_array() { + // @@protoc_insertion_point(field_mutable_list:milvus.grpc.SearchParam.partition_tag_array) + return &partition_tag_array_; +} + // ------------------------------------------------------------------- // SearchInFilesParam @@ -4484,41 +5382,41 @@ inline void IndexParam::set_allocated_index(::milvus::grpc::Index* index) { // ------------------------------------------------------------------- -// DeleteByRangeParam +// DeleteByDateParam // .milvus.grpc.Range range = 1; -inline bool DeleteByRangeParam::has_range() const { +inline bool DeleteByDateParam::has_range() const { return this != internal_default_instance() && range_ != nullptr; } -inline void DeleteByRangeParam::clear_range() { +inline void DeleteByDateParam::clear_range() { if (GetArenaNoVirtual() == nullptr && range_ != nullptr) { delete range_; } range_ = nullptr; } -inline const ::milvus::grpc::Range& DeleteByRangeParam::range() const { +inline const ::milvus::grpc::Range& DeleteByDateParam::range() const { const ::milvus::grpc::Range* p = range_; - // @@protoc_insertion_point(field_get:milvus.grpc.DeleteByRangeParam.range) + // @@protoc_insertion_point(field_get:milvus.grpc.DeleteByDateParam.range) return p != nullptr ? 
*p : *reinterpret_cast( &::milvus::grpc::_Range_default_instance_); } -inline ::milvus::grpc::Range* DeleteByRangeParam::release_range() { - // @@protoc_insertion_point(field_release:milvus.grpc.DeleteByRangeParam.range) +inline ::milvus::grpc::Range* DeleteByDateParam::release_range() { + // @@protoc_insertion_point(field_release:milvus.grpc.DeleteByDateParam.range) ::milvus::grpc::Range* temp = range_; range_ = nullptr; return temp; } -inline ::milvus::grpc::Range* DeleteByRangeParam::mutable_range() { +inline ::milvus::grpc::Range* DeleteByDateParam::mutable_range() { if (range_ == nullptr) { auto* p = CreateMaybeMessage<::milvus::grpc::Range>(GetArenaNoVirtual()); range_ = p; } - // @@protoc_insertion_point(field_mutable:milvus.grpc.DeleteByRangeParam.range) + // @@protoc_insertion_point(field_mutable:milvus.grpc.DeleteByDateParam.range) return range_; } -inline void DeleteByRangeParam::set_allocated_range(::milvus::grpc::Range* range) { +inline void DeleteByDateParam::set_allocated_range(::milvus::grpc::Range* range) { ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaNoVirtual(); if (message_arena == nullptr) { delete range_; @@ -4534,58 +5432,58 @@ inline void DeleteByRangeParam::set_allocated_range(::milvus::grpc::Range* range } range_ = range; - // @@protoc_insertion_point(field_set_allocated:milvus.grpc.DeleteByRangeParam.range) + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.DeleteByDateParam.range) } // string table_name = 2; -inline void DeleteByRangeParam::clear_table_name() { +inline void DeleteByDateParam::clear_table_name() { table_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); } -inline const std::string& DeleteByRangeParam::table_name() const { - // @@protoc_insertion_point(field_get:milvus.grpc.DeleteByRangeParam.table_name) +inline const std::string& DeleteByDateParam::table_name() const { + // @@protoc_insertion_point(field_get:milvus.grpc.DeleteByDateParam.table_name) return table_name_.GetNoArena(); } -inline void DeleteByRangeParam::set_table_name(const std::string& value) { +inline void DeleteByDateParam::set_table_name(const std::string& value) { table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); - // @@protoc_insertion_point(field_set:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_set:milvus.grpc.DeleteByDateParam.table_name) } -inline void DeleteByRangeParam::set_table_name(std::string&& value) { +inline void DeleteByDateParam::set_table_name(std::string&& value) { table_name_.SetNoArena( &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); - // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.DeleteByDateParam.table_name) } -inline void DeleteByRangeParam::set_table_name(const char* value) { +inline void DeleteByDateParam::set_table_name(const char* value) { GOOGLE_DCHECK(value != nullptr); table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); - // @@protoc_insertion_point(field_set_char:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_set_char:milvus.grpc.DeleteByDateParam.table_name) } -inline void DeleteByRangeParam::set_table_name(const char* value, size_t size) { +inline void DeleteByDateParam::set_table_name(const char* value, size_t size) { 
table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(reinterpret_cast(value), size)); - // @@protoc_insertion_point(field_set_pointer:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.DeleteByDateParam.table_name) } -inline std::string* DeleteByRangeParam::mutable_table_name() { +inline std::string* DeleteByDateParam::mutable_table_name() { - // @@protoc_insertion_point(field_mutable:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_mutable:milvus.grpc.DeleteByDateParam.table_name) return table_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); } -inline std::string* DeleteByRangeParam::release_table_name() { - // @@protoc_insertion_point(field_release:milvus.grpc.DeleteByRangeParam.table_name) +inline std::string* DeleteByDateParam::release_table_name() { + // @@protoc_insertion_point(field_release:milvus.grpc.DeleteByDateParam.table_name) return table_name_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); } -inline void DeleteByRangeParam::set_allocated_table_name(std::string* table_name) { +inline void DeleteByDateParam::set_allocated_table_name(std::string* table_name) { if (table_name != nullptr) { } else { } table_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), table_name); - // @@protoc_insertion_point(field_set_allocated:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.DeleteByDateParam.table_name) } #ifdef __GNUC__ @@ -4627,6 +5525,12 @@ inline void DeleteByRangeParam::set_allocated_table_name(std::string* table_name // ------------------------------------------------------------------- +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + // @@protoc_insertion_point(namespace_scope) diff --git a/core/src/grpc/milvus.proto b/core/src/grpc/milvus.proto index 2856dfc6d9..c2dbbc43ab 100644 --- a/core/src/grpc/milvus.proto +++ b/core/src/grpc/milvus.proto @@ -5,14 +5,21 @@ import "status.proto"; package milvus.grpc; /** - * @brief Table Name + * @brief Table name */ message TableName { string table_name = 1; } /** - * @brief Table Name List + * @brief Partition name + */ +message PartitionName { + string partition_name = 1; +} + +/** + * @brief Table name list */ message TableNameList { Status status = 1; @@ -20,7 +27,7 @@ message TableNameList { } /** - * @brief Table Schema + * @brief Table schema */ message TableSchema { Status status = 1; @@ -31,7 +38,24 @@ message TableSchema { } /** - * @brief Range Schema + * @brief Params of partition + */ +message PartitionParam { + string table_name = 1; + string partition_name = 2; + string tag = 3; +} + +/** + * @brief Partition list + */ +message PartitionList { + Status status = 1; + repeated PartitionParam partition_array = 2; +} + +/** + * @brief Range schema */ message Range { string start_value = 1; @@ -46,12 +70,13 @@ message RowRecord { } /** - * @brief params to be inserted + * @brief Params to be inserted */ message InsertParam { string table_name = 1; repeated RowRecord row_record_array = 2; repeated int64 row_id_array = 3; //optional + string partition_tag = 4; } /** @@ -63,7 +88,7 @@ message VectorIds { } /** - * @brief params for searching vector + * @brief 
Params for searching vector */ message SearchParam { string table_name = 1; @@ -71,10 +96,11 @@ message SearchParam { repeated Range query_range_array = 3; int64 topk = 4; int64 nprobe = 5; + repeated string partition_tag_array = 6; } /** - * @brief params for searching vector in files + * @brief Params for searching vector in files */ message SearchInFilesParam { repeated string file_id_array = 1; @@ -105,7 +131,7 @@ message TopKQueryResultList { } /** - * @brief Server String Reply + * @brief Server string Reply */ message StringReply { Status status = 1; @@ -129,7 +155,7 @@ message TableRowCount { } /** - * @brief Give Server Command + * @brief Give server Command */ message Command { string cmd = 1; @@ -155,169 +181,173 @@ message IndexParam { } /** - * @brief table name and range for DeleteByRange + * @brief table name and range for DeleteByDate */ -message DeleteByRangeParam { +message DeleteByDateParam { Range range = 1; string table_name = 2; } service MilvusService { /** - * @brief Create table method + * @brief This method is used to create table * - * This method is used to create table - * - * @param param, use to provide table information to be created. + * @param TableSchema, use to provide table information to be created. * + * @return Status */ rpc CreateTable(TableSchema) returns (Status){} /** - * @brief Test table existence method + * @brief This method is used to test table existence. * - * This method is used to test table existence. - * - * @param table_name, table name is going to be tested. + * @param TableName, table name is going to be tested. * + * @return BoolReply */ rpc HasTable(TableName) returns (BoolReply) {} /** - * @brief Delete table method + * @brief This method is used to get table schema. * - * This method is used to delete table. + * @param TableName, target table name. * - * @param table_name, table name is going to be deleted. - * - */ - rpc DropTable(TableName) returns (Status) {} - - /** - * @brief Build index by table method - * - * This method is used to build index by table in sync mode. - * - * @param table_name, table is going to be built index. - * - */ - rpc CreateIndex(IndexParam) returns (Status) {} - - /** - * @brief Add vector array to table - * - * This method is used to add vector array to table. - * - * @param table_name, table_name is inserted. - * @param record_array, vector array is inserted. - * - * @return vector id array - */ - rpc Insert(InsertParam) returns (VectorIds) {} - - /** - * @brief Query vector - * - * This method is used to query vector in table. - * - * @param table_name, table_name is queried. - * @param query_record_array, all vector are going to be queried. - * @param query_range_array, optional ranges for conditional search. If not specified, search whole table - * @param topk, how many similarity vectors will be searched. - * - * @return query result array. - */ - rpc Search(SearchParam) returns (TopKQueryResultList) {} - - /** - * @brief Internal use query interface - * - * This method is used to query vector in specified files. - * - * @param file_id_array, specified files id array, queried. - * @param query_record_array, all vector are going to be queried. - * @param query_range_array, optional ranges for conditional search. If not specified, search whole table - * @param topk, how many similarity vectors will be searched. - * - * @return query result array. 
- */ - rpc SearchInFiles(SearchInFilesParam) returns (TopKQueryResultList) {} - - /** - * @brief Get table schema - * - * This method is used to get table schema. - * - * @param table_name, target table name. - * - * @return table schema + * @return TableSchema */ rpc DescribeTable(TableName) returns (TableSchema) {} /** - * @brief Get table schema + * @brief This method is used to get table schema. * - * This method is used to get table schema. + * @param TableName, target table name. * - * @param table_name, target table name. - * - * @return table schema + * @return TableRowCount */ rpc CountTable(TableName) returns (TableRowCount) {} /** - * @brief List all tables in database + * @brief This method is used to list all tables. * - * This method is used to list all tables. + * @param Command, dummy parameter. * - * - * @return table names. + * @return TableNameList */ rpc ShowTables(Command) returns (TableNameList) {} /** - * @brief Give the server status + * @brief This method is used to delete table. * - * This method is used to give the server status. + * @param TableName, table name is going to be deleted. * - * @return Server status. + * @return TableNameList + */ + rpc DropTable(TableName) returns (Status) {} + + /** + * @brief This method is used to build index by table in sync mode. + * + * @param IndexParam, index paramters. + * + * @return Status + */ + rpc CreateIndex(IndexParam) returns (Status) {} + + /** + * @brief This method is used to describe index + * + * @param TableName, target table name. + * + * @return IndexParam + */ + rpc DescribeIndex(TableName) returns (IndexParam) {} + + /** + * @brief This method is used to drop index + * + * @param TableName, target table name. + * + * @return Status + */ + rpc DropIndex(TableName) returns (Status) {} + + /** + * @brief This method is used to create partition + * + * @param PartitionParam, partition parameters. + * + * @return Status + */ + rpc CreatePartition(PartitionParam) returns (Status) {} + + /** + * @brief This method is used to show partition information + * + * @param TableName, target table name. + * + * @return PartitionList + */ + rpc ShowPartitions(TableName) returns (PartitionList) {} + + /** + * @brief This method is used to drop partition + * + * @param PartitionParam, target partition. + * + * @return Status + */ + rpc DropPartition(PartitionParam) returns (Status) {} + + /** + * @brief This method is used to add vector array to table. + * + * @param InsertParam, insert parameters. + * + * @return VectorIds + */ + rpc Insert(InsertParam) returns (VectorIds) {} + + /** + * @brief This method is used to query vector in table. + * + * @param SearchParam, search parameters. + * + * @return TopKQueryResultList + */ + rpc Search(SearchParam) returns (TopKQueryResultList) {} + + /** + * @brief This method is used to query vector in specified files. + * + * @param SearchInFilesParam, search in files paremeters. + * + * @return TopKQueryResultList + */ + rpc SearchInFiles(SearchInFilesParam) returns (TopKQueryResultList) {} + + /** + * @brief This method is used to give the server status. + * + * @param Command, command string + * + * @return StringReply */ rpc Cmd(Command) returns (StringReply) {} /** - * @brief delete table by range + * @brief This method is used to delete vector by date range * - * This method is used to delete vector by range + * @param DeleteByDateParam, delete parameters. * - * @return rpc status. 
+ * @return status */ - rpc DeleteByRange(DeleteByRangeParam) returns (Status) {} + rpc DeleteByDate(DeleteByDateParam) returns (Status) {} /** - * @brief preload table + * @brief This method is used to preload table * - * This method is used to preload table + * @param TableName, target table name. * - * @return Status. + * @return Status */ rpc PreloadTable(TableName) returns (Status) {} - - /** - * @brief describe index - * - * This method is used to describe index - * - * @return Status. - */ - rpc DescribeIndex(TableName) returns (IndexParam) {} - - /** - * @brief drop index - * - * This method is used to drop index - * - * @return Status. - */ - rpc DropIndex(TableName) returns (Status) {} - } diff --git a/core/src/index/CMakeLists.txt b/core/src/index/CMakeLists.txt index 560a73ea16..4b5c1b1de3 100644 --- a/core/src/index/CMakeLists.txt +++ b/core/src/index/CMakeLists.txt @@ -22,7 +22,7 @@ cmake_minimum_required(VERSION 3.12) message(STATUS "------------------------------KNOWHERE-----------------------------------") message(STATUS "Building using CMake version: ${CMAKE_VERSION}") -set(KNOWHERE_VERSION "0.5.0") +set(KNOWHERE_VERSION "0.6.0") string(REGEX MATCH "^[0-9]+\\.[0-9]+\\.[0-9]+" KNOWHERE_BASE_VERSION "${KNOWHERE_VERSION}") project(knowhere VERSION "${KNOWHERE_BASE_VERSION}" LANGUAGES C CXX) set(CMAKE_CXX_STANDARD 14) @@ -72,17 +72,17 @@ include(ExternalProject) include(DefineOptionsCore) include(BuildUtilsCore) -set(KNOWHERE_GPU_VERSION false) -if (MILVUS_CPU_VERSION OR KNOWHERE_CPU_VERSION) - message(STATUS "Building Knowhere CPU version") - add_compile_definitions("MILVUS_CPU_VERSION") -else () +set(KNOWHERE_CPU_VERSION false) +if (MILVUS_GPU_VERSION OR KNOWHERE_GPU_VERSION) message(STATUS "Building Knowhere GPU version") add_compile_definitions("MILVUS_GPU_VERSION") - set(KNOWHERE_GPU_VERSION true) enable_language(CUDA) find_package(CUDA 10 REQUIRED) set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda") +else () + message(STATUS "Building Knowhere CPU version") + set(KNOWHERE_CPU_VERSION true) + add_compile_definitions("MILVUS_CPU_VERSION") endif () include(ThirdPartyPackagesCore) diff --git a/core/src/index/cmake/DefineOptionsCore.cmake b/core/src/index/cmake/DefineOptionsCore.cmake index 99d1911d85..e49b3a779a 100644 --- a/core/src/index/cmake/DefineOptionsCore.cmake +++ b/core/src/index/cmake/DefineOptionsCore.cmake @@ -41,12 +41,12 @@ macro(define_option_string name description default) endmacro() #---------------------------------------------------------------------- -set_option_category("CPU version") +set_option_category("GPU version") -if (MILVUS_CPU_VERSION) - define_option(KNOWHERE_CPU_VERSION "Build CPU version only" ON) +if (MILVUS_GPU_VERSION) + define_option(KNOWHERE_GPU_VERSION "Build GPU version" ON) else () - define_option(KNOWHERE_CPU_VERSION "Build CPU version only" OFF) + define_option(KNOWHERE_GPU_VERSION "Build GPU version" OFF) endif () #---------------------------------------------------------------------- @@ -81,17 +81,6 @@ define_option(KNOWHERE_WITH_FAISS_GPU_VERSION "Build with FAISS GPU version" ON) define_option(BUILD_FAISS_WITH_MKL "Build FAISS with MKL" OFF) -#---------------------------------------------------------------------- -if (MSVC) - set_option_category("MSVC") - - define_option(MSVC_LINK_VERBOSE - "Pass verbose linking options when linking libraries and executables" - OFF) - - define_option(KNOWHERE_USE_STATIC_CRT "Build KNOWHERE with statically linked CRT" OFF) -endif () - 
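# Illustrative sketch, not part of the patch: the option polarity above is now GPU-opt-in
# rather than CPU-opt-in. Assuming define_option() exposes an ordinary CMake cache option,
# the two Knowhere configurations would be selected roughly like this:
#   cmake -DKNOWHERE_GPU_VERSION=ON ..   # defines MILVUS_GPU_VERSION and enables CUDA
#   cmake ..                             # default: CPU build, defines MILVUS_CPU_VERSION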
#---------------------------------------------------------------------- set_option_category("Test and benchmark") diff --git a/core/src/main.cpp b/core/src/main.cpp index 9bb457cdda..c5b2d2dffe 100644 --- a/core/src/main.cpp +++ b/core/src/main.cpp @@ -51,7 +51,13 @@ print_banner() { std::cout << " /_/ /_/___/____/___/\\____/___/ " << std::endl; std::cout << std::endl; std::cout << "Welcome to Milvus!" << std::endl; - std::cout << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME << std::endl; + std::cout << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME << ", with " +#ifdef WITH_MKL + << "MKL" +#else + << "OpenBLAS" +#endif + << " library." << std::endl; #ifdef MILVUS_CPU_VERSION std::cout << "You are using Milvus CPU version" << std::endl; #else @@ -137,7 +143,7 @@ main(int argc, char* argv[]) { s = server.Start(); if (s.ok()) { - std::cout << "Milvus server start successfully." << std::endl; + std::cout << "Milvus server started successfully!" << std::endl; } else { goto FAIL; } diff --git a/core/src/scheduler/job/SearchJob.cpp b/core/src/scheduler/job/SearchJob.cpp index 47c825c122..ec93c69f55 100644 --- a/core/src/scheduler/job/SearchJob.cpp +++ b/core/src/scheduler/job/SearchJob.cpp @@ -49,13 +49,21 @@ void SearchJob::SearchDone(size_t index_id) { std::unique_lock lock(mutex_); index_files_.erase(index_id); - cv_.notify_all(); + if (index_files_.empty()) { + cv_.notify_all(); + } + SERVER_LOG_DEBUG << "SearchJob " << id() << " finish index file: " << index_id; } -ResultSet& -SearchJob::GetResult() { - return result_; +ResultIds& +SearchJob::GetResultIds() { + return result_ids_; +} + +ResultDistances& +SearchJob::GetResultDistances() { + return result_distances_; } Status& diff --git a/core/src/scheduler/job/SearchJob.h b/core/src/scheduler/job/SearchJob.h index 1e586090b9..ff5ab34131 100644 --- a/core/src/scheduler/job/SearchJob.h +++ b/core/src/scheduler/job/SearchJob.h @@ -29,6 +29,7 @@ #include #include "Job.h" +#include "db/Types.h" #include "db/meta/MetaTypes.h" namespace milvus { @@ -37,9 +38,9 @@ namespace scheduler { using engine::meta::TableFileSchemaPtr; using Id2IndexMap = std::unordered_map; -using IdDistPair = std::pair; -using Id2DistVec = std::vector; -using ResultSet = std::vector; + +using ResultIds = engine::ResultIds; +using ResultDistances = engine::ResultDistances; class SearchJob : public Job { public: @@ -55,8 +56,11 @@ class SearchJob : public Job { void SearchDone(size_t index_id); - ResultSet& - GetResult(); + ResultIds& + GetResultIds(); + + ResultDistances& + GetResultDistances(); Status& GetStatus(); @@ -90,6 +94,11 @@ class SearchJob : public Job { return index_files_; } + std::mutex& + mutex() { + return mutex_; + } + private: uint64_t topk_ = 0; uint64_t nq_ = 0; @@ -99,7 +108,8 @@ class SearchJob : public Job { Id2IndexMap index_files_; // TODO: column-base better ? - ResultSet result_; + ResultIds result_ids_; + ResultDistances result_distances_; Status status_; std::mutex mutex_; diff --git a/core/src/scheduler/task/SearchTask.cpp b/core/src/scheduler/task/SearchTask.cpp index 1bf1caff76..08bc6525aa 100644 --- a/core/src/scheduler/task/SearchTask.cpp +++ b/core/src/scheduler/task/SearchTask.cpp @@ -219,8 +219,11 @@ XSearchTask::Execute() { // step 3: pick up topk result auto spec_k = index_engine_->Count() < topk ? 
index_engine_->Count() : topk; - XSearchTask::MergeTopkToResultSet(output_ids, output_distance, spec_k, nq, topk, metric_l2, - search_job->GetResult()); + { + std::unique_lock lock(search_job->mutex()); + XSearchTask::MergeTopkToResultSet(output_ids, output_distance, spec_k, nq, topk, metric_l2, + search_job->GetResultIds(), search_job->GetResultDistances()); + } span = rc.RecordSection(hdr + ", reduce topk"); // search_job->AccumReduceCost(span); @@ -240,71 +243,69 @@ XSearchTask::Execute() { } void -XSearchTask::MergeTopkToResultSet(const std::vector& input_ids, const std::vector& input_distance, - uint64_t input_k, uint64_t nq, uint64_t topk, bool ascending, - scheduler::ResultSet& result) { - if (result.empty()) { - result.resize(nq); +XSearchTask::MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const scheduler::ResultDistances& src_distances, + size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids, + scheduler::ResultDistances& tar_distances) { + if (src_ids.empty()) { + return; } + size_t tar_k = tar_ids.size() / nq; + size_t buf_k = std::min(topk, src_k + tar_k); + + scheduler::ResultIds buf_ids(nq * buf_k, -1); + scheduler::ResultDistances buf_distances(nq * buf_k, 0.0); + for (uint64_t i = 0; i < nq; i++) { - scheduler::Id2DistVec result_buf; - auto& result_i = result[i]; + size_t buf_k_j = 0, src_k_j = 0, tar_k_j = 0; + size_t buf_idx, src_idx, tar_idx; - if (result[i].empty()) { - result_buf.resize(input_k, scheduler::IdDistPair(-1, 0.0)); - uint64_t input_k_multi_i = topk * i; - for (auto k = 0; k < input_k; ++k) { - uint64_t idx = input_k_multi_i + k; - auto& result_buf_item = result_buf[k]; - result_buf_item.first = input_ids[idx]; - result_buf_item.second = input_distance[idx]; + size_t buf_k_multi_i = buf_k * i; + size_t src_k_multi_i = topk * i; + size_t tar_k_multi_i = tar_k * i; + + while (buf_k_j < buf_k && src_k_j < src_k && tar_k_j < tar_k) { + src_idx = src_k_multi_i + src_k_j; + tar_idx = tar_k_multi_i + tar_k_j; + buf_idx = buf_k_multi_i + buf_k_j; + + if ((ascending && src_distances[src_idx] < tar_distances[tar_idx]) || + (!ascending && src_distances[src_idx] > tar_distances[tar_idx])) { + buf_ids[buf_idx] = src_ids[src_idx]; + buf_distances[buf_idx] = src_distances[src_idx]; + src_k_j++; + } else { + buf_ids[buf_idx] = tar_ids[tar_idx]; + buf_distances[buf_idx] = tar_distances[tar_idx]; + tar_k_j++; } - } else { - size_t tar_size = result_i.size(); - uint64_t output_k = std::min(topk, input_k + tar_size); - result_buf.resize(output_k, scheduler::IdDistPair(-1, 0.0)); - size_t buf_k = 0, src_k = 0, tar_k = 0; - uint64_t src_idx; - uint64_t input_k_multi_i = topk * i; - while (buf_k < output_k && src_k < input_k && tar_k < tar_size) { - src_idx = input_k_multi_i + src_k; - auto& result_buf_item = result_buf[buf_k]; - auto& result_item = result_i[tar_k]; - if ((ascending && input_distance[src_idx] < result_item.second) || - (!ascending && input_distance[src_idx] > result_item.second)) { - result_buf_item.first = input_ids[src_idx]; - result_buf_item.second = input_distance[src_idx]; - src_k++; - } else { - result_buf_item = result_item; - tar_k++; + buf_k_j++; + } + + if (buf_k_j < buf_k) { + if (src_k_j < src_k) { + while (buf_k_j < buf_k && src_k_j < src_k) { + buf_idx = buf_k_multi_i + buf_k_j; + src_idx = src_k_multi_i + src_k_j; + buf_ids[buf_idx] = src_ids[src_idx]; + buf_distances[buf_idx] = src_distances[src_idx]; + src_k_j++; + buf_k_j++; } - buf_k++; - } - - if (buf_k < output_k) { - if (src_k < input_k) { - while 
(buf_k < output_k && src_k < input_k) { - src_idx = input_k_multi_i + src_k; - auto& result_buf_item = result_buf[buf_k]; - result_buf_item.first = input_ids[src_idx]; - result_buf_item.second = input_distance[src_idx]; - src_k++; - buf_k++; - } - } else { - while (buf_k < output_k && tar_k < tar_size) { - result_buf[buf_k] = result_i[tar_k]; - tar_k++; - buf_k++; - } + } else { + while (buf_k_j < buf_k && tar_k_j < tar_k) { + buf_idx = buf_k_multi_i + buf_k_j; + tar_idx = tar_k_multi_i + tar_k_j; + buf_ids[buf_idx] = tar_ids[tar_idx]; + buf_distances[buf_idx] = tar_distances[tar_idx]; + tar_k_j++; + buf_k_j++; } } } - - result_i.swap(result_buf); } + tar_ids.swap(buf_ids); + tar_distances.swap(buf_distances); } // void diff --git a/core/src/scheduler/task/SearchTask.h b/core/src/scheduler/task/SearchTask.h index bbc8b5bd8f..bd51137341 100644 --- a/core/src/scheduler/task/SearchTask.h +++ b/core/src/scheduler/task/SearchTask.h @@ -39,8 +39,9 @@ class XSearchTask : public Task { public: static void - MergeTopkToResultSet(const std::vector& input_ids, const std::vector& input_distance, - uint64_t input_k, uint64_t nq, uint64_t topk, bool ascending, scheduler::ResultSet& result); + MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const scheduler::ResultDistances& src_distances, + size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids, + scheduler::ResultDistances& tar_distances); // static void // MergeTopkArray(std::vector& tar_ids, std::vector& tar_distance, uint64_t& tar_input_k, diff --git a/core/src/sdk/examples/CMakeLists.txt b/core/src/sdk/examples/CMakeLists.txt index aa15190178..a394f1ce9b 100644 --- a/core/src/sdk/examples/CMakeLists.txt +++ b/core/src/sdk/examples/CMakeLists.txt @@ -17,5 +17,7 @@ # under the License. #------------------------------------------------------------------------------- +aux_source_directory(${MILVUS_SOURCE_DIR}/src/sdk/examples/utils util_files) -add_subdirectory(grpcsimple) +add_subdirectory(simple) +add_subdirectory(partition) diff --git a/core/src/sdk/examples/grpcsimple/src/ClientTest.cpp b/core/src/sdk/examples/grpcsimple/src/ClientTest.cpp deleted file mode 100644 index 069283200f..0000000000 --- a/core/src/sdk/examples/grpcsimple/src/ClientTest.cpp +++ /dev/null @@ -1,371 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
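// Illustrative, self-contained sketch (not part of the patch) of what the MergeTopkToResultSet
// rewrite above does for a single query: a two-pointer merge of two distance-sorted top-k lists
// into at most `topk` entries. In the real code the per-query lists live at stride `topk`
// (source) and `tar_k` (accumulated target) inside flat arrays, and the merge is serialized with
// search_job->mutex() because several XSearchTask instances may report into the same SearchJob
// concurrently.
#include <algorithm>
#include <cstdint>
#include <vector>

static void
MergeOneQuery(const std::vector<int64_t>& src_ids, const std::vector<float>& src_dist,
              const std::vector<int64_t>& tar_ids, const std::vector<float>& tar_dist,
              size_t topk, bool ascending,
              std::vector<int64_t>& out_ids, std::vector<float>& out_dist) {
    size_t buf_k = std::min(topk, src_ids.size() + tar_ids.size());
    out_ids.assign(buf_k, -1);
    out_dist.assign(buf_k, 0.0f);
    size_t s = 0, t = 0;
    for (size_t b = 0; b < buf_k; ++b) {
        // take from src when tar is exhausted, or when src's next distance is better
        bool take_src = (t >= tar_ids.size()) ||
                        (s < src_ids.size() &&
                         (ascending ? src_dist[s] < tar_dist[t] : src_dist[s] > tar_dist[t]));
        if (take_src) {
            out_ids[b] = src_ids[s];
            out_dist[b] = src_dist[s++];
        } else {
            out_ids[b] = tar_ids[t];
            out_dist[b] = tar_dist[t++];
        }
    }
}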
- -#include "sdk/examples/grpcsimple/src/ClientTest.h" -#include "MilvusApi.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -//#define SET_VECTOR_IDS; - -namespace { -const std::string& -GetTableName(); - -const char* TABLE_NAME = GetTableName().c_str(); -constexpr int64_t TABLE_DIMENSION = 512; -constexpr int64_t TABLE_INDEX_FILE_SIZE = 1024; -constexpr int64_t BATCH_ROW_COUNT = 100000; -constexpr int64_t NQ = 5; -constexpr int64_t TOP_K = 10; -constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different -constexpr int64_t ADD_VECTOR_LOOP = 5; -constexpr int64_t SECONDS_EACH_HOUR = 3600; -constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8; -constexpr int32_t N_LIST = 15000; - -#define BLOCK_SPLITER std::cout << "===========================================" << std::endl; - -void -PrintTableSchema(const milvus::TableSchema& tb_schema) { - BLOCK_SPLITER - std::cout << "Table name: " << tb_schema.table_name << std::endl; - std::cout << "Table dimension: " << tb_schema.dimension << std::endl; - BLOCK_SPLITER -} - -void -PrintSearchResult(const std::vector>& search_record_array, - const std::vector& topk_query_result_array) { - BLOCK_SPLITER - std::cout << "Returned result count: " << topk_query_result_array.size() << std::endl; - - int32_t index = 0; - for (auto& result : topk_query_result_array) { - auto search_id = search_record_array[index].first; - index++; - std::cout << "No." << std::to_string(index) << " vector " << std::to_string(search_id) << " top " - << std::to_string(result.query_result_arrays.size()) << " search result:" << std::endl; - for (auto& item : result.query_result_arrays) { - std::cout << "\t" << std::to_string(item.id) << "\tdistance:" << std::to_string(item.distance); - std::cout << std::endl; - } - } - - BLOCK_SPLITER -} - -std::string -CurrentTime() { - time_t tt; - time(&tt); - tt = tt + 8 * SECONDS_EACH_HOUR; - tm t; - gmtime_r(&tt, &t); - - std::string str = std::to_string(t.tm_year + 1900) + "_" + std::to_string(t.tm_mon + 1) + "_" + - std::to_string(t.tm_mday) + "_" + std::to_string(t.tm_hour) + "_" + std::to_string(t.tm_min) + - "_" + std::to_string(t.tm_sec); - - return str; -} - -std::string -CurrentTmDate(int64_t offset_day = 0) { - time_t tt; - time(&tt); - tt = tt + 8 * SECONDS_EACH_HOUR; - tt = tt + 24 * SECONDS_EACH_HOUR * offset_day; - tm t; - gmtime_r(&tt, &t); - - std::string str = - std::to_string(t.tm_year + 1900) + "-" + std::to_string(t.tm_mon + 1) + "-" + std::to_string(t.tm_mday); - - return str; -} - -const std::string& -GetTableName() { - static std::string s_id("tbl_" + CurrentTime()); - return s_id; -} - -milvus::TableSchema -BuildTableSchema() { - milvus::TableSchema tb_schema; - tb_schema.table_name = TABLE_NAME; - tb_schema.dimension = TABLE_DIMENSION; - tb_schema.index_file_size = TABLE_INDEX_FILE_SIZE; - tb_schema.metric_type = milvus::MetricType::L2; - - return tb_schema; -} - -void -BuildVectors(int64_t from, int64_t to, std::vector& vector_record_array) { - if (to <= from) { - return; - } - - vector_record_array.clear(); - for (int64_t k = from; k < to; k++) { - milvus::RowRecord record; - record.data.resize(TABLE_DIMENSION); - for (int64_t i = 0; i < TABLE_DIMENSION; i++) { - record.data[i] = (float)(k % (i + 1)); - } - - vector_record_array.emplace_back(record); - } -} - -void -Sleep(int seconds) { - std::cout << "Waiting " << seconds << " seconds ..." 
<< std::endl; - sleep(seconds); -} - -class TimeRecorder { - public: - explicit TimeRecorder(const std::string& title) : title_(title) { - start_ = std::chrono::system_clock::now(); - } - - ~TimeRecorder() { - std::chrono::system_clock::time_point end = std::chrono::system_clock::now(); - int64_t span = (std::chrono::duration_cast(end - start_)).count(); - std::cout << title_ << " totally cost: " << span << " ms" << std::endl; - } - - private: - std::string title_; - std::chrono::system_clock::time_point start_; -}; - -void -CheckResult(const std::vector>& search_record_array, - const std::vector& topk_query_result_array) { - BLOCK_SPLITER - int64_t index = 0; - for (auto& result : topk_query_result_array) { - auto result_id = result.query_result_arrays[0].id; - auto search_id = search_record_array[index++].first; - if (result_id != search_id) { - std::cout << "The top 1 result is wrong: " << result_id << " vs. " << search_id << std::endl; - } else { - std::cout << "Check result sucessfully" << std::endl; - } - } - BLOCK_SPLITER -} - -void -DoSearch(std::shared_ptr conn, - const std::vector>& search_record_array, const std::string& phase_name) { - std::vector query_range_array; - milvus::Range rg; - rg.start_value = CurrentTmDate(); - rg.end_value = CurrentTmDate(1); - query_range_array.emplace_back(rg); - - std::vector record_array; - for (auto& pair : search_record_array) { - record_array.push_back(pair.second); - } - - auto start = std::chrono::high_resolution_clock::now(); - std::vector topk_query_result_array; - { - TimeRecorder rc(phase_name); - milvus::Status stat = - conn->Search(TABLE_NAME, record_array, query_range_array, TOP_K, 32, topk_query_result_array); - std::cout << "SearchVector function call status: " << stat.message() << std::endl; - } - auto finish = std::chrono::high_resolution_clock::now(); - std::cout << "SEARCHVECTOR COST: " - << std::chrono::duration_cast>(finish - start).count() << "s\n"; - - PrintSearchResult(search_record_array, topk_query_result_array); - CheckResult(search_record_array, topk_query_result_array); -} -} // namespace - -void -ClientTest::Test(const std::string& address, const std::string& port) { - std::shared_ptr conn = milvus::Connection::Create(); - - { // connect server - milvus::ConnectParam param = {address, port}; - milvus::Status stat = conn->Connect(param); - std::cout << "Connect function call status: " << stat.message() << std::endl; - } - - { // server version - std::string version = conn->ServerVersion(); - std::cout << "Server version: " << version << std::endl; - } - - { // sdk version - std::string version = conn->ClientVersion(); - std::cout << "SDK version: " << version << std::endl; - } - - { - std::vector tables; - milvus::Status stat = conn->ShowTables(tables); - std::cout << "ShowTables function call status: " << stat.message() << std::endl; - std::cout << "All tables: " << std::endl; - for (auto& table : tables) { - int64_t row_count = 0; - // conn->DropTable(table); - stat = conn->CountTable(table, row_count); - std::cout << "\t" << table << "(" << row_count << " rows)" << std::endl; - } - } - - { // create table - milvus::TableSchema tb_schema = BuildTableSchema(); - milvus::Status stat = conn->CreateTable(tb_schema); - std::cout << "CreateTable function call status: " << stat.message() << std::endl; - PrintTableSchema(tb_schema); - - bool has_table = conn->HasTable(tb_schema.table_name); - if (has_table) { - std::cout << "Table is created" << std::endl; - } - } - - { // describe table - milvus::TableSchema tb_schema; - 
milvus::Status stat = conn->DescribeTable(TABLE_NAME, tb_schema); - std::cout << "DescribeTable function call status: " << stat.message() << std::endl; - PrintTableSchema(tb_schema); - } - - std::vector> search_record_array; - { // insert vectors - for (int i = 0; i < ADD_VECTOR_LOOP; i++) { // add vectors - std::vector record_array; - int64_t begin_index = i * BATCH_ROW_COUNT; - BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array); - -#ifdef SET_VECTOR_IDS - record_ids.resize(ADD_VECTOR_LOOP * BATCH_ROW_COUNT); - for (auto j = begin_index; j < begin_index + BATCH_ROW_COUNT; j++) { - record_ids[i * BATCH_ROW_COUNT + j] = i * BATCH_ROW_COUNT + j; - } -#endif - - std::vector record_ids; - // generate user defined ids - for (int k = 0; k < BATCH_ROW_COUNT; k++) { - record_ids.push_back(i * BATCH_ROW_COUNT + k); - } - - auto start = std::chrono::high_resolution_clock::now(); - - milvus::Status stat = conn->Insert(TABLE_NAME, record_array, record_ids); - auto finish = std::chrono::high_resolution_clock::now(); - std::cout << "InsertVector cost: " - << std::chrono::duration_cast>(finish - start).count() << "s\n"; - - std::cout << "InsertVector function call status: " << stat.message() << std::endl; - std::cout << "Returned id array count: " << record_ids.size() << std::endl; - - if (search_record_array.size() < NQ) { - search_record_array.push_back(std::make_pair(record_ids[SEARCH_TARGET], record_array[SEARCH_TARGET])); - } - } - } - - { // search vectors without index - Sleep(2); - - int64_t row_count = 0; - milvus::Status stat = conn->CountTable(TABLE_NAME, row_count); - std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl; - // DoSearch(conn, search_record_array, "Search without index"); - } - - { // wait unit build index finish - std::cout << "Wait until create all index done" << std::endl; - milvus::IndexParam index; - index.table_name = TABLE_NAME; - index.index_type = INDEX_TYPE; - index.nlist = N_LIST; - milvus::Status stat = conn->CreateIndex(index); - std::cout << "CreateIndex function call status: " << stat.message() << std::endl; - - milvus::IndexParam index2; - stat = conn->DescribeIndex(TABLE_NAME, index2); - std::cout << "DescribeIndex function call status: " << stat.message() << std::endl; - } - - { // preload table - milvus::Status stat = conn->PreloadTable(TABLE_NAME); - std::cout << "PreloadTable function call status: " << stat.message() << std::endl; - } - - { // search vectors after build index finish - for (uint64_t i = 0; i < 5; ++i) { - DoSearch(conn, search_record_array, "Search after build index finish"); - } - // std::cout << conn->DumpTaskTables() << std::endl; - } - - { // delete index - milvus::Status stat = conn->DropIndex(TABLE_NAME); - std::cout << "DropIndex function call status: " << stat.message() << std::endl; - - int64_t row_count = 0; - stat = conn->CountTable(TABLE_NAME, row_count); - std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl; - } - - { // delete by range - milvus::Range rg; - rg.start_value = CurrentTmDate(-3); - rg.end_value = CurrentTmDate(-2); - - milvus::Status stat = conn->DeleteByRange(rg, TABLE_NAME); - std::cout << "DeleteByRange function call status: " << stat.message() << std::endl; - } - - { - // delete table - // Status stat = conn->DropTable(TABLE_NAME); - // std::cout << "DeleteTable function call status: " << stat.message() << std::endl; - } - - { // server status - std::string status = conn->ServerStatus(); - std::cout << "Server status before disconnect: " << status << 
std::endl; - } - milvus::Connection::Destroy(conn); - { // server status - std::string status = conn->ServerStatus(); - std::cout << "Server status after disconnect: " << status << std::endl; - } -} diff --git a/core/src/sdk/examples/partition/CMakeLists.txt b/core/src/sdk/examples/partition/CMakeLists.txt new file mode 100644 index 0000000000..dc5ea46a62 --- /dev/null +++ b/core/src/sdk/examples/partition/CMakeLists.txt @@ -0,0 +1,34 @@ +#------------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +#------------------------------------------------------------------------------- + + +aux_source_directory(src src_files) + +add_executable(sdk_partition + main.cpp + ${src_files} + ${util_files} + ) + +target_link_libraries(sdk_partition + milvus_sdk + pthread + ) + +install(TARGETS sdk_partition DESTINATION bin) diff --git a/core/src/sdk/examples/partition/main.cpp b/core/src/sdk/examples/partition/main.cpp new file mode 100644 index 0000000000..f0de9b1fc4 --- /dev/null +++ b/core/src/sdk/examples/partition/main.cpp @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include +#include +#include +#include + +#include "sdk/examples/partition/src/ClientTest.h" + +void +print_help(const std::string& app_name); + +int +main(int argc, char* argv[]) { + printf("Client start...\n"); + + std::string app_name = basename(argv[0]); + static struct option long_options[] = {{"server", optional_argument, nullptr, 's'}, + {"port", optional_argument, nullptr, 'p'}, + {"help", no_argument, nullptr, 'h'}, + {nullptr, 0, nullptr, 0}}; + + int option_index = 0; + std::string address = "127.0.0.1", port = "19530"; + app_name = argv[0]; + + int value; + while ((value = getopt_long(argc, argv, "s:p:h", long_options, &option_index)) != -1) { + switch (value) { + case 's': { + char* address_ptr = strdup(optarg); + address = address_ptr; + free(address_ptr); + break; + } + case 'p': { + char* port_ptr = strdup(optarg); + port = port_ptr; + free(port_ptr); + break; + } + case 'h': + default: + print_help(app_name); + return EXIT_SUCCESS; + } + } + + ClientTest test; + test.Test(address, port); + + printf("Client stop...\n"); + return 0; +} + +void +print_help(const std::string& app_name) { + printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str()); + printf(" Options:\n"); + printf(" -s --server Server address, default 127.0.0.1\n"); + printf(" -p --port Server port, default 19530\n"); + printf(" -h --help Print help information\n"); + printf("\n"); +} diff --git a/core/src/sdk/examples/partition/src/ClientTest.cpp b/core/src/sdk/examples/partition/src/ClientTest.cpp new file mode 100644 index 0000000000..6e4a7d1826 --- /dev/null +++ b/core/src/sdk/examples/partition/src/ClientTest.cpp @@ -0,0 +1,205 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include "sdk/examples/partition/src/ClientTest.h" +#include "MilvusApi.h" +#include "sdk/examples/utils/Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace { + +const char* TABLE_NAME = milvus_sdk::Utils::GenTableName().c_str(); + +constexpr int64_t TABLE_DIMENSION = 512; +constexpr int64_t TABLE_INDEX_FILE_SIZE = 1024; +constexpr milvus::MetricType TABLE_METRIC_TYPE = milvus::MetricType::L2; +constexpr int64_t BATCH_ROW_COUNT = 10000; +constexpr int64_t NQ = 5; +constexpr int64_t TOP_K = 10; +constexpr int64_t NPROBE = 32; +constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different +constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8; +constexpr int32_t N_LIST = 15000; +constexpr int32_t PARTITION_COUNT = 5; +constexpr int32_t TARGET_PARTITION = 3; + +milvus::TableSchema +BuildTableSchema() { + milvus::TableSchema tb_schema = {TABLE_NAME, TABLE_DIMENSION, TABLE_INDEX_FILE_SIZE, TABLE_METRIC_TYPE}; + return tb_schema; +} + +milvus::PartitionParam +BuildPartitionParam(int32_t index) { + std::string tag = std::to_string(index); + std::string partition_name = std::string(TABLE_NAME) + "_" + tag; + milvus::PartitionParam partition_param = {TABLE_NAME, partition_name, tag}; + return partition_param; +} + +milvus::IndexParam +BuildIndexParam() { + milvus::IndexParam index_param = {TABLE_NAME, INDEX_TYPE, N_LIST}; + return index_param; +} + +} // namespace + +void +ClientTest::Test(const std::string& address, const std::string& port) { + std::shared_ptr conn = milvus::Connection::Create(); + + milvus::Status stat; + { // connect server + milvus::ConnectParam param = {address, port}; + stat = conn->Connect(param); + std::cout << "Connect function call status: " << stat.message() << std::endl; + } + + { // create table + milvus::TableSchema tb_schema = BuildTableSchema(); + stat = conn->CreateTable(tb_schema); + std::cout << "CreateTable function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintTableSchema(tb_schema); + } + + { // create partition + for (int32_t i = 0; i < PARTITION_COUNT; i++) { + milvus::PartitionParam partition_param = BuildPartitionParam(i); + stat = conn->CreatePartition(partition_param); + std::cout << "CreatePartition function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintPartitionParam(partition_param); + } + } + + { // insert vectors + milvus_sdk::TimeRecorder rc("All vectors"); + for (int i = 0; i < PARTITION_COUNT * 5; i++) { + std::vector record_array; + std::vector record_ids; + int64_t begin_index = i * BATCH_ROW_COUNT; + { // generate vectors + milvus_sdk::TimeRecorder rc("Build vectors No." + std::to_string(i)); + milvus_sdk::Utils::BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array, record_ids, + TABLE_DIMENSION); + } + + std::string title = "Insert " + std::to_string(record_array.size()) + " vectors No." 
+ std::to_string(i); + milvus_sdk::TimeRecorder rc(title); + stat = conn->Insert(TABLE_NAME, std::to_string(i % PARTITION_COUNT), record_array, record_ids); + } + } + + std::vector> search_record_array; + { // build search vectors + std::vector record_array; + std::vector record_ids; + int64_t index = TARGET_PARTITION * BATCH_ROW_COUNT + SEARCH_TARGET; + milvus_sdk::Utils::BuildVectors(index, index + 1, record_array, record_ids, TABLE_DIMENSION); + search_record_array.push_back(std::make_pair(record_ids[0], record_array[0])); + } + + milvus_sdk::Utils::Sleep(3); + + { // table row count + int64_t row_count = 0; + stat = conn->CountTable(TABLE_NAME, row_count); + std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl; + } + + { // search vectors + std::cout << "Search in correct partition" << std::endl; + std::vector partiton_tags = {std::to_string(TARGET_PARTITION)}; + std::vector topk_query_result_array; + milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array, + topk_query_result_array); + std::cout << "Search in wrong partition" << std::endl; + partiton_tags = {"0"}; + milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array, + topk_query_result_array); + + std::cout << "Search by regex matched partition tag" << std::endl; + partiton_tags = {"\\d"}; + milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array, + topk_query_result_array); + } + + { // wait unit build index finish + std::cout << "Wait until create all index done" << std::endl; + milvus::IndexParam index1 = BuildIndexParam(); + milvus_sdk::Utils::PrintIndexParam(index1); + stat = conn->CreateIndex(index1); + std::cout << "CreateIndex function call status: " << stat.message() << std::endl; + + milvus::IndexParam index2; + stat = conn->DescribeIndex(TABLE_NAME, index2); + std::cout << "DescribeIndex function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintIndexParam(index2); + } + + { // table row count + int64_t row_count = 0; + stat = conn->CountTable(TABLE_NAME, row_count); + std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl; + } + + { // drop partition + milvus::PartitionParam param1 = {TABLE_NAME, "", std::to_string(TARGET_PARTITION)}; + milvus_sdk::Utils::PrintPartitionParam(param1); + stat = conn->DropPartition(param1); + std::cout << "DropPartition function call status: " << stat.message() << std::endl; + } + + { // table row count + int64_t row_count = 0; + stat = conn->CountTable(TABLE_NAME, row_count); + std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl; + } + + { // search vectors + std::cout << "Search in whole table" << std::endl; + std::vector partiton_tags; + std::vector topk_query_result_array; + milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array, + topk_query_result_array); + } + + { // drop index + stat = conn->DropIndex(TABLE_NAME); + std::cout << "DropIndex function call status: " << stat.message() << std::endl; + + int64_t row_count = 0; + stat = conn->CountTable(TABLE_NAME, row_count); + std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl; + } + + { // drop table + stat = conn->DropTable(TABLE_NAME); + std::cout << "DropTable function call status: " << stat.message() << std::endl; + } + + milvus::Connection::Destroy(conn); +} diff --git a/core/src/sdk/examples/grpcsimple/src/ClientTest.h b/core/src/sdk/examples/partition/src/ClientTest.h similarity 
index 100% rename from core/src/sdk/examples/grpcsimple/src/ClientTest.h rename to core/src/sdk/examples/partition/src/ClientTest.h diff --git a/core/src/sdk/examples/grpcsimple/CMakeLists.txt b/core/src/sdk/examples/simple/CMakeLists.txt similarity index 98% rename from core/src/sdk/examples/grpcsimple/CMakeLists.txt rename to core/src/sdk/examples/simple/CMakeLists.txt index 77542ed2a7..82680e31be 100644 --- a/core/src/sdk/examples/grpcsimple/CMakeLists.txt +++ b/core/src/sdk/examples/simple/CMakeLists.txt @@ -17,12 +17,12 @@ # under the License. #------------------------------------------------------------------------------- - aux_source_directory(src src_files) add_executable(sdk_simple main.cpp ${src_files} + ${util_files} ) target_link_libraries(sdk_simple diff --git a/core/src/sdk/examples/grpcsimple/main.cpp b/core/src/sdk/examples/simple/main.cpp similarity index 98% rename from core/src/sdk/examples/grpcsimple/main.cpp rename to core/src/sdk/examples/simple/main.cpp index c31f491afb..c08741606c 100644 --- a/core/src/sdk/examples/grpcsimple/main.cpp +++ b/core/src/sdk/examples/simple/main.cpp @@ -20,7 +20,7 @@ #include #include -#include "src/ClientTest.h" +#include "sdk/examples/simple/src/ClientTest.h" void print_help(const std::string& app_name); diff --git a/core/src/sdk/examples/simple/src/ClientTest.cpp b/core/src/sdk/examples/simple/src/ClientTest.cpp new file mode 100644 index 0000000000..9045168f2a --- /dev/null +++ b/core/src/sdk/examples/simple/src/ClientTest.cpp @@ -0,0 +1,209 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
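// Illustrative note, not part of the patch: with the partition-aware SDK signatures introduced
// in this change, the non-partitioned path exercised by the simple example below just passes an
// empty partition tag on Insert and an empty tag list on Search, roughly:
//   std::vector<std::string> no_tags;
//   conn->Insert(TABLE_NAME, "", record_array, record_ids);
//   conn->Search(TABLE_NAME, no_tags, record_array, query_range_array, TOP_K, NPROBE, results);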
+ +#include "sdk/examples/simple/src/ClientTest.h" +#include "MilvusApi.h" +#include "sdk/examples/utils/TimeRecorder.h" +#include "sdk/examples/utils/Utils.h" + +#include +#include +#include +#include +#include +#include +#include + +namespace { + +const char* TABLE_NAME = milvus_sdk::Utils::GenTableName().c_str(); + +constexpr int64_t TABLE_DIMENSION = 512; +constexpr int64_t TABLE_INDEX_FILE_SIZE = 1024; +constexpr milvus::MetricType TABLE_METRIC_TYPE = milvus::MetricType::L2; +constexpr int64_t BATCH_ROW_COUNT = 100000; +constexpr int64_t NQ = 5; +constexpr int64_t TOP_K = 10; +constexpr int64_t NPROBE = 32; +constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different +constexpr int64_t ADD_VECTOR_LOOP = 5; +constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8; +constexpr int32_t N_LIST = 15000; + +milvus::TableSchema +BuildTableSchema() { + milvus::TableSchema tb_schema = {TABLE_NAME, TABLE_DIMENSION, TABLE_INDEX_FILE_SIZE, TABLE_METRIC_TYPE}; + return tb_schema; +} + +milvus::IndexParam +BuildIndexParam() { + milvus::IndexParam index_param = {TABLE_NAME, INDEX_TYPE, N_LIST}; + return index_param; +} + +} // namespace + +void +ClientTest::Test(const std::string& address, const std::string& port) { + std::shared_ptr conn = milvus::Connection::Create(); + + milvus::Status stat; + { // connect server + milvus::ConnectParam param = {address, port}; + stat = conn->Connect(param); + std::cout << "Connect function call status: " << stat.message() << std::endl; + } + + { // server version + std::string version = conn->ServerVersion(); + std::cout << "Server version: " << version << std::endl; + } + + { // sdk version + std::string version = conn->ClientVersion(); + std::cout << "SDK version: " << version << std::endl; + } + + { // show tables + std::vector tables; + stat = conn->ShowTables(tables); + std::cout << "ShowTables function call status: " << stat.message() << std::endl; + std::cout << "All tables: " << std::endl; + for (auto& table : tables) { + int64_t row_count = 0; + // conn->DropTable(table); + stat = conn->CountTable(table, row_count); + std::cout << "\t" << table << "(" << row_count << " rows)" << std::endl; + } + } + + { // create table + milvus::TableSchema tb_schema = BuildTableSchema(); + stat = conn->CreateTable(tb_schema); + std::cout << "CreateTable function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintTableSchema(tb_schema); + + bool has_table = conn->HasTable(tb_schema.table_name); + if (has_table) { + std::cout << "Table is created" << std::endl; + } + } + + { // describe table + milvus::TableSchema tb_schema; + stat = conn->DescribeTable(TABLE_NAME, tb_schema); + std::cout << "DescribeTable function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintTableSchema(tb_schema); + } + + { // insert vectors + for (int i = 0; i < ADD_VECTOR_LOOP; i++) { + std::vector record_array; + std::vector record_ids; + int64_t begin_index = i * BATCH_ROW_COUNT; + { // generate vectors + milvus_sdk::TimeRecorder rc("Build vectors No." + std::to_string(i)); + milvus_sdk::Utils::BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array, record_ids, + TABLE_DIMENSION); + } + + std::string title = "Insert " + std::to_string(record_array.size()) + " vectors No." 
+ std::to_string(i); + milvus_sdk::TimeRecorder rc(title); + stat = conn->Insert(TABLE_NAME, "", record_array, record_ids); + std::cout << "InsertVector function call status: " << stat.message() << std::endl; + std::cout << "Returned id array count: " << record_ids.size() << std::endl; + } + } + + std::vector> search_record_array; + { // build search vectors + for (int64_t i = 0; i < NQ; i++) { + std::vector record_array; + std::vector record_ids; + int64_t index = i * BATCH_ROW_COUNT + SEARCH_TARGET; + milvus_sdk::Utils::BuildVectors(index, index + 1, record_array, record_ids, TABLE_DIMENSION); + search_record_array.push_back(std::make_pair(record_ids[0], record_array[0])); + } + } + + milvus_sdk::Utils::Sleep(3); + { // search vectors + std::vector partiton_tags; + std::vector topk_query_result_array; + milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array, + topk_query_result_array); + } + + { // wait unit build index finish + std::cout << "Wait until create all index done" << std::endl; + milvus::IndexParam index1 = BuildIndexParam(); + milvus_sdk::Utils::PrintIndexParam(index1); + stat = conn->CreateIndex(index1); + std::cout << "CreateIndex function call status: " << stat.message() << std::endl; + + milvus::IndexParam index2; + stat = conn->DescribeIndex(TABLE_NAME, index2); + std::cout << "DescribeIndex function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintIndexParam(index2); + } + + { // preload table + stat = conn->PreloadTable(TABLE_NAME); + std::cout << "PreloadTable function call status: " << stat.message() << std::endl; + } + + { // search vectors + std::vector partiton_tags; + std::vector topk_query_result_array; + milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array, + topk_query_result_array); + } + + { // drop index + stat = conn->DropIndex(TABLE_NAME); + std::cout << "DropIndex function call status: " << stat.message() << std::endl; + + int64_t row_count = 0; + stat = conn->CountTable(TABLE_NAME, row_count); + std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl; + } + + { // delete by range + milvus::Range rg; + rg.start_value = milvus_sdk::Utils::CurrentTmDate(-3); + rg.end_value = milvus_sdk::Utils::CurrentTmDate(-2); + + stat = conn->DeleteByDate(TABLE_NAME, rg); + std::cout << "DeleteByDate function call status: " << stat.message() << std::endl; + } + + { // drop table + stat = conn->DropTable(TABLE_NAME); + std::cout << "DropTable function call status: " << stat.message() << std::endl; + } + + { // server status + std::string status = conn->ServerStatus(); + std::cout << "Server status before disconnect: " << status << std::endl; + } + milvus::Connection::Destroy(conn); + { // server status + std::string status = conn->ServerStatus(); + std::cout << "Server status after disconnect: " << status << std::endl; + } +} diff --git a/core/src/sdk/examples/simple/src/ClientTest.h b/core/src/sdk/examples/simple/src/ClientTest.h new file mode 100644 index 0000000000..b028b63f44 --- /dev/null +++ b/core/src/sdk/examples/simple/src/ClientTest.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +class ClientTest { + public: + void + Test(const std::string& address, const std::string& port); +}; diff --git a/core/src/sdk/examples/utils/TimeRecorder.cpp b/core/src/sdk/examples/utils/TimeRecorder.cpp new file mode 100644 index 0000000000..cdf9eda5ec --- /dev/null +++ b/core/src/sdk/examples/utils/TimeRecorder.cpp @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "sdk/examples/utils/TimeRecorder.h" + +#include + +namespace milvus_sdk { + +TimeRecorder::TimeRecorder(const std::string& title) : title_(title) { + start_ = std::chrono::system_clock::now(); + std::cout << title_ << " begin..." << std::endl; +} + +TimeRecorder::~TimeRecorder() { + std::chrono::system_clock::time_point end = std::chrono::system_clock::now(); + int64_t span = (std::chrono::duration_cast(end - start_)).count(); + std::cout << title_ << " totally cost: " << span << " ms" << std::endl; +} + +} // namespace milvus_sdk diff --git a/core/src/sdk/examples/utils/TimeRecorder.h b/core/src/sdk/examples/utils/TimeRecorder.h new file mode 100644 index 0000000000..edfb9d2679 --- /dev/null +++ b/core/src/sdk/examples/utils/TimeRecorder.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
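// Illustrative note, not part of the patch: TimeRecorder is a small RAII timer whose constructor
// prints "<title> begin..." and whose destructor prints the elapsed milliseconds, so the examples
// above scope it around whatever block they want timed, e.g.:
//   { milvus_sdk::TimeRecorder rc("Insert batch"); /* ... perform the insert ... */ }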
+ +#pragma once + +#include +#include + +namespace milvus_sdk { + +class TimeRecorder { + public: + explicit TimeRecorder(const std::string& title); + + ~TimeRecorder(); + + private: + std::string title_; + std::chrono::system_clock::time_point start_; +}; + +} // namespace milvus_sdk diff --git a/core/src/sdk/examples/utils/Utils.cpp b/core/src/sdk/examples/utils/Utils.cpp new file mode 100644 index 0000000000..c527cf47e1 --- /dev/null +++ b/core/src/sdk/examples/utils/Utils.cpp @@ -0,0 +1,223 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "sdk/examples/utils/Utils.h" +#include "sdk/examples/utils/TimeRecorder.h" + +#include +#include +#include +#include +#include +#include +#include + +namespace milvus_sdk { + +constexpr int64_t SECONDS_EACH_HOUR = 3600; + +#define BLOCK_SPLITER std::cout << "===========================================" << std::endl; + +std::string +Utils::CurrentTime() { + time_t tt; + time(&tt); + tt = tt + 8 * SECONDS_EACH_HOUR; + tm t; + gmtime_r(&tt, &t); + + std::string str = std::to_string(t.tm_year + 1900) + "_" + std::to_string(t.tm_mon + 1) + "_" + + std::to_string(t.tm_mday) + "_" + std::to_string(t.tm_hour) + "_" + std::to_string(t.tm_min) + + "_" + std::to_string(t.tm_sec); + + return str; +} + +std::string +Utils::CurrentTmDate(int64_t offset_day) { + time_t tt; + time(&tt); + tt = tt + 8 * SECONDS_EACH_HOUR; + tt = tt + 24 * SECONDS_EACH_HOUR * offset_day; + tm t; + gmtime_r(&tt, &t); + + std::string str = + std::to_string(t.tm_year + 1900) + "-" + std::to_string(t.tm_mon + 1) + "-" + std::to_string(t.tm_mday); + + return str; +} + +void +Utils::Sleep(int seconds) { + std::cout << "Waiting " << seconds << " seconds ..." 
<< std::endl; + sleep(seconds); +} + +const std::string& +Utils::GenTableName() { + static std::string s_id("tbl_" + CurrentTime()); + return s_id; +} + +std::string +Utils::MetricTypeName(const milvus::MetricType& metric_type) { + switch (metric_type) { + case milvus::MetricType::L2: + return "L2 distance"; + case milvus::MetricType::IP: + return "Inner product"; + default: + return "Unknown metric type"; + } +} + +std::string +Utils::IndexTypeName(const milvus::IndexType& index_type) { + switch (index_type) { + case milvus::IndexType::cpu_idmap: + return "cpu idmap"; + case milvus::IndexType::gpu_ivfflat: + return "gpu ivflat"; + case milvus::IndexType::gpu_ivfsq8: + return "gpu ivfsq8"; + case milvus::IndexType::mix_nsg: + return "mix nsg"; + default: + return "Unknown index type"; + } +} + +void +Utils::PrintTableSchema(const milvus::TableSchema& tb_schema) { + BLOCK_SPLITER + std::cout << "Table name: " << tb_schema.table_name << std::endl; + std::cout << "Table dimension: " << tb_schema.dimension << std::endl; + std::cout << "Table index file size: " << tb_schema.index_file_size << std::endl; + std::cout << "Table metric type: " << MetricTypeName(tb_schema.metric_type) << std::endl; + BLOCK_SPLITER +} + +void +Utils::PrintPartitionParam(const milvus::PartitionParam& partition_param) { + BLOCK_SPLITER + std::cout << "Table name: " << partition_param.table_name << std::endl; + std::cout << "Partition name: " << partition_param.partition_name << std::endl; + std::cout << "Partition tag: " << partition_param.partition_tag << std::endl; + BLOCK_SPLITER +} + +void +Utils::PrintIndexParam(const milvus::IndexParam& index_param) { + BLOCK_SPLITER + std::cout << "Index table name: " << index_param.table_name << std::endl; + std::cout << "Index type: " << IndexTypeName(index_param.index_type) << std::endl; + std::cout << "Index nlist: " << index_param.nlist << std::endl; + BLOCK_SPLITER +} + +void +Utils::BuildVectors(int64_t from, int64_t to, std::vector& vector_record_array, + std::vector& record_ids, int64_t dimension) { + if (to <= from) { + return; + } + + vector_record_array.clear(); + record_ids.clear(); + for (int64_t k = from; k < to; k++) { + milvus::RowRecord record; + record.data.resize(dimension); + for (int64_t i = 0; i < dimension; i++) { + record.data[i] = (float)(k % (i + 1)); + } + + vector_record_array.emplace_back(record); + record_ids.push_back(k); + } +} + +void +Utils::PrintSearchResult(const std::vector>& search_record_array, + const std::vector& topk_query_result_array) { + BLOCK_SPLITER + std::cout << "Returned result count: " << topk_query_result_array.size() << std::endl; + + int32_t index = 0; + for (auto& result : topk_query_result_array) { + auto search_id = search_record_array[index].first; + index++; + std::cout << "No." 
<< std::to_string(index) << " vector " << std::to_string(search_id) << " top " + << std::to_string(result.query_result_arrays.size()) << " search result:" << std::endl; + for (auto& item : result.query_result_arrays) { + std::cout << "\t" << std::to_string(item.id) << "\tdistance:" << std::to_string(item.distance); + std::cout << std::endl; + } + } + + BLOCK_SPLITER +} + +void +Utils::CheckSearchResult(const std::vector>& search_record_array, + const std::vector& topk_query_result_array) { + BLOCK_SPLITER + int64_t index = 0; + for (auto& result : topk_query_result_array) { + auto result_id = result.query_result_arrays[0].id; + auto search_id = search_record_array[index++].first; + if (result_id != search_id) { + std::cout << "The top 1 result is wrong: " << result_id << " vs. " << search_id << std::endl; + } else { + std::cout << "Check result sucessfully" << std::endl; + } + } + BLOCK_SPLITER +} + +void +Utils::DoSearch(std::shared_ptr conn, const std::string& table_name, + const std::vector& partiton_tags, int64_t top_k, int64_t nprobe, + const std::vector>& search_record_array, + std::vector& topk_query_result_array) { + topk_query_result_array.clear(); + + std::vector query_range_array; + milvus::Range rg; + rg.start_value = CurrentTmDate(); + rg.end_value = CurrentTmDate(1); + query_range_array.emplace_back(rg); + + std::vector record_array; + for (auto& pair : search_record_array) { + record_array.push_back(pair.second); + } + + { + BLOCK_SPLITER + milvus_sdk::TimeRecorder rc("search"); + milvus::Status stat = conn->Search(table_name, partiton_tags, record_array, query_range_array, top_k, nprobe, + topk_query_result_array); + std::cout << "SearchVector function call status: " << stat.message() << std::endl; + BLOCK_SPLITER + } + + PrintSearchResult(search_record_array, topk_query_result_array); + CheckSearchResult(search_record_array, topk_query_result_array); +} + +} // namespace milvus_sdk diff --git a/core/src/sdk/examples/utils/Utils.h b/core/src/sdk/examples/utils/Utils.h new file mode 100644 index 0000000000..cab0d8810a --- /dev/null +++ b/core/src/sdk/examples/utils/Utils.h @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
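The example utilities above wrap the usual demo flow: generate vectors, insert them, run a timed search, and sanity-check the top-1 hit per query. A minimal sketch of how they might be strung together follows; it is not part of this patch, and because template arguments do not survive in this listing, the concrete element types (milvus::RowRecord, milvus::TopKQueryResult, std::shared_ptr<milvus::Connection>), the table dimension, and the connection object are all assumptions.

    #include "MilvusApi.h"
    #include "sdk/examples/utils/Utils.h"

    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    void RunDemo(const std::shared_ptr<milvus::Connection>& conn) {
        const std::string table_name = milvus_sdk::Utils::GenTableName();
        const int64_t dimension = 128;  // assumed; must match the table schema

        // Build 100 demo vectors; BuildVectors also fills the matching ids 0..99.
        std::vector<milvus::RowRecord> records;
        std::vector<int64_t> ids;
        milvus_sdk::Utils::BuildVectors(0, 100, records, ids, dimension);

        // Insert into the default partition (empty tag); passing the ids produced by
        // BuildVectors as user-supplied vector ids is an assumption of this sketch.
        conn->Insert(table_name, "", records, ids);

        // Keep (id, record) pairs so DoSearch can verify the top-1 hit per query.
        std::vector<std::pair<int64_t, milvus::RowRecord>> search_records;
        for (size_t i = 0; i < ids.size(); ++i) {
            search_records.emplace_back(ids[i], records[i]);
        }

        std::vector<std::string> partition_tags;        // empty: search the whole table
        std::vector<milvus::TopKQueryResult> results;
        milvus_sdk::Utils::DoSearch(conn, table_name, partition_tags, 10 /*top_k*/, 32 /*nprobe*/,
                                    search_records, results);
    }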
+ +#pragma once + +#include "MilvusApi.h" + +#include +#include +#include +#include + +namespace milvus_sdk { + +class Utils { + public: + static std::string + CurrentTime(); + + static std::string + CurrentTmDate(int64_t offset_day = 0); + + static const std::string& + GenTableName(); + + static void + Sleep(int seconds); + + static std::string + MetricTypeName(const milvus::MetricType& metric_type); + + static std::string + IndexTypeName(const milvus::IndexType& index_type); + + static void + PrintTableSchema(const milvus::TableSchema& tb_schema); + + static void + PrintPartitionParam(const milvus::PartitionParam& partition_param); + + static void + PrintIndexParam(const milvus::IndexParam& index_param); + + static void + BuildVectors(int64_t from, int64_t to, std::vector& vector_record_array, + std::vector& record_ids, int64_t dimension); + + static void + PrintSearchResult(const std::vector>& search_record_array, + const std::vector& topk_query_result_array); + + static void + CheckSearchResult(const std::vector>& search_record_array, + const std::vector& topk_query_result_array); + + static void + DoSearch(std::shared_ptr conn, const std::string& table_name, + const std::vector& partiton_tags, int64_t top_k, int64_t nprobe, + const std::vector>& search_record_array, + std::vector& topk_query_result_array); +}; + +} // namespace milvus_sdk diff --git a/core/src/sdk/grpc/ClientProxy.cpp b/core/src/sdk/grpc/ClientProxy.cpp index 91a11adf8c..3321a9f85e 100644 --- a/core/src/sdk/grpc/ClientProxy.cpp +++ b/core/src/sdk/grpc/ClientProxy.cpp @@ -138,8 +138,8 @@ ClientProxy::CreateIndex(const IndexParam& index_param) { } Status -ClientProxy::Insert(const std::string& table_name, const std::vector& record_array, - std::vector& id_array) { +ClientProxy::Insert(const std::string& table_name, const std::string& partition_tag, + const std::vector& record_array, std::vector& id_array) { Status status = Status::OK(); try { //////////////////////////////////////////////////////////////////////////// @@ -185,6 +185,7 @@ ClientProxy::Insert(const std::string& table_name, const std::vector& #else ::milvus::grpc::InsertParam insert_param; insert_param.set_table_name(table_name); + insert_param.set_partition_tag(partition_tag); for (auto& record : record_array) { ::milvus::grpc::RowRecord* grpc_record = insert_param.add_row_record_array(); @@ -215,15 +216,18 @@ ClientProxy::Insert(const std::string& table_name, const std::vector& } Status -ClientProxy::Search(const std::string& table_name, const std::vector& query_record_array, - const std::vector& query_range_array, int64_t topk, int64_t nprobe, - std::vector& topk_query_result_array) { +ClientProxy::Search(const std::string& table_name, const std::vector& partiton_tags, + const std::vector& query_record_array, const std::vector& query_range_array, + int64_t topk, int64_t nprobe, std::vector& topk_query_result_array) { try { // step 1: convert vectors data ::milvus::grpc::SearchParam search_param; search_param.set_table_name(table_name); search_param.set_topk(topk); search_param.set_nprobe(nprobe); + for (auto& tag : partiton_tags) { + search_param.add_partition_tag_array(tag); + } for (auto& record : query_record_array) { ::milvus::grpc::RowRecord* row_record = search_param.add_query_record_array(); for (auto& rec : record.data) { @@ -349,13 +353,13 @@ ClientProxy::DumpTaskTables() const { } Status -ClientProxy::DeleteByRange(milvus::Range& range, const std::string& table_name) { +ClientProxy::DeleteByDate(const std::string& table_name, const 
milvus::Range& range) { try { - ::milvus::grpc::DeleteByRangeParam delete_by_range_param; + ::milvus::grpc::DeleteByDateParam delete_by_range_param; delete_by_range_param.set_table_name(table_name); delete_by_range_param.mutable_range()->set_start_value(range.start_value); delete_by_range_param.mutable_range()->set_end_value(range.end_value); - return client_ptr_->DeleteByRange(delete_by_range_param); + return client_ptr_->DeleteByDate(delete_by_range_param); } catch (std::exception& ex) { return Status(StatusCode::UnknownError, "fail to delete by range: " + std::string(ex.what())); } @@ -401,4 +405,51 @@ ClientProxy::DropIndex(const std::string& table_name) const { } } +Status +ClientProxy::CreatePartition(const PartitionParam& partition_param) { + try { + ::milvus::grpc::PartitionParam grpc_partition_param; + grpc_partition_param.set_table_name(partition_param.table_name); + grpc_partition_param.set_partition_name(partition_param.partition_name); + grpc_partition_param.set_tag(partition_param.partition_tag); + Status status = client_ptr_->CreatePartition(grpc_partition_param); + return status; + } catch (std::exception& ex) { + return Status(StatusCode::UnknownError, "fail to create partition: " + std::string(ex.what())); + } +} + +Status +ClientProxy::ShowPartitions(const std::string& table_name, PartitionList& partition_array) const { + try { + ::milvus::grpc::TableName grpc_table_name; + grpc_table_name.set_table_name(table_name); + ::milvus::grpc::PartitionList grpc_partition_list; + Status status = client_ptr_->ShowPartitions(grpc_table_name, grpc_partition_list); + partition_array.resize(grpc_partition_list.partition_array_size()); + for (uint64_t i = 0; i < grpc_partition_list.partition_array_size(); ++i) { + partition_array[i].table_name = grpc_partition_list.partition_array(i).table_name(); + partition_array[i].partition_name = grpc_partition_list.partition_array(i).partition_name(); + partition_array[i].partition_tag = grpc_partition_list.partition_array(i).tag(); + } + return status; + } catch (std::exception& ex) { + return Status(StatusCode::UnknownError, "fail to show partitions: " + std::string(ex.what())); + } +} + +Status +ClientProxy::DropPartition(const PartitionParam& partition_param) { + try { + ::milvus::grpc::PartitionParam grpc_partition_param; + grpc_partition_param.set_table_name(partition_param.table_name); + grpc_partition_param.set_partition_name(partition_param.partition_name); + grpc_partition_param.set_tag(partition_param.partition_tag); + Status status = client_ptr_->DropPartition(grpc_partition_param); + return status; + } catch (std::exception& ex) { + return Status(StatusCode::UnknownError, "fail to drop partition: " + std::string(ex.what())); + } +} + } // namespace milvus diff --git a/core/src/sdk/grpc/ClientProxy.h b/core/src/sdk/grpc/ClientProxy.h index dbeacc1380..eb21e9c4b5 100644 --- a/core/src/sdk/grpc/ClientProxy.h +++ b/core/src/sdk/grpc/ClientProxy.h @@ -54,13 +54,13 @@ class ClientProxy : public Connection { CreateIndex(const IndexParam& index_param) override; Status - Insert(const std::string& table_name, const std::vector& record_array, + Insert(const std::string& table_name, const std::string& partition_tag, const std::vector& record_array, std::vector& id_array) override; Status - Search(const std::string& table_name, const std::vector& query_record_array, - const std::vector& query_range_array, int64_t topk, int64_t nprobe, - std::vector& topk_query_result_array) override; + Search(const std::string& table_name, const std::vector& 
partiton_tags, + const std::vector& query_record_array, const std::vector& query_range_array, int64_t topk, + int64_t nprobe, std::vector& topk_query_result_array) override; Status DescribeTable(const std::string& table_name, TableSchema& table_schema) override; @@ -84,7 +84,7 @@ class ClientProxy : public Connection { DumpTaskTables() const override; Status - DeleteByRange(Range& range, const std::string& table_name) override; + DeleteByDate(const std::string& table_name, const Range& range) override; Status PreloadTable(const std::string& table_name) const override; @@ -95,6 +95,15 @@ class ClientProxy : public Connection { Status DropIndex(const std::string& table_name) const override; + Status + CreatePartition(const PartitionParam& partition_param) override; + + Status + ShowPartitions(const std::string& table_name, PartitionList& partition_array) const override; + + Status + DropPartition(const PartitionParam& partition_param) override; + private: std::shared_ptr<::grpc::Channel> channel_; diff --git a/core/src/sdk/grpc/GrpcClient.cpp b/core/src/sdk/grpc/GrpcClient.cpp index 5c27c3b73f..29f378276d 100644 --- a/core/src/sdk/grpc/GrpcClient.cpp +++ b/core/src/sdk/grpc/GrpcClient.cpp @@ -259,13 +259,13 @@ GrpcClient::PreloadTable(milvus::grpc::TableName& table_name) { } Status -GrpcClient::DeleteByRange(grpc::DeleteByRangeParam& delete_by_range_param) { +GrpcClient::DeleteByDate(grpc::DeleteByDateParam& delete_by_range_param) { ClientContext context; ::milvus::grpc::Status response; - ::grpc::Status grpc_status = stub_->DeleteByRange(&context, delete_by_range_param, &response); + ::grpc::Status grpc_status = stub_->DeleteByDate(&context, delete_by_range_param, &response); if (!grpc_status.ok()) { - std::cerr << "DeleteByRange gRPC failed!" << std::endl; + std::cerr << "DeleteByDate gRPC failed!" << std::endl; return Status(StatusCode::RPCFailed, grpc_status.error_message()); } @@ -317,4 +317,57 @@ GrpcClient::DropIndex(grpc::TableName& table_name) { return Status::OK(); } +Status +GrpcClient::CreatePartition(const grpc::PartitionParam& partition_param) { + ClientContext context; + ::milvus::grpc::Status response; + ::grpc::Status grpc_status = stub_->CreatePartition(&context, partition_param, &response); + + if (!grpc_status.ok()) { + std::cerr << "CreatePartition gRPC failed!" << std::endl; + return Status(StatusCode::RPCFailed, grpc_status.error_message()); + } + + if (response.error_code() != grpc::SUCCESS) { + std::cerr << response.reason() << std::endl; + return Status(StatusCode::ServerFailed, response.reason()); + } + return Status::OK(); +} + +Status +GrpcClient::ShowPartitions(const grpc::TableName& table_name, grpc::PartitionList& partition_array) const { + ClientContext context; + ::grpc::Status grpc_status = stub_->ShowPartitions(&context, table_name, &partition_array); + + if (!grpc_status.ok()) { + std::cerr << "ShowPartitions gRPC failed!" << std::endl; + return Status(StatusCode::RPCFailed, grpc_status.error_message()); + } + + if (partition_array.status().error_code() != grpc::SUCCESS) { + std::cerr << partition_array.status().reason() << std::endl; + return Status(StatusCode::ServerFailed, partition_array.status().reason()); + } + return Status::OK(); +} + +Status +GrpcClient::DropPartition(const ::milvus::grpc::PartitionParam& partition_param) { + ClientContext context; + ::milvus::grpc::Status response; + ::grpc::Status grpc_status = stub_->DropPartition(&context, partition_param, &response); + + if (!grpc_status.ok()) { + std::cerr << "DropPartition gRPC failed!" 
<< std::endl; + return Status(StatusCode::RPCFailed, grpc_status.error_message()); + } + + if (response.error_code() != grpc::SUCCESS) { + std::cerr << response.reason() << std::endl; + return Status(StatusCode::ServerFailed, response.reason()); + } + return Status::OK(); +} + } // namespace milvus diff --git a/core/src/sdk/grpc/GrpcClient.h b/core/src/sdk/grpc/GrpcClient.h index d2e6ae5095..8599f8a53f 100644 --- a/core/src/sdk/grpc/GrpcClient.h +++ b/core/src/sdk/grpc/GrpcClient.h @@ -72,7 +72,7 @@ class GrpcClient { Cmd(std::string& result, const std::string& cmd); Status - DeleteByRange(grpc::DeleteByRangeParam& delete_by_range_param); + DeleteByDate(grpc::DeleteByDateParam& delete_by_range_param); Status PreloadTable(grpc::TableName& table_name); @@ -83,6 +83,15 @@ class GrpcClient { Status DropIndex(grpc::TableName& table_name); + Status + CreatePartition(const grpc::PartitionParam& partition_param); + + Status + ShowPartitions(const grpc::TableName& table_name, grpc::PartitionList& partition_array) const; + + Status + DropPartition(const ::milvus::grpc::PartitionParam& partition_param); + Status Disconnect(); diff --git a/core/src/sdk/include/MilvusApi.h b/core/src/sdk/include/MilvusApi.h index 68fe0e9d5c..8c92375649 100644 --- a/core/src/sdk/include/MilvusApi.h +++ b/core/src/sdk/include/MilvusApi.h @@ -64,7 +64,7 @@ struct TableSchema { /** * @brief Range information - * for DATE partition, the format is like: 'year-month-day' + * for DATE range, the format is like: 'year-month-day' */ struct Range { std::string start_value; ///< Range start @@ -102,6 +102,17 @@ struct IndexParam { int32_t nlist; }; +/** + * @brief partition parameters + */ +struct PartitionParam { + std::string table_name; + std::string partition_name; + std::string partition_tag; +}; + +using PartitionList = std::vector; + /** * @brief SDK main class */ @@ -195,7 +206,7 @@ class Connection { * * This method is used to create table * - * @param table_name, table name is going to be tested. + * @param table_name, target table's name. * * @return Indicate if table is cexist */ @@ -205,9 +216,9 @@ class Connection { /** * @brief Delete table method * - * This method is used to delete table. + * This method is used to delete table(and its partitions). * - * @param table_name, table name is going to be deleted. + * @param table_name, target table's name. * * @return Indicate if table is delete successfully. */ @@ -217,7 +228,7 @@ class Connection { /** * @brief Create index method * - * This method is used to create index for whole table + * This method is used to create index for whole table(and its partitions). * * @param IndexParam * table_name, table name is going to be create index. @@ -235,14 +246,15 @@ class Connection { * * This method is used to add vector array to table. * - * @param table_name, table_name is inserted. + * @param table_name, target table's name. + * @param partition_tag, target partition's tag, keep empty if no partition. * @param record_array, vector array is inserted. * @param id_array, after inserted every vector is given a id. * * @return Indicate if vector array are inserted successfully */ virtual Status - Insert(const std::string& table_name, const std::vector& record_array, + Insert(const std::string& table_name, const std::string& partition_tag, const std::vector& record_array, std::vector& id_array) = 0; /** @@ -250,7 +262,8 @@ class Connection { * * This method is used to query vector in table. * - * @param table_name, table_name is queried. 
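A minimal sketch of the extended Insert() call documented above, targeting a specific partition tag; it is not part of this patch, the table name, tag, and pre-built records are placeholders, and std::shared_ptr<milvus::Connection> is assumed for the connection handle.

    #include "MilvusApi.h"

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    void InsertIntoPartition(const std::shared_ptr<milvus::Connection>& conn,
                             const std::vector<milvus::RowRecord>& records) {
        std::vector<int64_t> ids;  // filled with the assigned vector ids on success
        milvus::Status st = conn->Insert("tbl_demo", "2019-11", records, ids);
        if (!st.ok()) {
            std::cerr << "Insert failed: " << st.message() << std::endl;
        }
    }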
+ * @param table_name, target table's name, keep empty if no partition. + * @param partition_tags, target partitions. * @param query_record_array, all vector are going to be queried. * @param query_range_array, time ranges, if not specified, will search in whole table * @param topk, how many similarity vectors will be searched. @@ -259,16 +272,16 @@ class Connection { * @return Indicate if query is successful. */ virtual Status - Search(const std::string& table_name, const std::vector& query_record_array, - const std::vector& query_range_array, int64_t topk, int64_t nprobe, - std::vector& topk_query_result_array) = 0; + Search(const std::string& table_name, const std::vector& partiton_tags, + const std::vector& query_record_array, const std::vector& query_range_array, int64_t topk, + int64_t nprobe, std::vector& topk_query_result_array) = 0; /** * @brief Show table description * * This method is used to show table information. * - * @param table_name, which table is show. + * @param table_name, target table's name. * @param table_schema, table_schema is given when operation is successful. * * @return Indicate if this operation is successful. @@ -281,8 +294,8 @@ class Connection { * * This method is used to get table row count. * - * @param table_name, table's name. - * @param row_count, table total row count. + * @param table_name, target table's name. + * @param row_count, table total row count(including partitions). * * @return Indicate if this operation is successful. */ @@ -331,21 +344,28 @@ class Connection { virtual std::string ServerStatus() const = 0; + /** + * @brief dump server tasks information + * + * This method is internal used. + * + * @return Server status. + */ virtual std::string DumpTaskTables() const = 0; /** - * @brief delete tables by range + * @brief delete tables by date range * - * This method is used to delete tables by range. + * This method is used to delete table data by date range. * + * @param table_name, target table's name. * @param Range, table range to delete. - * @param table_name * * @return Indicate if this operation is successful. */ virtual Status - DeleteByRange(Range& range, const std::string& table_name) = 0; + DeleteByDate(const std::string& table_name, const Range& range) = 0; /** * @brief preload table @@ -364,9 +384,10 @@ class Connection { * * This method is used to describe index * - * @param table_name + * @param table_name, target table's name. + * @param index_param, returned index information. * - * @return index informations and indicate if this operation is successful. + * @return Indicate if this operation is successful. */ virtual Status DescribeIndex(const std::string& table_name, IndexParam& index_param) const = 0; @@ -374,14 +395,53 @@ class Connection { /** * @brief drop index * - * This method is used to drop index + * This method is used to drop index of table(and its partitions) * - * @param table_name + * @param table_name, target table's name. * * @return Indicate if this operation is successful. */ virtual Status DropIndex(const std::string& table_name) const = 0; + + /** + * @brief Create partition method + * + * This method is used to create table partition + * + * @param param, use to provide partition information to be created. + * + * @return Indicate if partition is created successfully + */ + virtual Status + CreatePartition(const PartitionParam& param) = 0; + + /** + * @brief Test table existence method + * + * This method is used to create table + * + * @param table_name, table name is going to be tested. 
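Under the same assumptions as the sketch above (and likewise not part of this patch), a round trip through the three partition-management calls added to this interface; the table name, partition name, and tag are placeholders.

    #include "MilvusApi.h"

    #include <iostream>
    #include <memory>

    void PartitionRoundTrip(const std::shared_ptr<milvus::Connection>& conn) {
        milvus::PartitionParam param = {"tbl_demo", "tbl_demo_part_2019_11", "2019-11"};
        conn->CreatePartition(param);

        milvus::PartitionList partitions;
        conn->ShowPartitions("tbl_demo", partitions);
        for (const auto& p : partitions) {
            std::cout << p.partition_name << " (tag: " << p.partition_tag << ")" << std::endl;
        }

        // Either drop by partition_name alone, or by table_name + tag as done here.
        conn->DropPartition(param);
    }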
+ * @param partition_array, partition array of the table. + * + * @return Indicate if this operation is successful + */ + virtual Status + ShowPartitions(const std::string& table_name, PartitionList& partition_array) const = 0; + + /** + * @brief Delete partition method + * + * This method is used to delete table partition. + * + * @param param, target partition to be deleted. + * NOTE: if param.table_name is empty, you must specify param.partition_name, + * else you can specify param.table_name and param.tag and let the param.partition_name be empty + * + * @return Indicate if partition is delete successfully. + */ + virtual Status + DropPartition(const PartitionParam& param) = 0; }; } // namespace milvus diff --git a/core/src/sdk/interface/ConnectionImpl.cpp b/core/src/sdk/interface/ConnectionImpl.cpp index 7034ce4a4d..04531b46eb 100644 --- a/core/src/sdk/interface/ConnectionImpl.cpp +++ b/core/src/sdk/interface/ConnectionImpl.cpp @@ -83,16 +83,16 @@ ConnectionImpl::CreateIndex(const IndexParam& index_param) { } Status -ConnectionImpl::Insert(const std::string& table_name, const std::vector& record_array, - std::vector& id_array) { - return client_proxy_->Insert(table_name, record_array, id_array); +ConnectionImpl::Insert(const std::string& table_name, const std::string& partition_tag, + const std::vector& record_array, std::vector& id_array) { + return client_proxy_->Insert(table_name, partition_tag, record_array, id_array); } Status -ConnectionImpl::Search(const std::string& table_name, const std::vector& query_record_array, - const std::vector& query_range_array, int64_t topk, int64_t nprobe, - std::vector& topk_query_result_array) { - return client_proxy_->Search(table_name, query_record_array, query_range_array, topk, nprobe, +ConnectionImpl::Search(const std::string& table_name, const std::vector& partiton_tags, + const std::vector& query_record_array, const std::vector& query_range_array, + int64_t topk, int64_t nprobe, std::vector& topk_query_result_array) { + return client_proxy_->Search(table_name, partiton_tags, query_record_array, query_range_array, topk, nprobe, topk_query_result_array); } @@ -127,8 +127,8 @@ ConnectionImpl::DumpTaskTables() const { } Status -ConnectionImpl::DeleteByRange(Range& range, const std::string& table_name) { - return client_proxy_->DeleteByRange(range, table_name); +ConnectionImpl::DeleteByDate(const std::string& table_name, const Range& range) { + return client_proxy_->DeleteByDate(table_name, range); } Status @@ -146,4 +146,19 @@ ConnectionImpl::DropIndex(const std::string& table_name) const { return client_proxy_->DropIndex(table_name); } +Status +ConnectionImpl::CreatePartition(const PartitionParam& param) { + return client_proxy_->CreatePartition(param); +} + +Status +ConnectionImpl::ShowPartitions(const std::string& table_name, PartitionList& partition_array) const { + return client_proxy_->ShowPartitions(table_name, partition_array); +} + +Status +ConnectionImpl::DropPartition(const PartitionParam& param) { + return client_proxy_->DropPartition(param); +} + } // namespace milvus diff --git a/core/src/sdk/interface/ConnectionImpl.h b/core/src/sdk/interface/ConnectionImpl.h index 6bc3432bc4..199d22bf9d 100644 --- a/core/src/sdk/interface/ConnectionImpl.h +++ b/core/src/sdk/interface/ConnectionImpl.h @@ -56,13 +56,13 @@ class ConnectionImpl : public Connection { CreateIndex(const IndexParam& index_param) override; Status - Insert(const std::string& table_name, const std::vector& record_array, + Insert(const std::string& table_name, const 
std::string& partition_tag, const std::vector& record_array, std::vector& id_array) override; Status - Search(const std::string& table_name, const std::vector& query_record_array, - const std::vector& query_range_array, int64_t topk, int64_t nprobe, - std::vector& topk_query_result_array) override; + Search(const std::string& table_name, const std::vector& partiton_tags, + const std::vector& query_record_array, const std::vector& query_range_array, int64_t topk, + int64_t nprobe, std::vector& topk_query_result_array) override; Status DescribeTable(const std::string& table_name, TableSchema& table_schema) override; @@ -86,7 +86,7 @@ class ConnectionImpl : public Connection { DumpTaskTables() const override; Status - DeleteByRange(Range& range, const std::string& table_name) override; + DeleteByDate(const std::string& table_name, const Range& range) override; Status PreloadTable(const std::string& table_name) const override; @@ -97,6 +97,15 @@ class ConnectionImpl : public Connection { Status DropIndex(const std::string& table_name) const override; + Status + CreatePartition(const PartitionParam& param) override; + + Status + ShowPartitions(const std::string& table_name, PartitionList& partition_array) const override; + + Status + DropPartition(const PartitionParam& param) override; + private: std::shared_ptr client_proxy_; }; diff --git a/core/src/server/Config.cpp b/core/src/server/Config.cpp index 1d87e9aa6d..f130e73a85 100644 --- a/core/src/server/Config.cpp +++ b/core/src/server/Config.cpp @@ -25,6 +25,7 @@ #include "config/YamlConfigMgr.h" #include "server/Config.h" #include "utils/CommonUtil.h" +#include "utils/StringHelpFunctions.h" #include "utils/ValidationUtil.h" namespace milvus { @@ -306,6 +307,7 @@ Config::ResetDefaultConfig() { return s; } +#ifdef MILVUS_GPU_VERSION s = SetCacheConfigGpuCacheCapacity(CONFIG_CACHE_GPU_CACHE_CAPACITY_DEFAULT); if (!s.ok()) { return s; @@ -315,6 +317,7 @@ Config::ResetDefaultConfig() { if (!s.ok()) { return s; } +#endif s = SetCacheConfigCacheInsertData(CONFIG_CACHE_CACHE_INSERT_DATA_DEFAULT); if (!s.ok()) { @@ -343,6 +346,11 @@ Config::ResetDefaultConfig() { return s; } + s = SetResourceConfigSearchResources(CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT); + if (!s.ok()) { + return s; + } + s = SetResourceConfigIndexBuildDevice(CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT); if (!s.ok()) { return s; @@ -403,8 +411,7 @@ Status Config::CheckServerConfigDeployMode(const std::string& value) { if (value != "single" && value != "cluster_readonly" && value != "cluster_writable") { return Status(SERVER_INVALID_ARGUMENT, - "server_config.deploy_mode is not one of " - "single, cluster_readonly, and cluster_writable."); + "server_config.deploy_mode is not one of single, cluster_readonly, and cluster_writable."); } return Status::OK(); } @@ -592,15 +599,15 @@ Config::CheckCacheConfigGpuCacheCapacity(const std::string& value) { return Status(SERVER_INVALID_ARGUMENT, msg); } else { uint64_t gpu_cache_capacity = std::stoi(value) * GB; - int gpu_index; - Status s = GetResourceConfigIndexBuildDevice(gpu_index); + int device_id; + Status s = GetResourceConfigIndexBuildDevice(device_id); if (!s.ok()) { return s; } size_t gpu_memory; - if (!ValidationUtil::GetGpuMemory(gpu_index, gpu_memory).ok()) { - std::string msg = "Fail to get GPU memory for GPU device: " + std::to_string(gpu_index); + if (!ValidationUtil::GetGpuMemory(device_id, gpu_memory).ok()) { + std::string msg = "Fail to get GPU memory for GPU device: " + std::to_string(device_id); return 
Status(SERVER_UNEXPECTED_ERROR, msg); } else if (gpu_cache_capacity >= gpu_memory) { std::string msg = "Invalid gpu cache capacity: " + value + @@ -689,29 +696,33 @@ Config::CheckResourceConfigMode(const std::string& value) { } Status -CheckGpuDevice(const std::string& value) { +CheckResource(const std::string& value) { std::string s = value; std::transform(s.begin(), s.end(), s.begin(), ::tolower); + #ifdef MILVUS_CPU_VERSION if (s != "cpu") { return Status(SERVER_INVALID_ARGUMENT, "Invalid CPU resource: " + s); } #else - const std::regex pat("gpu(\\d+)"); - std::cmatch m; - if (!std::regex_match(value.c_str(), m, pat)) { - std::string msg = "Invalid gpu device: " + value + - ". Possible reason: resource_config.search_resources does not match your hardware."; + const std::regex pat("cpu|gpu(\\d+)"); + std::smatch m; + if (!std::regex_match(s, m, pat)) { + std::string msg = "Invalid search resource: " + value + + ". Possible reason: resource_config.search_resources is not in the format of cpux or gpux"; return Status(SERVER_INVALID_ARGUMENT, msg); } - int32_t gpu_index = std::stoi(value.substr(3)); - if (!ValidationUtil::ValidateGpuIndex(gpu_index).ok()) { - std::string msg = "Invalid gpu device: " + value + - ". Possible reason: resource_config.search_resources does not match your hardware."; - return Status(SERVER_INVALID_ARGUMENT, msg); + if (s.compare(0, 3, "gpu") == 0) { + int32_t gpu_index = std::stoi(s.substr(3)); + if (!ValidationUtil::ValidateGpuIndex(gpu_index).ok()) { + std::string msg = "Invalid search resource: " + value + + ". Possible reason: resource_config.search_resources does not match your hardware."; + return Status(SERVER_INVALID_ARGUMENT, msg); + } } #endif + return Status::OK(); } @@ -724,38 +735,20 @@ Config::CheckResourceConfigSearchResources(const std::vector& value return Status(SERVER_INVALID_ARGUMENT, msg); } - bool cpu_found = false, gpu_found = false; - for (auto& device : value) { - if (device == "cpu") { - cpu_found = true; - continue; + for (auto& resource : value) { + auto status = CheckResource(resource); + if (!status.ok()) { + return Status(SERVER_INVALID_ARGUMENT, status.message()); } - if (CheckGpuDevice(device).ok()) { - gpu_found = true; - } else { - std::string msg = "Invalid search resource: " + device + - ". Possible reason: resource_config.search_resources does not match your hardware."; - return Status(SERVER_INVALID_ARGUMENT, msg); - } - } - - if (cpu_found && !gpu_found) { - std::string msg = - "Invalid search resource. Possible reason: resource_config.search_resources has only CPU resource."; - return Status(SERVER_INVALID_ARGUMENT, msg); } return Status::OK(); } Status Config::CheckResourceConfigIndexBuildDevice(const std::string& value) { - // if (value == "cpu") { - // return Status::OK(); - // } - if (!CheckGpuDevice(value).ok()) { - std::string msg = "Invalid index build device: " + value + - ". 
Possible reason: resource_config.index_build_device does not match your hardware."; - return Status(SERVER_INVALID_ARGUMENT, msg); + auto status = CheckResource(value); + if (!status.ok()) { + return Status(SERVER_INVALID_ARGUMENT, status.message()); } return Status::OK(); } @@ -796,6 +789,22 @@ Config::GetConfigStr(const std::string& parent_key, const std::string& child_key return value; } +std::string +Config::GetConfigSequenceStr(const std::string& parent_key, const std::string& child_key, const std::string& delim, + const std::string& default_value) { + std::string value; + if (!GetConfigValueInMem(parent_key, child_key, value).ok()) { + std::vector sequence = GetConfigNode(parent_key).GetSequence(child_key); + if (sequence.empty()) { + value = default_value; + } else { + server::StringHelpFunctions::MergeStringWithDelimeter(sequence, delim, value); + } + SetConfigValueInMem(parent_key, child_key, value); + } + return value; +} + Status Config::GetServerConfigAddress(std::string& value) { value = GetConfigStr(CONFIG_SERVER, CONFIG_SERVER_ADDRESS, CONFIG_SERVER_ADDRESS_DEFAULT); @@ -1019,8 +1028,10 @@ Config::GetResourceConfigMode(std::string& value) { Status Config::GetResourceConfigSearchResources(std::vector& value) { - ConfigNode resource_config = GetConfigNode(CONFIG_RESOURCE); - value = resource_config.GetSequence(CONFIG_RESOURCE_SEARCH_RESOURCES); + std::string str = + GetConfigSequenceStr(CONFIG_RESOURCE, CONFIG_RESOURCE_SEARCH_RESOURCES, + CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT); + server::StringHelpFunctions::SplitStringByDelimeter(str, CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, value); return CheckResourceConfigSearchResources(value); } @@ -1033,10 +1044,10 @@ Config::GetResourceConfigIndexBuildDevice(int32_t& value) { return s; } - if (str != "cpu") { - value = std::stoi(str.substr(3)); + if (str == "cpu") { + value = CPU_DEVICE_ID; } else { - value = -1; + value = std::stoi(str.substr(3)); } return Status::OK(); @@ -1163,7 +1174,7 @@ Config::SetMetricConfigEnableMonitor(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_METRIC_ENABLE_MONITOR, value); + SetConfigValueInMem(CONFIG_METRIC, CONFIG_METRIC_ENABLE_MONITOR, value); return Status::OK(); } @@ -1174,7 +1185,7 @@ Config::SetMetricConfigCollector(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_METRIC_COLLECTOR, value); + SetConfigValueInMem(CONFIG_METRIC, CONFIG_METRIC_COLLECTOR, value); return Status::OK(); } @@ -1185,7 +1196,7 @@ Config::SetMetricConfigPrometheusPort(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_METRIC_PROMETHEUS_PORT, value); + SetConfigValueInMem(CONFIG_METRIC, CONFIG_METRIC_PROMETHEUS_PORT, value); return Status::OK(); } @@ -1197,7 +1208,7 @@ Config::SetCacheConfigCpuCacheCapacity(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_CPU_CACHE_CAPACITY, value); + SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_CPU_CACHE_CAPACITY, value); return Status::OK(); } @@ -1208,7 +1219,7 @@ Config::SetCacheConfigCpuCacheThreshold(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_CPU_CACHE_THRESHOLD, value); + SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_CPU_CACHE_THRESHOLD, value); return Status::OK(); } @@ -1219,7 +1230,7 @@ Config::SetCacheConfigGpuCacheCapacity(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_GPU_CACHE_CAPACITY, value); + 
SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_GPU_CACHE_CAPACITY, value); return Status::OK(); } @@ -1230,7 +1241,7 @@ Config::SetCacheConfigGpuCacheThreshold(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_GPU_CACHE_THRESHOLD, value); + SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_GPU_CACHE_THRESHOLD, value); return Status::OK(); } @@ -1241,7 +1252,7 @@ Config::SetCacheConfigCacheInsertData(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_CACHE_INSERT_DATA, value); + SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_CACHE_INSERT_DATA, value); return Status::OK(); } @@ -1253,7 +1264,7 @@ Config::SetEngineConfigUseBlasThreshold(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_ENGINE_USE_BLAS_THRESHOLD, value); + SetConfigValueInMem(CONFIG_ENGINE, CONFIG_ENGINE_USE_BLAS_THRESHOLD, value); return Status::OK(); } @@ -1264,7 +1275,7 @@ Config::SetEngineConfigOmpThreadNum(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_ENGINE_OMP_THREAD_NUM, value); + SetConfigValueInMem(CONFIG_ENGINE, CONFIG_ENGINE_OMP_THREAD_NUM, value); return Status::OK(); } @@ -1275,7 +1286,7 @@ Config::SetEngineConfigGpuSearchThreshold(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_ENGINE_GPU_SEARCH_THRESHOLD, value); + SetConfigValueInMem(CONFIG_ENGINE, CONFIG_ENGINE_GPU_SEARCH_THRESHOLD, value); return Status::OK(); } @@ -1287,7 +1298,21 @@ Config::SetResourceConfigMode(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_RESOURCE_MODE, value); + SetConfigValueInMem(CONFIG_RESOURCE, CONFIG_RESOURCE_MODE, value); + return Status::OK(); +} + +Status +Config::SetResourceConfigSearchResources(const std::string& value) { + std::vector res_vec; + server::StringHelpFunctions::SplitStringByDelimeter(value, CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, res_vec); + + Status s = CheckResourceConfigSearchResources(res_vec); + if (!s.ok()) { + return s; + } + + SetConfigValueInMem(CONFIG_RESOURCE, CONFIG_RESOURCE_SEARCH_RESOURCES, value); return Status::OK(); } @@ -1298,7 +1323,7 @@ Config::SetResourceConfigIndexBuildDevice(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_RESOURCE_INDEX_BUILD_DEVICE, value); + SetConfigValueInMem(CONFIG_RESOURCE, CONFIG_RESOURCE_INDEX_BUILD_DEVICE, value); return Status::OK(); } diff --git a/core/src/server/Config.h b/core/src/server/Config.h index a09939a9cb..b82614e0b9 100644 --- a/core/src/server/Config.h +++ b/core/src/server/Config.h @@ -92,12 +92,19 @@ static const char* CONFIG_RESOURCE = "resource_config"; static const char* CONFIG_RESOURCE_MODE = "mode"; static const char* CONFIG_RESOURCE_MODE_DEFAULT = "simple"; static const char* CONFIG_RESOURCE_SEARCH_RESOURCES = "search_resources"; +static const char* CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER = ","; +#ifdef MILVUS_CPU_VERSION +static const char* CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT = "cpu"; +#else +static const char* CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT = "cpu,gpu0"; +#endif static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE = "index_build_device"; #ifdef MILVUS_CPU_VERSION static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT = "cpu"; #else static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT = "gpu0"; #endif +const int32_t CPU_DEVICE_ID = -1; class Config { public: @@ -185,6 +192,9 @@ class Config { std::string GetConfigStr(const std::string& parent_key, const std::string& child_key, 
const std::string& default_value = ""); + std::string + GetConfigSequenceStr(const std::string& parent_key, const std::string& child_key, const std::string& delim = ",", + const std::string& default_value = ""); public: /* server config */ @@ -306,6 +316,8 @@ class Config { Status SetResourceConfigMode(const std::string& value); Status + SetResourceConfigSearchResources(const std::string& value); + Status SetResourceConfigIndexBuildDevice(const std::string& value); private: diff --git a/core/src/server/grpc_impl/GrpcRequestHandler.cpp b/core/src/server/grpc_impl/GrpcRequestHandler.cpp index a9ee3d77d0..bb38349b4a 100644 --- a/core/src/server/grpc_impl/GrpcRequestHandler.cpp +++ b/core/src/server/grpc_impl/GrpcRequestHandler.cpp @@ -150,9 +150,9 @@ GrpcRequestHandler::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Co } ::grpc::Status -GrpcRequestHandler::DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, - ::milvus::grpc::Status* response) { - BaseTaskPtr task_ptr = DeleteByRangeTask::Create(request); +GrpcRequestHandler::DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, + ::milvus::grpc::Status* response) { + BaseTaskPtr task_ptr = DeleteByDateTask::Create(request); ::milvus::grpc::Status grpc_status; GrpcRequestScheduler::ExecTask(task_ptr, &grpc_status); response->set_error_code(grpc_status.error_code()); @@ -193,6 +193,36 @@ GrpcRequestHandler::DropIndex(::grpc::ServerContext* context, const ::milvus::gr return ::grpc::Status::OK; } +::grpc::Status +GrpcRequestHandler::CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response) { + BaseTaskPtr task_ptr = CreatePartitionTask::Create(request); + GrpcRequestScheduler::ExecTask(task_ptr, response); + return ::grpc::Status::OK; +} + +::grpc::Status +GrpcRequestHandler::ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, + ::milvus::grpc::PartitionList* response) { + BaseTaskPtr task_ptr = ShowPartitionsTask::Create(request->table_name(), response); + ::milvus::grpc::Status grpc_status; + GrpcRequestScheduler::ExecTask(task_ptr, &grpc_status); + response->mutable_status()->set_reason(grpc_status.reason()); + response->mutable_status()->set_error_code(grpc_status.error_code()); + return ::grpc::Status::OK; +} + +::grpc::Status +GrpcRequestHandler::DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response) { + BaseTaskPtr task_ptr = DropPartitionTask::Create(request); + ::milvus::grpc::Status grpc_status; + GrpcRequestScheduler::ExecTask(task_ptr, &grpc_status); + response->set_reason(grpc_status.reason()); + response->set_error_code(grpc_status.error_code()); + return ::grpc::Status::OK; +} + } // namespace grpc } // namespace server } // namespace milvus diff --git a/core/src/server/grpc_impl/GrpcRequestHandler.h b/core/src/server/grpc_impl/GrpcRequestHandler.h index 1a9b591154..11a7efbb98 100644 --- a/core/src/server/grpc_impl/GrpcRequestHandler.h +++ b/core/src/server/grpc_impl/GrpcRequestHandler.h @@ -28,296 +28,168 @@ namespace server { namespace grpc { class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service { public: - /** - * @brief Create table method - * - * This method is used to create table - * - * @param context, add context for every RPC - * @param request, used to provide table information to be created. 
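The search_resources option introduced above is kept in memory as a single comma-delimited string (defaulting to "cpu" for the CPU build and "cpu,gpu0" otherwise) and split back into a list when read. A small sketch of that round trip using the string helpers touched later in this patch; it is not part of the patch, and the exact namespace qualification of StringHelpFunctions is assumed.

    #include "utils/StringHelpFunctions.h"

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        std::vector<std::string> resources;
        milvus::server::StringHelpFunctions::SplitStringByDelimeter("cpu,gpu0", ",", resources);
        // resources now holds {"cpu", "gpu0"}; each entry must match "cpu" or "gpu<N>".

        std::string merged;
        milvus::server::StringHelpFunctions::MergeStringWithDelimeter(resources, ",", merged);
        std::cout << merged << std::endl;  // prints: cpu,gpu0
        return 0;
    }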
- * @param response, used to get the status - * - * @return status - * - * @param request - * @param response - * @param context - */ + // * + // @brief This method is used to create table + // + // @param TableSchema, use to provide table information to be created. + // + // @return Status ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override; - - /** - * @brief Test table existence method - * - * This method is used to test table existence. - * - * @param context, add context for every RPC - * @param request, table name is going to be tested. - * @param response, get the bool reply of hastable - * - * @return status - * - * @param request - * @param response - * @param context - */ + // * + // @brief This method is used to test table existence. + // + // @param TableName, table name is going to be tested. + // + // @return BoolReply ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override; - - /** - * @brief Drop table method - * - * This method is used to drop table. - * - * @param context, add context for every RPC - * @param request, table name is going to be deleted. - * @param response, get the status of droptable - * - * @return status - * - * @param request - * @param response - * @param context - */ - ::grpc::Status - DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, - ::milvus::grpc::Status* response) override; - - /** - * @brief build index by table method - * - * This method is used to build index by table in sync. - * - * @param context, add context for every RPC - * @param request, table name is going to be built index. - * @param response, get the status of buildindex - * - * @return status - * - * @param request - * @param response - * @param context - */ - ::grpc::Status - CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, - ::milvus::grpc::Status* response) override; - - /** - * @brief Insert vector array to table - * - * This method is used to insert vector array to table. - * - * @param context, add context for every RPC - * @param request, table_name is inserted. - * @param response, vector array is inserted. - * - * @return status - * - * @param context - * @param request - * @param response - */ - ::grpc::Status - Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, - ::milvus::grpc::VectorIds* response) override; - - /** - * @brief Query vector - * - * This method is used to query vector in table. - * - * @param context, add context for every RPC - * @param request: - * table_name, table_name is queried. - * query_record_array, all vector are going to be queried. - * query_range_array, optional ranges for conditional search. If not specified, search whole table - * topk, how many similarity vectors will be searched. - * - * @param writer, write query result array. - * - * @return status - * - * @param context - * @param request - * @param writer - */ - ::grpc::Status - Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, - ::milvus::grpc::TopKQueryResultList* response) override; - - /** - * @brief Internal use query interface - * - * This method is used to query vector in specified files. - * - * @param context, add context for every RPC - * @param request: - * file_id_array, specified files id array, queried. 
- * query_record_array, all vector are going to be queried. - * query_range_array, optional ranges for conditional search. If not specified, search whole table - * topk, how many similarity vectors will be searched. - * - * @param writer, write query result array. - * - * @return status - * - * @param context - * @param request - * @param writer - */ - ::grpc::Status - SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, - ::milvus::grpc::TopKQueryResultList* response) override; - - /** - * @brief Get table schema - * - * This method is used to get table schema. - * - * @param context, add context for every RPC - * @param request, target table name. - * @param response, table schema - * - * @return status - * - * @param context - * @param request - * @param response - */ + // * + // @brief This method is used to get table schema. + // + // @param TableName, target table name. + // + // @return TableSchema ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override; - - /** - * @brief Get table row count - * - * This method is used to get table row count. - * - * @param context, add context for every RPC - * @param request, target table name. - * @param response, table row count - * - * @return table row count - * - * @param request - * @param response - * @param context - */ + // * + // @brief This method is used to get table schema. + // + // @param TableName, target table name. + // + // @return TableRowCount ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override; - - /** - * @brief List all tables in database - * - * This method is used to list all tables. - * - * @param context, add context for every RPC - * @param request, show table command, usually not use - * @param writer, write tables to client - * - * @return status - * - * @param context - * @param request - * @param writer - */ + // * + // @brief This method is used to list all tables. + // + // @param Command, dummy parameter. + // + // @return TableNameList ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override; - - /** - * @brief Give the server status - * - * - * This method is used to give the server status. - * @param context, add context for every RPC - * @param request, give server command - * @param response, server status - * - * @return status - * - * @param context - * @param request - * @param response - */ + // * + // @brief This method is used to delete table. + // + // @param TableName, table name is going to be deleted. + // + // @return TableNameList ::grpc::Status - Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, - ::milvus::grpc::StringReply* response) override; - - /** - * @brief delete table by range - * - * This method is used to delete table by range. - * @param context, add context for every RPC - * @param request, table name and range - * @param response, status - * - * @return status - * - * @param context - * @param request - * @param response - */ + DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, + ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to build index by table in sync mode. + // + // @param IndexParam, index paramters. 
+ // + // @return Status ::grpc::Status - DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, - ::milvus::grpc::Status* response) override; - - /** - * @brief preload table - * - * This method is used to preload table. - * @param context, add context for every RPC - * @param request, table name - * @param response, status - * - * @return status - * - * @param context - * @param request - * @param response - */ - ::grpc::Status - PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, - ::milvus::grpc::Status* response) override; - - /** - * @brief Describe index - * - * This method is used to describe index. - * @param context, add context for every RPC - * @param request, table name - * @param response, index informations - * - * @return status - * - * @param context - * @param request - * @param response - */ + CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, + ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to describe index + // + // @param TableName, target table name. + // + // @return IndexParam ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override; - - /** - * @brief Drop index - * - * This method is used to drop index. - * @param context, add context for every RPC - * @param request, table name - * @param response, status - * - * @return status - * - * @param context - * @param request - * @param response - */ + // * + // @brief This method is used to drop index + // + // @param TableName, target table name. + // + // @return Status ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to create partition + // + // @param PartitionParam, partition parameters. + // + // @return Status + ::grpc::Status + CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to show partition information + // + // @param TableName, target table name. + // + // @return PartitionList + ::grpc::Status + ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, + ::milvus::grpc::PartitionList* response) override; + // * + // @brief This method is used to drop partition + // + // @param PartitionName, target partition name. + // + // @return Status + ::grpc::Status + DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to add vector array to table. + // + // @param InsertParam, insert parameters. + // + // @return VectorIds + ::grpc::Status + Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, + ::milvus::grpc::VectorIds* response) override; + // * + // @brief This method is used to query vector in table. + // + // @param SearchParam, search parameters. + // + // @return TopKQueryResultList + ::grpc::Status + Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, + ::milvus::grpc::TopKQueryResultList* response) override; + // * + // @brief This method is used to query vector in specified files. + // + // @param SearchInFilesParam, search in files paremeters. 
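The SearchTask rework further down in this patch replaces the nested QueryResults with two flat arrays (result_ids, result_distances) of length nq * k, where the j-th hit of the i-th query sits at index i * k + j. A small sketch of walking that layout, not part of the patch itself; plain std::vector types stand in for engine::ResultIds and engine::ResultDistances.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    void PrintFlatResults(const std::vector<int64_t>& ids, const std::vector<float>& distances, size_t nq) {
        if (nq == 0 || ids.empty() || ids.size() != distances.size()) {
            return;
        }
        const size_t k = ids.size() / nq;  // top-k recovered from the flat array length
        for (size_t i = 0; i < nq; ++i) {
            std::printf("query %zu:\n", i);
            for (size_t j = 0; j < k; ++j) {
                const size_t idx = i * k + j;
                std::printf("  id=%lld distance=%f\n", static_cast<long long>(ids[idx]),
                            static_cast<double>(distances[idx]));
            }
        }
    }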
+ // + // @return TopKQueryResultList + ::grpc::Status + SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, + ::milvus::grpc::TopKQueryResultList* response) override; + // * + // @brief This method is used to give the server status. + // + // @param Command, command string + // + // @return StringReply + ::grpc::Status + Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, + ::milvus::grpc::StringReply* response) override; + // * + // @brief This method is used to delete vector by date range + // + // @param DeleteByDateParam, delete parameters. + // + // @return status + ::grpc::Status + DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, + ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to preload table + // + // @param TableName, target table name. + // + // @return Status + ::grpc::Status + PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, + ::milvus::grpc::Status* response) override; }; } // namespace grpc diff --git a/core/src/server/grpc_impl/GrpcRequestTask.cpp b/core/src/server/grpc_impl/GrpcRequestTask.cpp index 0816d45750..960b826635 100644 --- a/core/src/server/grpc_impl/GrpcRequestTask.cpp +++ b/core/src/server/grpc_impl/GrpcRequestTask.cpp @@ -366,7 +366,7 @@ DropTableTask::OnExecute() { // step 3: Drop table std::vector dates; - status = DBWrapper::DB()->DeleteTable(table_name_, dates); + status = DBWrapper::DB()->DropTable(table_name_, dates); if (!status.ok()) { return status; } @@ -505,7 +505,8 @@ InsertTask::OnExecute() { memcpy(target_data, src_data, static_cast(sizeof(int64_t) * insert_param_->row_id_array_size())); } - status = DBWrapper::DB()->InsertVectors(insert_param_->table_name(), vec_count, vec_f.data(), vec_ids); + status = DBWrapper::DB()->InsertVectors(insert_param_->table_name(), insert_param_->partition_tag(), vec_count, + vec_f.data(), vec_ids); rc.ElapseFromBegin("add vectors to engine"); if (!status.ok()) { return status; @@ -637,7 +638,8 @@ SearchTask::OnExecute() { rc.RecordSection("prepare vector data"); // step 6: search vectors - engine::QueryResults results; + engine::ResultIds result_ids; + engine::ResultDistances result_distances; auto record_count = (uint64_t)search_param_->query_record_array().size(); #ifdef MILVUS_ENABLE_PROFILING @@ -647,11 +649,21 @@ SearchTask::OnExecute() { #endif if (file_id_array_.empty()) { - status = - DBWrapper::DB()->Query(table_name_, (size_t)top_k, record_count, nprobe, vec_f.data(), dates, results); + std::vector partition_tags; + for (size_t i = 0; i < search_param_->partition_tag_array_size(); i++) { + partition_tags.emplace_back(search_param_->partition_tag_array(i)); + } + + status = ValidationUtil::ValidatePartitionTags(partition_tags); + if (!status.ok()) { + return status; + } + + status = DBWrapper::DB()->Query(table_name_, partition_tags, (size_t)top_k, record_count, nprobe, + vec_f.data(), dates, result_ids, result_distances); } else { - status = DBWrapper::DB()->Query(table_name_, file_id_array_, (size_t)top_k, record_count, nprobe, - vec_f.data(), dates, results); + status = DBWrapper::DB()->QueryByFileID(table_name_, file_id_array_, (size_t)top_k, record_count, nprobe, + vec_f.data(), dates, result_ids, result_distances); } #ifdef MILVUS_ENABLE_PROFILING @@ -663,23 +675,20 @@ SearchTask::OnExecute() { return status; } - if (results.empty()) { + if (result_ids.empty()) { return Status::OK(); // empty table } - if 
(results.size() != record_count) { - std::string msg = "Search " + std::to_string(record_count) + " vectors but only return " + - std::to_string(results.size()) + " results"; - return Status(SERVER_ILLEGAL_SEARCH_RESULT, msg); - } + size_t result_k = result_ids.size() / record_count; // step 7: construct result array - for (auto& result : results) { + for (size_t i = 0; i < record_count; i++) { ::milvus::grpc::TopKQueryResult* topk_query_result = topk_result_list->add_topk_query_result(); - for (auto& pair : result) { + for (size_t j = 0; j < result_k; j++) { ::milvus::grpc::QueryResult* grpc_result = topk_query_result->add_query_result_arrays(); - grpc_result->set_id(pair.first); - grpc_result->set_distance(pair.second); + size_t idx = i * result_k + j; + grpc_result->set_id(result_ids[idx]); + grpc_result->set_distance(result_distances[idx]); } } @@ -759,22 +768,22 @@ CmdTask::OnExecute() { } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -DeleteByRangeTask::DeleteByRangeTask(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param) +DeleteByDateTask::DeleteByDateTask(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param) : GrpcBaseTask(DDL_DML_TASK_GROUP), delete_by_range_param_(delete_by_range_param) { } BaseTaskPtr -DeleteByRangeTask::Create(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param) { +DeleteByDateTask::Create(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param) { if (delete_by_range_param == nullptr) { SERVER_LOG_ERROR << "grpc input is null!"; return nullptr; } - return std::shared_ptr(new DeleteByRangeTask(delete_by_range_param)); + return std::shared_ptr(new DeleteByDateTask(delete_by_range_param)); } Status -DeleteByRangeTask::OnExecute() { +DeleteByDateTask::OnExecute() { try { TimeRecorder rc("DeleteByRangeTask"); @@ -815,7 +824,7 @@ DeleteByRangeTask::OnExecute() { std::string fname = "/tmp/search_nq_" + this->delete_by_range_param_->table_name() + ".profiling"; ProfilerStart(fname.c_str()); #endif - status = DBWrapper::DB()->DeleteTable(table_name, dates); + status = DBWrapper::DB()->DropTable(table_name, dates); if (!status.ok()) { return status; } @@ -946,6 +955,119 @@ DropIndexTask::OnExecute() { return Status::OK(); } +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +CreatePartitionTask::CreatePartitionTask(const ::milvus::grpc::PartitionParam* partition_param) + : GrpcBaseTask(DDL_DML_TASK_GROUP), partition_param_(partition_param) { +} + +BaseTaskPtr +CreatePartitionTask::Create(const ::milvus::grpc::PartitionParam* partition_param) { + if (partition_param == nullptr) { + SERVER_LOG_ERROR << "grpc input is null!"; + return nullptr; + } + return std::shared_ptr(new CreatePartitionTask(partition_param)); +} + +Status +CreatePartitionTask::OnExecute() { + TimeRecorder rc("CreatePartitionTask"); + + try { + // step 1: check arguments + auto status = ValidationUtil::ValidateTableName(partition_param_->table_name()); + if (!status.ok()) { + return status; + } + + status = ValidationUtil::ValidateTableName(partition_param_->partition_name()); + if (!status.ok()) { + return status; + } + + status = ValidationUtil::ValidatePartitionTags({partition_param_->tag()}); + if (!status.ok()) { + return status; + } + + // step 2: create partition + status = DBWrapper::DB()->CreatePartition(partition_param_->table_name(), partition_param_->partition_name(), + partition_param_->tag()); + if 
(!status.ok()) { + // partition could exist + if (status.code() == DB_ALREADY_EXIST) { + return Status(SERVER_INVALID_TABLE_NAME, status.message()); + } + return status; + } + } catch (std::exception& ex) { + return Status(SERVER_UNEXPECTED_ERROR, ex.what()); + } + + rc.ElapseFromBegin("totally cost"); + + return Status::OK(); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +ShowPartitionsTask::ShowPartitionsTask(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list) + : GrpcBaseTask(INFO_TASK_GROUP), table_name_(table_name), partition_list_(partition_list) { +} + +BaseTaskPtr +ShowPartitionsTask::Create(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list) { + return std::shared_ptr(new ShowPartitionsTask(table_name, partition_list)); +} + +Status +ShowPartitionsTask::OnExecute() { + std::vector schema_array; + auto statuts = DBWrapper::DB()->ShowPartitions(table_name_, schema_array); + if (!statuts.ok()) { + return statuts; + } + + for (auto& schema : schema_array) { + ::milvus::grpc::PartitionParam* param = partition_list_->add_partition_array(); + param->set_table_name(schema.owner_table_); + param->set_partition_name(schema.table_id_); + param->set_tag(schema.partition_tag_); + } + return Status::OK(); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +DropPartitionTask::DropPartitionTask(const ::milvus::grpc::PartitionParam* partition_param) + : GrpcBaseTask(DDL_DML_TASK_GROUP), partition_param_(partition_param) { +} + +BaseTaskPtr +DropPartitionTask::Create(const ::milvus::grpc::PartitionParam* partition_param) { + return std::shared_ptr(new DropPartitionTask(partition_param)); +} + +Status +DropPartitionTask::OnExecute() { + if (!partition_param_->partition_name().empty()) { + auto status = ValidationUtil::ValidateTableName(partition_param_->partition_name()); + if (!status.ok()) { + return status; + } + return DBWrapper::DB()->DropPartition(partition_param_->partition_name()); + } else { + auto status = ValidationUtil::ValidateTableName(partition_param_->table_name()); + if (!status.ok()) { + return status; + } + + status = ValidationUtil::ValidatePartitionTags({partition_param_->tag()}); + if (!status.ok()) { + return status; + } + return DBWrapper::DB()->DropPartitionByTag(partition_param_->table_name(), partition_param_->tag()); + } +} + } // namespace grpc } // namespace server } // namespace milvus diff --git a/core/src/server/grpc_impl/GrpcRequestTask.h b/core/src/server/grpc_impl/GrpcRequestTask.h index ad2828ebf3..6f8e66af43 100644 --- a/core/src/server/grpc_impl/GrpcRequestTask.h +++ b/core/src/server/grpc_impl/GrpcRequestTask.h @@ -203,19 +203,19 @@ class CmdTask : public GrpcBaseTask { }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class DeleteByRangeTask : public GrpcBaseTask { +class DeleteByDateTask : public GrpcBaseTask { public: static BaseTaskPtr - Create(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param); + Create(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param); protected: - explicit DeleteByRangeTask(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param); + explicit DeleteByDateTask(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param); Status OnExecute() override; private: - const ::milvus::grpc::DeleteByRangeParam* 
delete_by_range_param_; + const ::milvus::grpc::DeleteByDateParam* delete_by_range_param_; }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -267,6 +267,55 @@ class DropIndexTask : public GrpcBaseTask { std::string table_name_; }; +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +class CreatePartitionTask : public GrpcBaseTask { + public: + static BaseTaskPtr + Create(const ::milvus::grpc::PartitionParam* partition_param); + + protected: + explicit CreatePartitionTask(const ::milvus::grpc::PartitionParam* partition_param); + + Status + OnExecute() override; + + private: + const ::milvus::grpc::PartitionParam* partition_param_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +class ShowPartitionsTask : public GrpcBaseTask { + public: + static BaseTaskPtr + Create(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list); + + protected: + ShowPartitionsTask(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list); + + Status + OnExecute() override; + + private: + std::string table_name_; + ::milvus::grpc::PartitionList* partition_list_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +class DropPartitionTask : public GrpcBaseTask { + public: + static BaseTaskPtr + Create(const ::milvus::grpc::PartitionParam* partition_param); + + protected: + explicit DropPartitionTask(const ::milvus::grpc::PartitionParam* partition_param); + + Status + OnExecute() override; + + private: + const ::milvus::grpc::PartitionParam* partition_param_; +}; + } // namespace grpc } // namespace server } // namespace milvus diff --git a/core/src/utils/StringHelpFunctions.cpp b/core/src/utils/StringHelpFunctions.cpp index 230cc1a0ff..af5b2e3b4d 100644 --- a/core/src/utils/StringHelpFunctions.cpp +++ b/core/src/utils/StringHelpFunctions.cpp @@ -17,6 +17,7 @@ #include "utils/StringHelpFunctions.h" +#include #include namespace milvus { @@ -39,39 +40,53 @@ StringHelpFunctions::TrimStringQuote(std::string& string, const std::string& qou } } -Status +void StringHelpFunctions::SplitStringByDelimeter(const std::string& str, const std::string& delimeter, std::vector& result) { if (str.empty()) { - return Status::OK(); + return; } - size_t last = 0; - size_t index = str.find_first_of(delimeter, last); - while (index != std::string::npos) { - result.emplace_back(str.substr(last, index - last)); - last = index + 1; - index = str.find_first_of(delimeter, last); + size_t prev = 0, pos = 0; + while (true) { + pos = str.find_first_of(delimeter, prev); + if (pos == std::string::npos) { + result.emplace_back(str.substr(prev)); + break; + } else { + result.emplace_back(str.substr(prev, pos - prev)); + prev = pos + 1; + } } - if (index - last > 0) { - std::string temp = str.substr(last); - result.emplace_back(temp); +} + +void +StringHelpFunctions::MergeStringWithDelimeter(const std::vector& strs, const std::string& delimeter, + std::string& result) { + if (strs.empty()) { + result = ""; + return; } - return Status::OK(); + result = strs[0]; + for (size_t i = 1; i < strs.size(); i++) { + result = result + delimeter + strs[i]; + } } Status StringHelpFunctions::SplitStringByQuote(const std::string& str, const std::string& delimeter, const std::string& quote, std::vector& result) { if 
(quote.empty()) { - return SplitStringByDelimeter(str, delimeter, result); + SplitStringByDelimeter(str, delimeter, result); + return Status::OK(); } size_t last = 0; size_t index = str.find_first_of(quote, last); if (index == std::string::npos) { - return SplitStringByDelimeter(str, delimeter, result); + SplitStringByDelimeter(str, delimeter, result); + return Status::OK(); } std::string process_str = str; @@ -116,11 +131,28 @@ StringHelpFunctions::SplitStringByQuote(const std::string& str, const std::strin } if (!process_str.empty()) { - return SplitStringByDelimeter(process_str, delimeter, result); + SplitStringByDelimeter(process_str, delimeter, result); } return Status::OK(); } +bool +StringHelpFunctions::IsRegexMatch(const std::string& target_str, const std::string& pattern_str) { + // if target_str equals pattern_str, return true + if (target_str == pattern_str) { + return true; + } + + // regex match + std::regex pattern(pattern_str); + std::smatch results; + if (std::regex_search(target_str, results, pattern)) { + return true; + } else { + return false; + } +} + } // namespace server } // namespace milvus diff --git a/core/src/utils/StringHelpFunctions.h b/core/src/utils/StringHelpFunctions.h index cb355332f1..3a41e53f4b 100644 --- a/core/src/utils/StringHelpFunctions.h +++ b/core/src/utils/StringHelpFunctions.h @@ -43,9 +43,12 @@ class StringHelpFunctions { // ,b, | b | // ,, | | // a a - static Status + static void SplitStringByDelimeter(const std::string& str, const std::string& delimeter, std::vector& result); + static void + MergeStringWithDelimeter(const std::vector& strs, const std::string& delimeter, std::string& result); + // assume the table has two columns, quote='\"', delimeter=',' // a,b a | b // "aa,gg,yy",b aa,gg,yy | b @@ -56,6 +59,11 @@ class StringHelpFunctions { static Status SplitStringByQuote(const std::string& str, const std::string& delimeter, const std::string& quote, std::vector& result); + + // std regex match function + // regex grammar reference: http://www.cplusplus.com/reference/regex/ECMAScript/ + static bool + IsRegexMatch(const std::string& target_str, const std::string& pattern); }; } // namespace server diff --git a/core/src/utils/ValidationUtil.cpp b/core/src/utils/ValidationUtil.cpp index dc2604813f..ec696ff3e0 100644 --- a/core/src/utils/ValidationUtil.cpp +++ b/core/src/utils/ValidationUtil.cpp @@ -168,6 +168,19 @@ ValidationUtil::ValidateSearchNprobe(int64_t nprobe, const engine::meta::TableSc return Status::OK(); } +Status +ValidationUtil::ValidatePartitionTags(const std::vector& partition_tags) { + for (auto& tag : partition_tags) { + if (tag.empty()) { + std::string msg = "Invalid partition tag: " + tag + ". 
" + "Partition tag should not be empty."; + SERVER_LOG_ERROR << msg; + return Status(SERVER_INVALID_NPROBE, msg); + } + } + + return Status::OK(); +} + Status ValidationUtil::ValidateGpuIndex(uint32_t gpu_index) { #ifdef MILVUS_GPU_VERSION diff --git a/core/src/utils/ValidationUtil.h b/core/src/utils/ValidationUtil.h index 7b24c93fb5..01801e295a 100644 --- a/core/src/utils/ValidationUtil.h +++ b/core/src/utils/ValidationUtil.h @@ -21,6 +21,7 @@ #include "utils/Status.h" #include +#include namespace milvus { namespace server { @@ -54,6 +55,9 @@ class ValidationUtil { static Status ValidateSearchNprobe(int64_t nprobe, const engine::meta::TableSchema& table_schema); + static Status + ValidatePartitionTags(const std::vector& partition_tags); + static Status ValidateGpuIndex(uint32_t gpu_index); diff --git a/core/unittest/CMakeLists.txt b/core/unittest/CMakeLists.txt index 7bcc21f7ee..e485bd729a 100644 --- a/core/unittest/CMakeLists.txt +++ b/core/unittest/CMakeLists.txt @@ -77,6 +77,7 @@ set(helper_files ${MILVUS_ENGINE_SRC}/utils/CommonUtil.cpp ${MILVUS_ENGINE_SRC}/utils/TimeRecorder.cpp ${MILVUS_ENGINE_SRC}/utils/Status.cpp + ${MILVUS_ENGINE_SRC}/utils/StringHelpFunctions.cpp ${MILVUS_ENGINE_SRC}/utils/ValidationUtil.cpp ${MILVUS_ENGINE_SRC}/external/easyloggingpp/easylogging++.cc ) diff --git a/core/unittest/db/test_db.cpp b/core/unittest/db/test_db.cpp index 42dc8dec82..d8614dd5d1 100644 --- a/core/unittest/db/test_db.cpp +++ b/core/unittest/db/test_db.cpp @@ -171,7 +171,8 @@ TEST_F(DBTest, DB_TEST) { BuildVectors(qb, qxb); std::thread search([&]() { - milvus::engine::QueryResults results; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; int k = 10; std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -186,17 +187,19 @@ TEST_F(DBTest, DB_TEST) { prev_count = count; START_TIMER; - stat = db_->Query(TABLE_NAME, k, qb, 10, qxb.data(), results); + + std::vector tags; + stat = db_->Query(TABLE_NAME, tags, k, qb, 10, qxb.data(), result_ids, result_distances); ss << "Search " << j << " With Size " << count / milvus::engine::M << " M"; STOP_TIMER(ss.str()); ASSERT_TRUE(stat.ok()); - for (auto k = 0; k < qb; ++k) { - ASSERT_EQ(results[k][0].first, target_ids[k]); + for (auto i = 0; i < qb; ++i) { + ASSERT_EQ(result_ids[i*k], target_ids[i]); ss.str(""); - ss << "Result [" << k << "]:"; - for (auto result : results[k]) { - ss << result.first << " "; + ss << "Result [" << i << "]:"; + for (auto t = 0; t < k; t++) { + ss << result_ids[i * k + t] << " "; } /* LOG(DEBUG) << ss.str(); */ } @@ -209,10 +212,10 @@ TEST_F(DBTest, DB_TEST) { for (auto i = 0; i < loop; ++i) { if (i == 40) { - db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids); + db_->InsertVectors(TABLE_NAME, "", qb, qxb.data(), target_ids); ASSERT_EQ(target_ids.size(), qb); } else { - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); } std::this_thread::sleep_for(std::chrono::microseconds(1)); } @@ -270,7 +273,7 @@ TEST_F(DBTest, SEARCH_TEST) { // insert data const int batch_size = 100; for (int j = 0; j < nb / batch_size; ++j) { - stat = db_->InsertVectors(TABLE_NAME, batch_size, xb.data() + batch_size * j * TABLE_DIM, ids); + stat = db_->InsertVectors(TABLE_NAME, "", batch_size, xb.data() + batch_size * j * TABLE_DIM, ids); if (j == 200) { sleep(1); } @@ -282,16 +285,19 @@ TEST_F(DBTest, SEARCH_TEST) { db_->CreateIndex(TABLE_NAME, index); // wait until build index finish { - milvus::engine::QueryResults results; - 
stat = db_->Query(TABLE_NAME, k, nq, 10, xq.data(), results); + std::vector tags; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(TABLE_NAME, tags, k, nq, 10, xq.data(), result_ids, result_distances); ASSERT_TRUE(stat.ok()); } { // search by specify index file milvus::engine::meta::DatesT dates; std::vector file_ids = {"1", "2", "3", "4", "5", "6"}; - milvus::engine::QueryResults results; - stat = db_->Query(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, results); + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->QueryByFileID(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, result_ids, result_distances); ASSERT_TRUE(stat.ok()); } @@ -340,7 +346,7 @@ TEST_F(DBTest, PRELOADTABLE_TEST) { int loop = 5; for (auto i = 0; i < loop; ++i) { milvus::engine::IDNumbers vector_ids; - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); ASSERT_EQ(vector_ids.size(), nb); } @@ -370,7 +376,7 @@ TEST_F(DBTest, SHUTDOWN_TEST) { ASSERT_FALSE(stat.ok()); milvus::engine::IDNumbers ids; - stat = db_->InsertVectors(table_info.table_id_, 0, nullptr, ids); + stat = db_->InsertVectors(table_info.table_id_, "", 0, nullptr, ids); ASSERT_FALSE(stat.ok()); stat = db_->PreloadTable(table_info.table_id_); @@ -387,15 +393,17 @@ TEST_F(DBTest, SHUTDOWN_TEST) { stat = db_->DescribeIndex(table_info.table_id_, index); ASSERT_FALSE(stat.ok()); + std::vector tags; milvus::engine::meta::DatesT dates; - milvus::engine::QueryResults results; - stat = db_->Query(table_info.table_id_, 1, 1, 1, nullptr, dates, results); + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(table_info.table_id_, tags, 1, 1, 1, nullptr, dates, result_ids, result_distances); ASSERT_FALSE(stat.ok()); std::vector file_ids; - stat = db_->Query(table_info.table_id_, file_ids, 1, 1, 1, nullptr, dates, results); + stat = db_->QueryByFileID(table_info.table_id_, file_ids, 1, 1, 1, nullptr, dates, result_ids, result_distances); ASSERT_FALSE(stat.ok()); - stat = db_->DeleteTable(table_info.table_id_, dates); + stat = db_->DropTable(table_info.table_id_, dates); ASSERT_FALSE(stat.ok()); } @@ -408,7 +416,7 @@ TEST_F(DBTest, INDEX_TEST) { BuildVectors(nb, xb); milvus::engine::IDNumbers vector_ids; - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); ASSERT_EQ(vector_ids.size(), nb); milvus::engine::TableIndex index; @@ -438,6 +446,106 @@ TEST_F(DBTest, INDEX_TEST) { ASSERT_TRUE(stat.ok()); } +TEST_F(DBTest, PARTITION_TEST) { + milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + auto stat = db_->CreateTable(table_info); + ASSERT_TRUE(stat.ok()); + + // create partition and insert data + const int64_t PARTITION_COUNT = 5; + const int64_t INSERT_BATCH = 2000; + std::string table_name = TABLE_NAME; + for (int64_t i = 0; i < PARTITION_COUNT; i++) { + std::string partition_tag = std::to_string(i); + std::string partition_name = table_name + "_" + partition_tag; + stat = db_->CreatePartition(table_name, partition_name, partition_tag); + ASSERT_TRUE(stat.ok()); + + + std::vector xb; + BuildVectors(INSERT_BATCH, xb); + + milvus::engine::IDNumbers vector_ids; + vector_ids.resize(INSERT_BATCH); + for (int64_t k = 0; k < INSERT_BATCH; k++) { + vector_ids[k] = i*INSERT_BATCH + k; + } + + db_->InsertVectors(table_name, partition_tag, 
INSERT_BATCH, xb.data(), vector_ids); + ASSERT_EQ(vector_ids.size(), INSERT_BATCH); + } + + //duplicated partition is not allowed + stat = db_->CreatePartition(table_name, "", "0"); + ASSERT_FALSE(stat.ok()); + + std::vector partiton_schema_array; + stat = db_->ShowPartitions(table_name, partiton_schema_array); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(partiton_schema_array.size(), PARTITION_COUNT); + for (int64_t i = 0; i < PARTITION_COUNT; i++) { + ASSERT_EQ(partiton_schema_array[i].table_id_, table_name + "_" + std::to_string(i)); + } + + { // build index + milvus::engine::TableIndex index; + index.engine_type_ = (int) milvus::engine::EngineType::FAISS_IVFFLAT; + index.metric_type_ = (int) milvus::engine::MetricType::L2; + stat = db_->CreateIndex(table_info.table_id_, index); + ASSERT_TRUE(stat.ok()); + + uint64_t row_count = 0; + stat = db_->GetTableRowCount(TABLE_NAME, row_count); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(row_count, INSERT_BATCH*PARTITION_COUNT); + } + + { // search + const int64_t nq = 5; + const int64_t topk = 10; + const int64_t nprobe = 10; + std::vector xq; + BuildVectors(nq, xq); + + // specify partition tags + std::vector tags = {"0", std::to_string(PARTITION_COUNT - 1)}; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(TABLE_NAME, tags, topk, nq, nprobe, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + + // search in whole table + tags.clear(); + result_ids.clear(); + result_distances.clear(); + stat = db_->Query(TABLE_NAME, tags, topk, nq, nprobe, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + + // search in all partitions(tag regex match) + tags.push_back("\\d"); + result_ids.clear(); + result_distances.clear(); + stat = db_->Query(TABLE_NAME, tags, topk, nq, nprobe, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + } + + stat = db_->DropPartition(table_name + "_0"); + ASSERT_TRUE(stat.ok()); + + stat = db_->DropPartitionByTag(table_name, "1"); + ASSERT_TRUE(stat.ok()); + + stat = db_->DropIndex(table_name); + ASSERT_TRUE(stat.ok()); + + milvus::engine::meta::DatesT dates; + stat = db_->DropTable(table_name, dates); + ASSERT_TRUE(stat.ok()); +} + TEST_F(DBTest2, ARHIVE_DISK_CHECK) { milvus::engine::meta::TableSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); @@ -470,7 +578,7 @@ TEST_F(DBTest2, ARHIVE_DISK_CHECK) { int loop = INSERT_LOOP; for (auto i = 0; i < loop; ++i) { milvus::engine::IDNumbers vector_ids; - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); std::this_thread::sleep_for(std::chrono::microseconds(1)); } @@ -502,12 +610,12 @@ TEST_F(DBTest2, DELETE_TEST) { BuildVectors(nb, xb); milvus::engine::IDNumbers vector_ids; - stat = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + stat = db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); milvus::engine::TableIndex index; stat = db_->CreateIndex(TABLE_NAME, index); std::vector dates; - stat = db_->DeleteTable(TABLE_NAME, dates); + stat = db_->DropTable(TABLE_NAME, dates); std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_TRUE(stat.ok()); @@ -537,7 +645,7 @@ TEST_F(DBTest2, DELETE_BY_RANGE_TEST) { BuildVectors(nb, xb); milvus::engine::IDNumbers vector_ids; - stat = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + 
stat = db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); milvus::engine::TableIndex index; stat = db_->CreateIndex(TABLE_NAME, index); @@ -549,7 +657,7 @@ TEST_F(DBTest2, DELETE_BY_RANGE_TEST) { std::string end_value = CurrentTmDate(1); ConvertTimeRangeToDBDates(start_value, end_value, dates); - stat = db_->DeleteTable(TABLE_NAME, dates); + stat = db_->DropTable(TABLE_NAME, dates); ASSERT_TRUE(stat.ok()); uint64_t row_count = 0; diff --git a/core/unittest/db/test_db_mysql.cpp b/core/unittest/db/test_db_mysql.cpp index e0a84662a4..f828431838 100644 --- a/core/unittest/db/test_db_mysql.cpp +++ b/core/unittest/db/test_db_mysql.cpp @@ -77,11 +77,12 @@ TEST_F(MySqlDBTest, DB_TEST) { std::vector qxb; BuildVectors(qb, qxb); - db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids); + db_->InsertVectors(TABLE_NAME, "", qb, qxb.data(), target_ids); ASSERT_EQ(target_ids.size(), qb); std::thread search([&]() { - milvus::engine::QueryResults results; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; int k = 10; std::this_thread::sleep_for(std::chrono::seconds(5)); @@ -96,25 +97,26 @@ TEST_F(MySqlDBTest, DB_TEST) { prev_count = count; START_TIMER; - stat = db_->Query(TABLE_NAME, k, qb, 10, qxb.data(), results); + std::vector tags; + stat = db_->Query(TABLE_NAME, tags, k, qb, 10, qxb.data(), result_ids, result_distances); ss << "Search " << j << " With Size " << count / milvus::engine::M << " M"; STOP_TIMER(ss.str()); ASSERT_TRUE(stat.ok()); - for (auto k = 0; k < qb; ++k) { - // std::cout << results[k][0].first << " " << target_ids[k] << std::endl; - // ASSERT_EQ(results[k][0].first, target_ids[k]); + for (auto i = 0; i < qb; ++i) { +// std::cout << results[k][0].first << " " << target_ids[k] << std::endl; +// ASSERT_EQ(results[k][0].first, target_ids[k]); bool exists = false; - for (auto& result : results[k]) { - if (result.first == target_ids[k]) { + for (auto t = 0; t < k; t++) { + if (result_ids[i * k + t] == target_ids[i]) { exists = true; } } ASSERT_TRUE(exists); ss.str(""); - ss << "Result [" << k << "]:"; - for (auto result : results[k]) { - ss << result.first << " "; + ss << "Result [" << i << "]:"; + for (auto t = 0; t < k; t++) { + ss << result_ids[i * k + t] << " "; } /* LOG(DEBUG) << ss.str(); */ } @@ -128,13 +130,13 @@ TEST_F(MySqlDBTest, DB_TEST) { int loop = INSERT_LOOP; for (auto i = 0; i < loop; ++i) { - // if (i==10) { - // db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids); - // ASSERT_EQ(target_ids.size(), qb); - // } else { - // db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); - // } - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); +// if (i==10) { +// db_->InsertVectors(TABLE_NAME, "", qb, qxb.data(), target_ids); +// ASSERT_EQ(target_ids.size(), qb); +// } else { +// db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); +// } + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); std::this_thread::sleep_for(std::chrono::microseconds(1)); } @@ -181,17 +183,17 @@ TEST_F(MySqlDBTest, SEARCH_TEST) { // insert data const int batch_size = 100; for (int j = 0; j < nb / batch_size; ++j) { - stat = db_->InsertVectors(TABLE_NAME, batch_size, xb.data() + batch_size * j * TABLE_DIM, ids); - if (j == 200) { - sleep(1); - } + stat = db_->InsertVectors(TABLE_NAME, "", batch_size, xb.data() + batch_size * j * TABLE_DIM, ids); + if (j == 200) { sleep(1); } ASSERT_TRUE(stat.ok()); } sleep(2); // wait until build index finish - milvus::engine::QueryResults results; - stat = 
db_->Query(TABLE_NAME, k, nq, 10, xq.data(), results); + std::vector tags; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(TABLE_NAME, tags, k, nq, 10, xq.data(), result_ids, result_distances); ASSERT_TRUE(stat.ok()); } @@ -229,7 +231,7 @@ TEST_F(MySqlDBTest, ARHIVE_DISK_CHECK) { int loop = INSERT_LOOP; for (auto i = 0; i < loop; ++i) { - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); std::this_thread::sleep_for(std::chrono::microseconds(1)); } @@ -265,17 +267,117 @@ TEST_F(MySqlDBTest, DELETE_TEST) { int loop = 20; for (auto i = 0; i < loop; ++i) { - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); std::this_thread::sleep_for(std::chrono::microseconds(1)); } - // std::vector dates; - // stat = db_->DeleteTable(TABLE_NAME, dates); - //// std::cout << "5 sec start" << std::endl; - // std::this_thread::sleep_for(std::chrono::seconds(5)); - //// std::cout << "5 sec finish" << std::endl; - // ASSERT_TRUE(stat.ok()); - // - // db_->HasTable(TABLE_NAME, has_table); - // ASSERT_FALSE(has_table); +// std::vector dates; +// stat = db_->DropTable(TABLE_NAME, dates); +//// std::cout << "5 sec start" << std::endl; +// std::this_thread::sleep_for(std::chrono::seconds(5)); +//// std::cout << "5 sec finish" << std::endl; +// ASSERT_TRUE(stat.ok()); +// +// db_->HasTable(TABLE_NAME, has_table); +// ASSERT_FALSE(has_table); +} + +TEST_F(MySqlDBTest, PARTITION_TEST) { + milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + auto stat = db_->CreateTable(table_info); + ASSERT_TRUE(stat.ok()); + + // create partition and insert data + const int64_t PARTITION_COUNT = 5; + const int64_t INSERT_BATCH = 2000; + std::string table_name = TABLE_NAME; + for (int64_t i = 0; i < PARTITION_COUNT; i++) { + std::string partition_tag = std::to_string(i); + std::string partition_name = table_name + "_" + partition_tag; + stat = db_->CreatePartition(table_name, partition_name, partition_tag); + ASSERT_TRUE(stat.ok()); + + + std::vector xb; + BuildVectors(INSERT_BATCH, xb); + + milvus::engine::IDNumbers vector_ids; + vector_ids.resize(INSERT_BATCH); + for (int64_t k = 0; k < INSERT_BATCH; k++) { + vector_ids[k] = i*INSERT_BATCH + k; + } + + db_->InsertVectors(table_name, partition_tag, INSERT_BATCH, xb.data(), vector_ids); + ASSERT_EQ(vector_ids.size(), INSERT_BATCH); + } + + //duplicated partition is not allowed + stat = db_->CreatePartition(table_name, "", "0"); + ASSERT_FALSE(stat.ok()); + + std::vector partiton_schema_array; + stat = db_->ShowPartitions(table_name, partiton_schema_array); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(partiton_schema_array.size(), PARTITION_COUNT); + for (int64_t i = 0; i < PARTITION_COUNT; i++) { + ASSERT_EQ(partiton_schema_array[i].table_id_, table_name + "_" + std::to_string(i)); + } + + { // build index + milvus::engine::TableIndex index; + index.engine_type_ = (int) milvus::engine::EngineType::FAISS_IVFFLAT; + index.metric_type_ = (int) milvus::engine::MetricType::L2; + stat = db_->CreateIndex(table_info.table_id_, index); + ASSERT_TRUE(stat.ok()); + + uint64_t row_count = 0; + stat = db_->GetTableRowCount(TABLE_NAME, row_count); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(row_count, INSERT_BATCH*PARTITION_COUNT); + } + + { // search + const int64_t nq = 5; + const int64_t topk = 10; + const int64_t nprobe = 10; + std::vector xq; + BuildVectors(nq, xq); + + // specify 
partition tags + std::vector tags = {"0", std::to_string(PARTITION_COUNT - 1)}; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(TABLE_NAME, tags, 10, nq, 10, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + + // search in whole table + tags.clear(); + result_ids.clear(); + result_distances.clear(); + stat = db_->Query(TABLE_NAME, tags, 10, nq, 10, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + + // search in all partitions(tag regex match) + tags.push_back("\\d"); + result_ids.clear(); + result_distances.clear(); + stat = db_->Query(TABLE_NAME, tags, 10, nq, 10, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + } + + stat = db_->DropPartition(table_name + "_0"); + ASSERT_TRUE(stat.ok()); + + stat = db_->DropPartitionByTag(table_name, "1"); + ASSERT_TRUE(stat.ok()); + + stat = db_->DropIndex(table_name); + ASSERT_TRUE(stat.ok()); + + milvus::engine::meta::DatesT dates; + stat = db_->DropTable(table_name, dates); + ASSERT_TRUE(stat.ok()); } diff --git a/core/unittest/db/test_mem.cpp b/core/unittest/db/test_mem.cpp index f3c635db49..7139553feb 100644 --- a/core/unittest/db/test_mem.cpp +++ b/core/unittest/db/test_mem.cpp @@ -231,7 +231,7 @@ TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) { vector_ids.push_back(i); } - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_TRUE(stat.ok()); std::this_thread::sleep_for(std::chrono::seconds(3)); // ensure raw data write to disk @@ -254,10 +254,13 @@ TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) { int topk = 10, nprobe = 10; for (auto& pair : search_vectors) { auto& search = pair.second; - milvus::engine::QueryResults results; - stat = db_->Query(GetTableName(), topk, 1, nprobe, search.data(), results); - ASSERT_EQ(results[0][0].first, pair.first); - ASSERT_LT(results[0][0].second, 1e-4); + + std::vector tags; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(GetTableName(), tags, topk, 1, nprobe, search.data(), result_ids, result_distances); + ASSERT_EQ(result_ids[0], pair.first); + ASSERT_LT(result_distances[0], 1e-4); } } @@ -279,7 +282,7 @@ TEST_F(MemManagerTest2, INSERT_TEST) { std::vector xb; BuildVectors(nb, xb); milvus::engine::IDNumbers vector_ids; - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_TRUE(stat.ok()); } auto end_time = METRICS_NOW_TIME; @@ -309,7 +312,8 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) { BuildVectors(qb, qxb); std::thread search([&]() { - milvus::engine::QueryResults results; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; int k = 10; std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -324,17 +328,19 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) { prev_count = count; START_TIMER; - stat = db_->Query(GetTableName(), k, qb, 10, qxb.data(), results); + + std::vector tags; + stat = db_->Query(GetTableName(), tags, k, qb, 10, qxb.data(), result_ids, result_distances); ss << "Search " << j << " With Size " << count / milvus::engine::M << " M"; STOP_TIMER(ss.str()); ASSERT_TRUE(stat.ok()); - for (auto k = 0; k < qb; ++k) { - 
ASSERT_EQ(results[k][0].first, target_ids[k]); + for (auto i = 0; i < qb; ++i) { + ASSERT_EQ(result_ids[i * k], target_ids[i]); ss.str(""); - ss << "Result [" << k << "]:"; - for (auto result : results[k]) { - ss << result.first << " "; + ss << "Result [" << i << "]:"; + for (auto t = 0; t < k; t++) { + ss << result_ids[i * k + t] << " "; } /* LOG(DEBUG) << ss.str(); */ } @@ -347,10 +353,10 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) { for (auto i = 0; i < loop; ++i) { if (i == 0) { - db_->InsertVectors(GetTableName(), qb, qxb.data(), target_ids); + db_->InsertVectors(GetTableName(), "", qb, qxb.data(), target_ids); ASSERT_EQ(target_ids.size(), qb); } else { - db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); } std::this_thread::sleep_for(std::chrono::microseconds(1)); } @@ -379,7 +385,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { vector_ids[i] = i; } - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_EQ(vector_ids[0], 0); ASSERT_TRUE(stat.ok()); @@ -391,7 +397,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { for (auto i = 0; i < nb; i++) { vector_ids[i] = i + nb; } - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_EQ(vector_ids[0], nb); ASSERT_TRUE(stat.ok()); @@ -403,7 +409,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { for (auto i = 0; i < nb; i++) { vector_ids[i] = i + nb / 2; } - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_EQ(vector_ids[0], nb / 2); ASSERT_TRUE(stat.ok()); @@ -411,7 +417,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { xb.clear(); BuildVectors(nb, xb); vector_ids.clear(); - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_TRUE(stat.ok()); nb = 100; @@ -422,7 +428,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { for (auto i = 0; i < nb; i++) { vector_ids[i] = i + nb; } - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); for (auto i = 0; i < nb; i++) { ASSERT_EQ(vector_ids[i], i + nb); } diff --git a/core/unittest/db/test_meta.cpp b/core/unittest/db/test_meta.cpp index 1311f93141..097f004bd1 100644 --- a/core/unittest/db/test_meta.cpp +++ b/core/unittest/db/test_meta.cpp @@ -84,14 +84,14 @@ TEST_F(MetaTest, TABLE_FILE_TEST) { milvus::engine::meta::DatesT dates; dates.push_back(milvus::engine::utils::GetDate()); - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); dates.clear(); for (auto i = 2; i < 10; ++i) { dates.push_back(milvus::engine::utils::GetDateWithDelta(-1 * i)); } - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); table_file.date_ = milvus::engine::utils::GetDateWithDelta(-2); @@ -102,7 +102,7 @@ TEST_F(MetaTest, TABLE_FILE_TEST) { dates.clear(); dates.push_back(table_file.date_); - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); 
std::vector ids = {table_file.id_}; @@ -332,7 +332,7 @@ TEST_F(MetaTest, TABLE_FILES_TEST) { status = impl_->CleanUp(); ASSERT_TRUE(status.ok()); - status = impl_->DeleteTable(table_id); + status = impl_->DropTable(table_id); ASSERT_TRUE(status.ok()); status = impl_->CleanUpFilesWithTTL(1UL); diff --git a/core/unittest/db/test_meta_mysql.cpp b/core/unittest/db/test_meta_mysql.cpp index 2dbd26486d..b9a82c0748 100644 --- a/core/unittest/db/test_meta_mysql.cpp +++ b/core/unittest/db/test_meta_mysql.cpp @@ -74,7 +74,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { milvus::engine::meta::DatesT dates; dates.push_back(milvus::engine::utils::GetDate()); - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); uint64_t cnt = 0; @@ -95,7 +95,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { for (auto i = 2; i < 10; ++i) { dates.push_back(milvus::engine::utils::GetDateWithDelta(-1 * i)); } - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); table_file.date_ = milvus::engine::utils::GetDateWithDelta(-2); @@ -106,7 +106,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { dates.clear(); dates.push_back(table_file.date_); - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); std::vector ids = {table_file.id_}; @@ -346,7 +346,7 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) { status = impl_->DeleteTableFiles(table_id); ASSERT_TRUE(status.ok()); - status = impl_->DeleteTable(table_id); + status = impl_->DropTable(table_id); ASSERT_TRUE(status.ok()); status = impl_->CleanUpFilesWithTTL(0UL); diff --git a/core/unittest/db/test_search.cpp b/core/unittest/db/test_search.cpp index 402ba2cd6b..1d1d9a677a 100644 --- a/core/unittest/db/test_search.cpp +++ b/core/unittest/db/test_search.cpp @@ -19,73 +19,97 @@ #include #include +#include "scheduler/job/SearchJob.h" #include "scheduler/task/SearchTask.h" -#include "utils/ThreadPool.h" #include "utils/TimeRecorder.h" +#include "utils/ThreadPool.h" namespace { namespace ms = milvus::scheduler; void -BuildResult(std::vector& output_ids, std::vector& output_distance, uint64_t input_k, uint64_t topk, - uint64_t nq, bool ascending) { +BuildResult(ms::ResultIds& output_ids, + ms::ResultDistances & output_distances, + size_t input_k, + size_t topk, + size_t nq, + bool ascending) { output_ids.clear(); output_ids.resize(nq * topk); - output_distance.clear(); - output_distance.resize(nq * topk); + output_distances.clear(); + output_distances.resize(nq * topk); - for (uint64_t i = 0; i < nq; i++) { - // insert valid items - for (uint64_t j = 0; j < input_k; j++) { + for (size_t i = 0; i < nq; i++) { + //insert valid items + for (size_t j = 0; j < input_k; j++) { output_ids[i * topk + j] = (int64_t)(drand48() * 100000); - output_distance[i * topk + j] = ascending ? (j + drand48()) : ((input_k - j) + drand48()); + output_distances[i * topk + j] = ascending ? 
(j + drand48()) : ((input_k - j) + drand48()); } - // insert invalid items - for (uint64_t j = input_k; j < topk; j++) { + //insert invalid items + for (size_t j = input_k; j < topk; j++) { output_ids[i * topk + j] = -1; - output_distance[i * topk + j] = -1.0; + output_distances[i * topk + j] = -1.0; } } } void -CopyResult(std::vector& output_ids, std::vector& output_distance, uint64_t output_topk, - std::vector& input_ids, std::vector& input_distance, uint64_t input_topk, uint64_t nq) { +CopyResult(ms::ResultIds& output_ids, + ms::ResultDistances& output_distances, + size_t output_topk, + ms::ResultIds& input_ids, + ms::ResultDistances& input_distances, + size_t input_topk, + size_t nq) { ASSERT_TRUE(input_ids.size() >= nq * input_topk); - ASSERT_TRUE(input_distance.size() >= nq * input_topk); + ASSERT_TRUE(input_distances.size() >= nq * input_topk); ASSERT_TRUE(output_topk <= input_topk); output_ids.clear(); output_ids.resize(nq * output_topk); - output_distance.clear(); - output_distance.resize(nq * output_topk); + output_distances.clear(); + output_distances.resize(nq * output_topk); - for (uint64_t i = 0; i < nq; i++) { - for (uint64_t j = 0; j < output_topk; j++) { + for (size_t i = 0; i < nq; i++) { + for (size_t j = 0; j < output_topk; j++) { output_ids[i * output_topk + j] = input_ids[i * input_topk + j]; - output_distance[i * output_topk + j] = input_distance[i * input_topk + j]; + output_distances[i * output_topk + j] = input_distances[i * input_topk + j]; } } } void -CheckTopkResult(const std::vector& input_ids_1, const std::vector& input_distance_1, - const std::vector& input_ids_2, const std::vector& input_distance_2, uint64_t topk, - uint64_t nq, bool ascending, const milvus::scheduler::ResultSet& result) { - ASSERT_EQ(result.size(), nq); - ASSERT_EQ(input_ids_1.size(), input_distance_1.size()); - ASSERT_EQ(input_ids_2.size(), input_distance_2.size()); +CheckTopkResult(const ms::ResultIds& input_ids_1, + const ms::ResultDistances& input_distances_1, + size_t input_k_1, + const ms::ResultIds& input_ids_2, + const ms::ResultDistances& input_distances_2, + size_t input_k_2, + size_t topk, + size_t nq, + bool ascending, + const ms::ResultIds& result_ids, + const ms::ResultDistances& result_distances) { + ASSERT_EQ(result_ids.size(), result_distances.size()); + ASSERT_EQ(input_ids_1.size(), input_distances_1.size()); + ASSERT_EQ(input_ids_2.size(), input_distances_2.size()); - for (int64_t i = 0; i < nq; i++) { - std::vector src_vec(input_distance_1.begin() + i * topk, input_distance_1.begin() + (i + 1) * topk); - src_vec.insert(src_vec.end(), input_distance_2.begin() + i * topk, input_distance_2.begin() + (i + 1) * topk); + size_t result_k = result_distances.size() / nq; + ASSERT_EQ(result_k, std::min(topk, input_k_1 + input_k_2)); + + for (size_t i = 0; i < nq; i++) { + std::vector + src_vec(input_distances_1.begin() + i * topk, input_distances_1.begin() + (i + 1) * topk); + src_vec.insert(src_vec.end(), + input_distances_2.begin() + i * topk, + input_distances_2.begin() + (i + 1) * topk); if (ascending) { std::sort(src_vec.begin(), src_vec.end()); } else { std::sort(src_vec.begin(), src_vec.end(), std::greater()); } - // erase invalid items + //erase invalid items std::vector::iterator iter; for (iter = src_vec.begin(); iter != src_vec.end();) { if (*iter < 0.0) @@ -94,36 +118,38 @@ CheckTopkResult(const std::vector& input_ids_1, const std::vector ids1, ids2; - std::vector dist1, dist2; - ms::ResultSet result; +MergeTopkToResultSetTest(size_t topk_1, size_t topk_2, size_t nq, 
size_t topk, bool ascending) { + ms::ResultIds ids1, ids2; + ms::ResultDistances dist1, dist2; + ms::ResultIds result_ids; + ms::ResultDistances result_distances; BuildResult(ids1, dist1, topk_1, topk, nq, ascending); BuildResult(ids2, dist2, topk_2, topk, nq, ascending); - ms::XSearchTask::MergeTopkToResultSet(ids1, dist1, topk_1, nq, topk, ascending, result); - ms::XSearchTask::MergeTopkToResultSet(ids2, dist2, topk_2, nq, topk, ascending, result); - CheckTopkResult(ids1, dist1, ids2, dist2, topk, nq, ascending, result); + ms::XSearchTask::MergeTopkToResultSet(ids1, dist1, topk_1, nq, topk, ascending, result_ids, result_distances); + ms::XSearchTask::MergeTopkToResultSet(ids2, dist2, topk_2, nq, topk, ascending, result_ids, result_distances); + CheckTopkResult(ids1, dist1, topk_1, ids2, dist2, topk_2, topk, nq, ascending, result_ids, result_distances); } TEST(DBSearchTest, MERGE_RESULT_SET_TEST) { - uint64_t NQ = 15; - uint64_t TOP_K = 64; + size_t NQ = 15; + size_t TOP_K = 64; /* test1, id1/dist1 valid, id2/dist2 empty */ MergeTopkToResultSetTest(TOP_K, 0, NQ, TOP_K, true); @@ -142,21 +168,21 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) { MergeTopkToResultSetTest(TOP_K / 2, TOP_K / 3, NQ, TOP_K, false); } -// void MergeTopkArrayTest(uint64_t topk_1, uint64_t topk_2, uint64_t nq, uint64_t topk, bool ascending) { +//void MergeTopkArrayTest(size_t topk_1, size_t topk_2, size_t nq, size_t topk, bool ascending) { // std::vector ids1, ids2; // std::vector dist1, dist2; // ms::ResultSet result; // BuildResult(ids1, dist1, topk_1, topk, nq, ascending); // BuildResult(ids2, dist2, topk_2, topk, nq, ascending); -// uint64_t result_topk = std::min(topk, topk_1 + topk_2); +// size_t result_topk = std::min(topk, topk_1 + topk_2); // ms::XSearchTask::MergeTopkArray(ids1, dist1, topk_1, ids2, dist2, topk_2, nq, topk, ascending); // if (ids1.size() != result_topk * nq) { // std::cout << ids1.size() << " " << result_topk * nq << std::endl; // } // ASSERT_TRUE(ids1.size() == result_topk * nq); // ASSERT_TRUE(dist1.size() == result_topk * nq); -// for (uint64_t i = 0; i < nq; i++) { -// for (uint64_t k = 1; k < result_topk; k++) { +// for (size_t i = 0; i < nq; i++) { +// for (size_t k = 1; k < result_topk; k++) { // float f0 = dist1[i * topk + k - 1]; // float f1 = dist1[i * topk + k]; // if (ascending) { @@ -174,9 +200,9 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) { // } //} -// TEST(DBSearchTest, MERGE_ARRAY_TEST) { -// uint64_t NQ = 15; -// uint64_t TOP_K = 64; +//TEST(DBSearchTest, MERGE_ARRAY_TEST) { +// size_t NQ = 15; +// size_t TOP_K = 64; // // /* test1, id1/dist1 valid, id2/dist2 empty */ // MergeTopkArrayTest(TOP_K, 0, NQ, TOP_K, true); @@ -202,26 +228,26 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) { //} TEST(DBSearchTest, REDUCE_PERF_TEST) { - int32_t index_file_num = 478; /* sift1B dataset, index files num */ + int32_t index_file_num = 478; /* sift1B dataset, index files num */ bool ascending = true; - std::vector thread_vec = {4, 8}; - std::vector nq_vec = {1, 10, 100}; - std::vector topk_vec = {1, 4, 16, 64}; - int32_t NQ = nq_vec[nq_vec.size() - 1]; - int32_t TOPK = topk_vec[topk_vec.size() - 1]; + std::vector thread_vec = {4}; + std::vector nq_vec = {1000}; + std::vector topk_vec = {64}; + size_t NQ = nq_vec[nq_vec.size() - 1]; + size_t TOPK = topk_vec[topk_vec.size() - 1]; - std::vector> id_vec; - std::vector> dist_vec; - std::vector input_ids; - std::vector input_distance; + std::vector id_vec; + std::vector dist_vec; + ms::ResultIds input_ids; + ms::ResultDistances input_distances; 
int32_t i, k, step; /* generate testing data */ for (i = 0; i < index_file_num; i++) { - BuildResult(input_ids, input_distance, TOPK, TOPK, NQ, ascending); + BuildResult(input_ids, input_distances, TOPK, TOPK, NQ, ascending); id_vec.push_back(input_ids); - dist_vec.push_back(input_distance); + dist_vec.push_back(input_distances); } for (int32_t max_thread_num : thread_vec) { @@ -230,136 +256,144 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) { for (int32_t nq : nq_vec) { for (int32_t top_k : topk_vec) { - ms::ResultSet final_result, final_result_2, final_result_3; + ms::ResultIds final_result_ids, final_result_ids_2, final_result_ids_3; + ms::ResultDistances final_result_distances, final_result_distances_2, final_result_distances_3; - std::vector> id_vec_1(index_file_num); - std::vector> dist_vec_1(index_file_num); + std::vector id_vec_1(index_file_num); + std::vector dist_vec_1(index_file_num); for (i = 0; i < index_file_num; i++) { CopyResult(id_vec_1[i], dist_vec_1[i], top_k, id_vec[i], dist_vec[i], TOPK, nq); } - std::string str1 = "Method-1 " + std::to_string(max_thread_num) + " " + std::to_string(nq) + " " + - std::to_string(top_k); + std::string str1 = "Method-1 " + std::to_string(max_thread_num) + " " + + std::to_string(nq) + " " + std::to_string(top_k); milvus::TimeRecorder rc1(str1); /////////////////////////////////////////////////////////////////////////////////////// /* method-1 */ for (i = 0; i < index_file_num; i++) { - ms::XSearchTask::MergeTopkToResultSet(id_vec_1[i], dist_vec_1[i], top_k, nq, top_k, ascending, - final_result); - ASSERT_EQ(final_result.size(), nq); + ms::XSearchTask::MergeTopkToResultSet(id_vec_1[i], + dist_vec_1[i], + top_k, + nq, + top_k, + ascending, + final_result_ids, + final_result_distances); + ASSERT_EQ(final_result_ids.size(), nq * top_k); + ASSERT_EQ(final_result_distances.size(), nq * top_k); } rc1.RecordSection("reduce done"); - // /////////////////////////////////////////////////////////////////////////////////////// - // /* method-2 */ - // std::vector> id_vec_2(index_file_num); - // std::vector> dist_vec_2(index_file_num); - // std::vector k_vec_2(index_file_num); - // for (i = 0; i < index_file_num; i++) { - // CopyResult(id_vec_2[i], dist_vec_2[i], top_k, id_vec[i], dist_vec[i], TOPK, nq); - // k_vec_2[i] = top_k; - // } - // - // std::string str2 = "Method-2 " + std::to_string(max_thread_num) + " " + - // std::to_string(nq) + " " + std::to_string(top_k); - // milvus::TimeRecorder rc2(str2); - // - // for (step = 1; step < index_file_num; step *= 2) { - // for (i = 0; i + step < index_file_num; i += step * 2) { - // ms::XSearchTask::MergeTopkArray(id_vec_2[i], dist_vec_2[i], k_vec_2[i], - // id_vec_2[i + step], dist_vec_2[i + step], - // k_vec_2[i + step], nq, top_k, ascending); - // } - // } - // ms::XSearchTask::MergeTopkToResultSet(id_vec_2[0], - // dist_vec_2[0], - // k_vec_2[0], - // nq, - // top_k, - // ascending, - // final_result_2); - // ASSERT_EQ(final_result_2.size(), nq); - // - // rc2.RecordSection("reduce done"); - // - // for (i = 0; i < nq; i++) { - // ASSERT_EQ(final_result[i].size(), final_result_2[i].size()); - // for (k = 0; k < final_result[i].size(); k++) { - // if (final_result[i][k].first != final_result_2[i][k].first) { - // std::cout << i << " " << k << std::endl; - // } - // ASSERT_EQ(final_result[i][k].first, final_result_2[i][k].first); - // ASSERT_EQ(final_result[i][k].second, final_result_2[i][k].second); - // } - // } - // - // /////////////////////////////////////////////////////////////////////////////////////// 
- // /* method-3 parallel */ - // std::vector> id_vec_3(index_file_num); - // std::vector> dist_vec_3(index_file_num); - // std::vector k_vec_3(index_file_num); - // for (i = 0; i < index_file_num; i++) { - // CopyResult(id_vec_3[i], dist_vec_3[i], top_k, id_vec[i], dist_vec[i], TOPK, nq); - // k_vec_3[i] = top_k; - // } - // - // std::string str3 = "Method-3 " + std::to_string(max_thread_num) + " " + - // std::to_string(nq) + " " + std::to_string(top_k); - // milvus::TimeRecorder rc3(str3); - // - // for (step = 1; step < index_file_num; step *= 2) { - // for (i = 0; i + step < index_file_num; i += step * 2) { - // threads_list.push_back( - // threadPool.enqueue(ms::XSearchTask::MergeTopkArray, - // std::ref(id_vec_3[i]), - // std::ref(dist_vec_3[i]), - // std::ref(k_vec_3[i]), - // std::ref(id_vec_3[i + step]), - // std::ref(dist_vec_3[i + step]), - // std::ref(k_vec_3[i + step]), - // nq, - // top_k, - // ascending)); - // } - // - // while (threads_list.size() > 0) { - // int nready = 0; - // for (auto it = threads_list.begin(); it != threads_list.end(); it = it) { - // auto &p = *it; - // std::chrono::milliseconds span(0); - // if (p.wait_for(span) == std::future_status::ready) { - // threads_list.erase(it++); - // ++nready; - // } else { - // ++it; - // } - // } - // - // if (nready == 0) { - // std::this_thread::yield(); - // } - // } - // } - // ms::XSearchTask::MergeTopkToResultSet(id_vec_3[0], - // dist_vec_3[0], - // k_vec_3[0], - // nq, - // top_k, - // ascending, - // final_result_3); - // ASSERT_EQ(final_result_3.size(), nq); - // - // rc3.RecordSection("reduce done"); - // - // for (i = 0; i < nq; i++) { - // ASSERT_EQ(final_result[i].size(), final_result_3[i].size()); - // for (k = 0; k < final_result[i].size(); k++) { - // ASSERT_EQ(final_result[i][k].first, final_result_3[i][k].first); - // ASSERT_EQ(final_result[i][k].second, final_result_3[i][k].second); - // } - // } +// /////////////////////////////////////////////////////////////////////////////////////// +// /* method-2 */ +// std::vector> id_vec_2(index_file_num); +// std::vector> dist_vec_2(index_file_num); +// std::vector k_vec_2(index_file_num); +// for (i = 0; i < index_file_num; i++) { +// CopyResult(id_vec_2[i], dist_vec_2[i], top_k, id_vec[i], dist_vec[i], TOPK, nq); +// k_vec_2[i] = top_k; +// } +// +// std::string str2 = "Method-2 " + std::to_string(max_thread_num) + " " + +// std::to_string(nq) + " " + std::to_string(top_k); +// milvus::TimeRecorder rc2(str2); +// +// for (step = 1; step < index_file_num; step *= 2) { +// for (i = 0; i + step < index_file_num; i += step * 2) { +// ms::XSearchTask::MergeTopkArray(id_vec_2[i], dist_vec_2[i], k_vec_2[i], +// id_vec_2[i + step], dist_vec_2[i + step], k_vec_2[i + step], +// nq, top_k, ascending); +// } +// } +// ms::XSearchTask::MergeTopkToResultSet(id_vec_2[0], +// dist_vec_2[0], +// k_vec_2[0], +// nq, +// top_k, +// ascending, +// final_result_2); +// ASSERT_EQ(final_result_2.size(), nq); +// +// rc2.RecordSection("reduce done"); +// +// for (i = 0; i < nq; i++) { +// ASSERT_EQ(final_result[i].size(), final_result_2[i].size()); +// for (k = 0; k < final_result[i].size(); k++) { +// if (final_result[i][k].first != final_result_2[i][k].first) { +// std::cout << i << " " << k << std::endl; +// } +// ASSERT_EQ(final_result[i][k].first, final_result_2[i][k].first); +// ASSERT_EQ(final_result[i][k].second, final_result_2[i][k].second); +// } +// } +// +// /////////////////////////////////////////////////////////////////////////////////////// +// /* method-3 
parallel */ +// std::vector> id_vec_3(index_file_num); +// std::vector> dist_vec_3(index_file_num); +// std::vector k_vec_3(index_file_num); +// for (i = 0; i < index_file_num; i++) { +// CopyResult(id_vec_3[i], dist_vec_3[i], top_k, id_vec[i], dist_vec[i], TOPK, nq); +// k_vec_3[i] = top_k; +// } +// +// std::string str3 = "Method-3 " + std::to_string(max_thread_num) + " " + +// std::to_string(nq) + " " + std::to_string(top_k); +// milvus::TimeRecorder rc3(str3); +// +// for (step = 1; step < index_file_num; step *= 2) { +// for (i = 0; i + step < index_file_num; i += step * 2) { +// threads_list.push_back( +// threadPool.enqueue(ms::XSearchTask::MergeTopkArray, +// std::ref(id_vec_3[i]), +// std::ref(dist_vec_3[i]), +// std::ref(k_vec_3[i]), +// std::ref(id_vec_3[i + step]), +// std::ref(dist_vec_3[i + step]), +// std::ref(k_vec_3[i + step]), +// nq, +// top_k, +// ascending)); +// } +// +// while (threads_list.size() > 0) { +// int nready = 0; +// for (auto it = threads_list.begin(); it != threads_list.end(); it = it) { +// auto &p = *it; +// std::chrono::milliseconds span(0); +// if (p.wait_for(span) == std::future_status::ready) { +// threads_list.erase(it++); +// ++nready; +// } else { +// ++it; +// } +// } +// +// if (nready == 0) { +// std::this_thread::yield(); +// } +// } +// } +// ms::XSearchTask::MergeTopkToResultSet(id_vec_3[0], +// dist_vec_3[0], +// k_vec_3[0], +// nq, +// top_k, +// ascending, +// final_result_3); +// ASSERT_EQ(final_result_3.size(), nq); +// +// rc3.RecordSection("reduce done"); +// +// for (i = 0; i < nq; i++) { +// ASSERT_EQ(final_result[i].size(), final_result_3[i].size()); +// for (k = 0; k < final_result[i].size(); k++) { +// ASSERT_EQ(final_result[i][k].first, final_result_3[i][k].first); +// ASSERT_EQ(final_result[i][k].second, final_result_3[i][k].second); +// } +// } } } } diff --git a/core/unittest/metrics/test_metrics.cpp b/core/unittest/metrics/test_metrics.cpp index 171aac17ee..10410a648d 100644 --- a/core/unittest/metrics/test_metrics.cpp +++ b/core/unittest/metrics/test_metrics.cpp @@ -15,19 +15,19 @@ // specific language governing permissions and limitations // under the License. 
-#include #include #include #include #include #include +#include #include "cache/CpuCacheMgr.h" -#include "db/DB.h" -#include "db/meta/SqliteMetaImpl.h" +#include "server/Config.h" #include "metrics/Metrics.h" #include "metrics/utils.h" -#include "server/Config.h" +#include "db/DB.h" +#include "db/meta/SqliteMetaImpl.h" TEST_F(MetricTest, METRIC_TEST) { milvus::server::Config::GetInstance().SetMetricConfigCollector("zabbix"); @@ -36,15 +36,15 @@ TEST_F(MetricTest, METRIC_TEST) { milvus::server::Metrics::GetInstance(); milvus::server::SystemInfo::GetInstance().Init(); - // server::Metrics::GetInstance().Init(); - // server::Metrics::GetInstance().exposer_ptr()->RegisterCollectable(server::Metrics::GetInstance().registry_ptr()); +// server::Metrics::GetInstance().Init(); +// server::Metrics::GetInstance().exposer_ptr()->RegisterCollectable(server::Metrics::GetInstance().registry_ptr()); milvus::server::Metrics::GetInstance().Init(); - // server::PrometheusMetrics::GetInstance().exposer_ptr()->RegisterCollectable(server::PrometheusMetrics::GetInstance().registry_ptr()); +// server::PrometheusMetrics::GetInstance().exposer_ptr()->RegisterCollectable(server::PrometheusMetrics::GetInstance().registry_ptr()); milvus::cache::CpuCacheMgr::GetInstance()->SetCapacity(1UL * 1024 * 1024 * 1024); std::cout << milvus::cache::CpuCacheMgr::GetInstance()->CacheCapacity() << std::endl; - static const char* group_name = "test_group"; + static const char *group_name = "test_group"; static const int group_dim = 256; milvus::engine::meta::TableSchema group_info; @@ -61,21 +61,23 @@ TEST_F(MetricTest, METRIC_TEST) { int d = 256; int nb = 50; - float* xb = new float[d * nb]; + float *xb = new float[d * nb]; for (int i = 0; i < nb; i++) { for (int j = 0; j < d; j++) xb[d * i + j] = drand48(); xb[d * i] += i / 2000.; } int qb = 5; - float* qxb = new float[d * qb]; + float *qxb = new float[d * qb]; for (int i = 0; i < qb; i++) { for (int j = 0; j < d; j++) qxb[d * i + j] = drand48(); qxb[d * i] += i / 2000.; } std::thread search([&]() { - milvus::engine::QueryResults results; +// std::vector tags; +// milvus::engine::ResultIds result_ids; +// milvus::engine::ResultDistances result_distances; int k = 10; std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -90,16 +92,17 @@ TEST_F(MetricTest, METRIC_TEST) { prev_count = count; START_TIMER; - // stat = db_->Query(group_name, k, qb, qxb, results); - ss << "Search " << j << " With Size " << (float)(count * group_dim * sizeof(float)) / (1024 * 1024) << " M"; +// stat = db_->Query(group_name, tags, k, qb, qxb, result_ids, result_distances); + ss << "Search " << j << " With Size " << (float) (count * group_dim * sizeof(float)) / (1024 * 1024) + << " M"; for (auto k = 0; k < qb; ++k) { - // ASSERT_EQ(results[k][0].first, target_ids[k]); +// ASSERT_EQ(results[k][0].first, target_ids[k]); ss.str(""); ss << "Result [" << k << "]:"; - // for (auto result : results[k]) { - // ss << result.first << " "; - // } +// for (auto result : results[k]) { +// ss << result.first << " "; +// } } ASSERT_TRUE(count >= prev_count); std::this_thread::sleep_for(std::chrono::seconds(1)); @@ -110,10 +113,10 @@ TEST_F(MetricTest, METRIC_TEST) { for (auto i = 0; i < loop; ++i) { if (i == 40) { - db_->InsertVectors(group_name, qb, qxb, target_ids); + db_->InsertVectors(group_name, "", qb, qxb, target_ids); ASSERT_EQ(target_ids.size(), qb); } else { - db_->InsertVectors(group_name, nb, xb, vector_ids); + db_->InsertVectors(group_name, "", nb, xb, vector_ids); } 
std::this_thread::sleep_for(std::chrono::microseconds(2000)); } @@ -152,3 +155,5 @@ TEST_F(MetricTest, COLLECTOR_METRICS_TEST) { milvus::server::MetricCollector metric_collector(); } + + diff --git a/core/unittest/server/test_config.cpp b/core/unittest/server/test_config.cpp index caaa66f979..6a62ddd97c 100644 --- a/core/unittest/server/test_config.cpp +++ b/core/unittest/server/test_config.cpp @@ -22,6 +22,7 @@ #include "server/Config.h" #include "server/utils.h" #include "utils/CommonUtil.h" +#include "utils/StringHelpFunctions.h" #include "utils/ValidationUtil.h" namespace { @@ -98,6 +99,328 @@ TEST_F(ConfigTest, CONFIG_TEST) { ASSERT_TRUE(seqs.empty()); } +TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) { + std::string config_path(CONFIG_PATH); + milvus::server::Config& config = milvus::server::Config::GetInstance(); + milvus::Status s; + std::string str_val; + int32_t int32_val; + int64_t int64_val; + float float_val; + bool bool_val; + + /* server config */ + std::string server_addr = "192.168.1.155"; + s = config.SetServerConfigAddress(server_addr); + ASSERT_TRUE(s.ok()); + s = config.GetServerConfigAddress(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == server_addr); + + std::string server_port = "12345"; + s = config.SetServerConfigPort(server_port); + ASSERT_TRUE(s.ok()); + s = config.GetServerConfigPort(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == server_port); + + std::string server_mode = "cluster_readonly"; + s = config.SetServerConfigDeployMode(server_mode); + ASSERT_TRUE(s.ok()); + s = config.GetServerConfigDeployMode(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == server_mode); + + std::string server_time_zone = "UTC+6"; + s = config.SetServerConfigTimeZone(server_time_zone); + ASSERT_TRUE(s.ok()); + s = config.GetServerConfigTimeZone(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == server_time_zone); + + /* db config */ + std::string db_primary_path = "/home/zilliz"; + s = config.SetDBConfigPrimaryPath(db_primary_path); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigPrimaryPath(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == db_primary_path); + + std::string db_secondary_path = "/home/zilliz"; + s = config.SetDBConfigSecondaryPath(db_secondary_path); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigSecondaryPath(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == db_secondary_path); + + std::string db_backend_url = "mysql://root:123456@127.0.0.1:19530/milvus"; + s = config.SetDBConfigBackendUrl(db_backend_url); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigBackendUrl(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == db_backend_url); + + int32_t db_archive_disk_threshold = 100; + s = config.SetDBConfigArchiveDiskThreshold(std::to_string(db_archive_disk_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigArchiveDiskThreshold(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == db_archive_disk_threshold); + + int32_t db_archive_days_threshold = 365; + s = config.SetDBConfigArchiveDaysThreshold(std::to_string(db_archive_days_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigArchiveDaysThreshold(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == db_archive_days_threshold); + + int32_t db_insert_buffer_size = 2; + s = config.SetDBConfigInsertBufferSize(std::to_string(db_insert_buffer_size)); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigInsertBufferSize(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == db_insert_buffer_size); + + /* metric config */ + bool 
metric_enable_monitor = false; + s = config.SetMetricConfigEnableMonitor(std::to_string(metric_enable_monitor)); + ASSERT_TRUE(s.ok()); + s = config.GetMetricConfigEnableMonitor(bool_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(bool_val == metric_enable_monitor); + + std::string metric_collector = "prometheus"; + s = config.SetMetricConfigCollector(metric_collector); + ASSERT_TRUE(s.ok()); + s = config.GetMetricConfigCollector(str_val); + ASSERT_TRUE(str_val == metric_collector); + + std::string metric_prometheus_port = "2222"; + s = config.SetMetricConfigPrometheusPort(metric_prometheus_port); + ASSERT_TRUE(s.ok()); + s = config.GetMetricConfigPrometheusPort(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == metric_prometheus_port); + + /* cache config */ + int64_t cache_cpu_cache_capacity = 5; + s = config.SetCacheConfigCpuCacheCapacity(std::to_string(cache_cpu_cache_capacity)); + ASSERT_TRUE(s.ok()); + s = config.GetCacheConfigCpuCacheCapacity(int64_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int64_val == cache_cpu_cache_capacity); + + float cache_cpu_cache_threshold = 0.1; + s = config.SetCacheConfigCpuCacheThreshold(std::to_string(cache_cpu_cache_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetCacheConfigCpuCacheThreshold(float_val); + ASSERT_TRUE(float_val == cache_cpu_cache_threshold); + +#ifdef MILVUS_GPU_VERSION + int64_t cache_gpu_cache_capacity = 1; + s = config.SetCacheConfigGpuCacheCapacity(std::to_string(cache_gpu_cache_capacity)); + ASSERT_TRUE(s.ok()); + s = config.GetCacheConfigGpuCacheCapacity(int64_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int64_val == cache_gpu_cache_capacity); + + float cache_gpu_cache_threshold = 0.2; + s = config.SetCacheConfigGpuCacheThreshold(std::to_string(cache_gpu_cache_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetCacheConfigGpuCacheThreshold(float_val); + ASSERT_TRUE(float_val == cache_gpu_cache_threshold); +#endif + + bool cache_insert_data = true; + s = config.SetCacheConfigCacheInsertData(std::to_string(cache_insert_data)); + ASSERT_TRUE(s.ok()); + s = config.GetCacheConfigCacheInsertData(bool_val); + ASSERT_TRUE(bool_val == cache_insert_data); + + /* engine config */ + int32_t engine_use_blas_threshold = 50; + s = config.SetEngineConfigUseBlasThreshold(std::to_string(engine_use_blas_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetEngineConfigUseBlasThreshold(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == engine_use_blas_threshold); + + int32_t engine_omp_thread_num = 8; + s = config.SetEngineConfigOmpThreadNum(std::to_string(engine_omp_thread_num)); + ASSERT_TRUE(s.ok()); + s = config.GetEngineConfigOmpThreadNum(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == engine_omp_thread_num); + + int32_t engine_gpu_search_threshold = 800; + s = config.SetEngineConfigGpuSearchThreshold(std::to_string(engine_gpu_search_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetEngineConfigGpuSearchThreshold(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == engine_gpu_search_threshold); + + /* resource config */ + std::string resource_mode = "simple"; + s = config.SetResourceConfigMode(resource_mode); + ASSERT_TRUE(s.ok()); + s = config.GetResourceConfigMode(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == resource_mode); + +#ifdef MILVUS_CPU_VERSION + std::vector search_resources = {"cpu"}; +#else + std::vector search_resources = {"cpu", "gpu0"}; +#endif + std::vector res_vec; + std::string res_str; + milvus::server::StringHelpFunctions::MergeStringWithDelimeter( + search_resources, 
milvus::server::CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, res_str); + s = config.SetResourceConfigSearchResources(res_str); + ASSERT_TRUE(s.ok()); + s = config.GetResourceConfigSearchResources(res_vec); + ASSERT_TRUE(s.ok()); + for (size_t i = 0; i < search_resources.size(); i++) { + ASSERT_TRUE(search_resources[i] == res_vec[i]); + } + +#ifdef MILVUS_CPU_VERSION + int32_t resource_index_build_device = milvus::server::CPU_DEVICE_ID; + s = config.SetResourceConfigIndexBuildDevice("cpu"); +#else + int32_t resource_index_build_device = 0; + s = config.SetResourceConfigIndexBuildDevice("gpu" + std::to_string(resource_index_build_device)); +#endif + ASSERT_TRUE(s.ok()); + s = config.GetResourceConfigIndexBuildDevice(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == resource_index_build_device); +} + +TEST_F(ConfigTest, SERVER_CONFIG_INVALID_TEST) { + std::string config_path(CONFIG_PATH); + milvus::server::Config& config = milvus::server::Config::GetInstance(); + milvus::Status s; + + s = config.LoadConfigFile(""); + ASSERT_FALSE(s.ok()); + + s = config.LoadConfigFile(config_path + INVALID_CONFIG_FILE); + ASSERT_FALSE(s.ok()); + s = config.LoadConfigFile(config_path + "dummy.yaml"); + ASSERT_FALSE(s.ok()); + + /* server config */ + s = config.SetServerConfigAddress("0.0.0"); + ASSERT_FALSE(s.ok()); + s = config.SetServerConfigAddress("0.0.0.256"); + ASSERT_FALSE(s.ok()); + + s = config.SetServerConfigPort("a"); + ASSERT_FALSE(s.ok()); + s = config.SetServerConfigPort("99999"); + ASSERT_FALSE(s.ok()); + + s = config.SetServerConfigDeployMode("cluster"); + ASSERT_FALSE(s.ok()); + + s = config.SetServerConfigTimeZone("GM"); + ASSERT_FALSE(s.ok()); + s = config.SetServerConfigTimeZone("GMT8"); + ASSERT_FALSE(s.ok()); + s = config.SetServerConfigTimeZone("UTCA"); + ASSERT_FALSE(s.ok()); + + /* db config */ + s = config.SetDBConfigPrimaryPath(""); + ASSERT_FALSE(s.ok()); + + // s = config.SetDBConfigSecondaryPath(""); + // ASSERT_FALSE(s.ok()); + + s = config.SetDBConfigBackendUrl("http://www.google.com"); + ASSERT_FALSE(s.ok()); + s = config.SetDBConfigBackendUrl("sqlite://:@:"); + ASSERT_FALSE(s.ok()); + s = config.SetDBConfigBackendUrl("mysql://root:123456@127.0.0.1/milvus"); + ASSERT_FALSE(s.ok()); + + s = config.SetDBConfigArchiveDiskThreshold("0x10"); + ASSERT_FALSE(s.ok()); + + s = config.SetDBConfigArchiveDaysThreshold("0x10"); + ASSERT_FALSE(s.ok()); + + s = config.SetDBConfigInsertBufferSize("a"); + ASSERT_FALSE(s.ok()); + s = config.SetDBConfigInsertBufferSize("0"); + ASSERT_FALSE(s.ok()); + s = config.SetDBConfigInsertBufferSize("2048"); + ASSERT_FALSE(s.ok()); + + /* metric config */ + s = config.SetMetricConfigEnableMonitor("Y"); + ASSERT_FALSE(s.ok()); + + s = config.SetMetricConfigCollector("zilliz"); + ASSERT_FALSE(s.ok()); + + s = config.SetMetricConfigPrometheusPort("0xff"); + ASSERT_FALSE(s.ok()); + + /* cache config */ + s = config.SetCacheConfigCpuCacheCapacity("a"); + ASSERT_FALSE(s.ok()); + s = config.SetCacheConfigCpuCacheCapacity("0"); + ASSERT_FALSE(s.ok()); + s = config.SetCacheConfigCpuCacheCapacity("2048"); + ASSERT_FALSE(s.ok()); + + s = config.SetCacheConfigCpuCacheThreshold("a"); + ASSERT_FALSE(s.ok()); + s = config.SetCacheConfigCpuCacheThreshold("1.0"); + ASSERT_FALSE(s.ok()); + +#ifdef MILVUS_GPU_VERSION + s = config.SetCacheConfigGpuCacheCapacity("a"); + ASSERT_FALSE(s.ok()); + s = config.SetCacheConfigGpuCacheCapacity("128"); + ASSERT_FALSE(s.ok()); + + s = config.SetCacheConfigGpuCacheThreshold("a"); + ASSERT_FALSE(s.ok()); + s = 
config.SetCacheConfigGpuCacheThreshold("1.0"); + ASSERT_FALSE(s.ok()); +#endif + + s = config.SetCacheConfigCacheInsertData("N"); + ASSERT_FALSE(s.ok()); + + /* engine config */ + s = config.SetEngineConfigUseBlasThreshold("0xff"); + ASSERT_FALSE(s.ok()); + + s = config.SetEngineConfigOmpThreadNum("a"); + ASSERT_FALSE(s.ok()); + s = config.SetEngineConfigOmpThreadNum("10000"); + ASSERT_FALSE(s.ok()); + + s = config.SetEngineConfigGpuSearchThreshold("-1"); + ASSERT_FALSE(s.ok()); + + /* resource config */ + s = config.SetResourceConfigMode("default"); + ASSERT_FALSE(s.ok()); + + s = config.SetResourceConfigSearchResources("gpu10"); + ASSERT_FALSE(s.ok()); + + s = config.SetResourceConfigIndexBuildDevice("gup2"); + ASSERT_FALSE(s.ok()); + s = config.SetResourceConfigIndexBuildDevice("gpu16"); + ASSERT_FALSE(s.ok()); +} + TEST_F(ConfigTest, SERVER_CONFIG_TEST) { std::string config_path(CONFIG_PATH); milvus::server::Config& config = milvus::server::Config::GetInstance(); diff --git a/core/unittest/server/test_rpc.cpp b/core/unittest/server/test_rpc.cpp index 100613db7a..4d5b9e3567 100644 --- a/core/unittest/server/test_rpc.cpp +++ b/core/unittest/server/test_rpc.cpp @@ -380,6 +380,44 @@ TEST_F(RpcHandlerTest, TABLES_TEST) { ASSERT_EQ(error_code, ::milvus::grpc::ErrorCode::SUCCESS); } +TEST_F(RpcHandlerTest, PARTITION_TEST) { + ::grpc::ServerContext context; + ::milvus::grpc::TableSchema table_schema; + ::milvus::grpc::Status response; + std::string str_table_name = "tbl_partition"; + table_schema.set_table_name(str_table_name); + table_schema.set_dimension(TABLE_DIM); + table_schema.set_index_file_size(INDEX_FILE_SIZE); + table_schema.set_metric_type(1); + handler->CreateTable(&context, &table_schema, &response); + + ::milvus::grpc::PartitionParam partition_param; + partition_param.set_table_name(str_table_name); + std::string partition_name = "tbl_partition_0"; + partition_param.set_partition_name(partition_name); + std::string partition_tag = "0"; + partition_param.set_tag(partition_tag); + handler->CreatePartition(&context, &partition_param, &response); + ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code()); + + ::milvus::grpc::TableName table_name; + table_name.set_table_name(str_table_name); + ::milvus::grpc::PartitionList partition_list; + handler->ShowPartitions(&context, &table_name, &partition_list); + ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code()); + ASSERT_EQ(partition_list.partition_array_size(), 1); + + ::milvus::grpc::PartitionParam partition_parm; + partition_parm.set_table_name(str_table_name); + partition_parm.set_tag(partition_tag); + handler->DropPartition(&context, &partition_parm, &response); + ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code()); + + partition_parm.set_partition_name(partition_name); + handler->DropPartition(&context, &partition_parm, &response); + ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code()); +} + TEST_F(RpcHandlerTest, CMD_TEST) { ::grpc::ServerContext context; ::milvus::grpc::Command command; @@ -396,26 +434,26 @@ TEST_F(RpcHandlerTest, CMD_TEST) { TEST_F(RpcHandlerTest, DELETE_BY_RANGE_TEST) { ::grpc::ServerContext context; - ::milvus::grpc::DeleteByRangeParam request; + ::milvus::grpc::DeleteByDateParam request; ::milvus::grpc::Status status; - handler->DeleteByRange(&context, nullptr, &status); - handler->DeleteByRange(&context, &request, &status); + handler->DeleteByDate(&context, nullptr, &status); + handler->DeleteByDate(&context, &request, &status); 
request.set_table_name(TABLE_NAME); request.mutable_range()->set_start_value(CurrentTmDate(-3)); request.mutable_range()->set_end_value(CurrentTmDate(-2)); - ::grpc::Status grpc_status = handler->DeleteByRange(&context, &request, &status); + ::grpc::Status grpc_status = handler->DeleteByDate(&context, &request, &status); int error_code = status.error_code(); // ASSERT_EQ(error_code, ::milvus::grpc::ErrorCode::SUCCESS); request.mutable_range()->set_start_value("test6"); - grpc_status = handler->DeleteByRange(&context, &request, &status); + grpc_status = handler->DeleteByDate(&context, &request, &status); request.mutable_range()->set_start_value(CurrentTmDate(-2)); request.mutable_range()->set_end_value("test6"); - grpc_status = handler->DeleteByRange(&context, &request, &status); + grpc_status = handler->DeleteByDate(&context, &request, &status); request.mutable_range()->set_end_value(CurrentTmDate(-2)); - grpc_status = handler->DeleteByRange(&context, &request, &status); + grpc_status = handler->DeleteByDate(&context, &request, &status); } ////////////////////////////////////////////////////////////////////// diff --git a/core/unittest/server/test_util.cpp b/core/unittest/server/test_util.cpp index 36d0ab8597..68400f2454 100644 --- a/core/unittest/server/test_util.cpp +++ b/core/unittest/server/test_util.cpp @@ -117,12 +117,11 @@ TEST(UtilTest, STRINGFUNCTIONS_TEST) { str = "a,b,c"; std::vector result; - auto status = milvus::server::StringHelpFunctions::SplitStringByDelimeter(str, ",", result); - ASSERT_TRUE(status.ok()); + milvus::server::StringHelpFunctions::SplitStringByDelimeter(str, ",", result); ASSERT_EQ(result.size(), 3UL); result.clear(); - status = milvus::server::StringHelpFunctions::SplitStringByQuote(str, ",", "\"", result); + auto status = milvus::server::StringHelpFunctions::SplitStringByQuote(str, ",", "\"", result); ASSERT_TRUE(status.ok()); ASSERT_EQ(result.size(), 3UL); @@ -136,6 +135,10 @@ TEST(UtilTest, STRINGFUNCTIONS_TEST) { status = milvus::server::StringHelpFunctions::SplitStringByQuote(str, ",", "\"", result); ASSERT_TRUE(status.ok()); ASSERT_EQ(result.size(), 3UL); + + ASSERT_TRUE(milvus::server::StringHelpFunctions::IsRegexMatch("abc", "abc")); + ASSERT_TRUE(milvus::server::StringHelpFunctions::IsRegexMatch("a8c", "a\\d.")); + ASSERT_FALSE(milvus::server::StringHelpFunctions::IsRegexMatch("abc", "a\\dc")); } TEST(UtilTest, BLOCKINGQUEUE_TEST) { @@ -314,6 +317,13 @@ TEST(ValidationUtilTest, VALIDATE_NPROBE_TEST) { ASSERT_NE(milvus::server::ValidationUtil::ValidateSearchNprobe(101, schema).code(), milvus::SERVER_SUCCESS); } +TEST(ValidationUtilTest, VALIDATE_PARTITION_TAGS) { + std::vector partition_tags = {"abc"}; + ASSERT_EQ(milvus::server::ValidationUtil::ValidatePartitionTags(partition_tags).code(), milvus::SERVER_SUCCESS); + partition_tags.push_back(""); + ASSERT_NE(milvus::server::ValidationUtil::ValidatePartitionTags(partition_tags).code(), milvus::SERVER_SUCCESS); +} + #ifdef MILVUS_GPU_VERSION TEST(ValidationUtilTest, VALIDATE_GPU_TEST) { ASSERT_EQ(milvus::server::ValidationUtil::ValidateGpuIndex(0).code(), milvus::SERVER_SUCCESS); diff --git a/shards/.dockerignore b/shards/.dockerignore new file mode 100644 index 0000000000..e450610057 --- /dev/null +++ b/shards/.dockerignore @@ -0,0 +1,13 @@ +.git +.gitignore +.env +.coverage +.dockerignore +cov_html/ + +.pytest_cache +__pycache__ +*/__pycache__ +*.md +*.yml +*.yaml diff --git a/shards/Dockerfile b/shards/Dockerfile new file mode 100644 index 0000000000..594640619e --- /dev/null +++ b/shards/Dockerfile 
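The new `StringHelpFunctions::IsRegexMatch` and `ValidationUtil::ValidatePartitionTags` cases above pin down two small behaviours: the regex helper acts as a whole-string match, and a partition tag list is rejected as soon as it contains an empty tag. A rough Python equivalent of what the assertions check is sketched below; treating whitespace-only tags as empty is an assumption, since the tests only cover the empty string.

```python
import re


def is_regex_match(value, pattern):
    # whole-string match, e.g. IsRegexMatch("a8c", r"a\d.") -> True
    return re.fullmatch(pattern, value) is not None


def validate_partition_tags(tags):
    # a tag list is invalid once any tag is empty
    # (stripping whitespace first is an assumption, not covered by the tests)
    return all(tag.strip() for tag in tags)


assert is_regex_match("a8c", r"a\d.")
assert not is_regex_match("abc", r"a\dc")
assert validate_partition_tags(["abc"])
assert not validate_partition_tags(["abc", ""])
```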
@@ -0,0 +1,10 @@
+FROM python:3.6
+RUN apt update && apt install -y \
+    less \
+    telnet
+RUN mkdir /source
+WORKDIR /source
+ADD ./requirements.txt ./
+RUN pip install -r requirements.txt
+COPY . .
+CMD python mishards/main.py
diff --git a/shards/Makefile b/shards/Makefile
new file mode 100644
index 0000000000..c8aa6127f8
--- /dev/null
+++ b/shards/Makefile
@@ -0,0 +1,35 @@
+HOST=$(or $(host),127.0.0.1)
+PORT=$(or $(port),19530)
+
+build:
+	docker build --network=host -t milvusdb/mishards .
+push:
+	docker push milvusdb/mishards
+pull:
+	docker pull milvusdb/mishards
+deploy: clean_deploy
+	cd all_in_one && docker-compose -f all_in_one.yml up -d && cd -
+clean_deploy:
+	cd all_in_one && docker-compose -f all_in_one.yml down && cd -
+probe_deploy:
+	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py"
+cluster:
+	cd kubernetes_demo;./start.sh baseup;sleep 10;./start.sh appup;cd -
+clean_cluster:
+	cd kubernetes_demo;./start.sh cleanup;cd -
+cluster_status:
+	kubectl get pods -n milvus -o wide
+probe_cluster:
+	@echo
+	$(shell kubectl get service -n milvus | grep milvus-proxy-servers | awk {'print $$4,$$5'} | awk -F"[: ]" {'print "docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c \"python all_in_one/probe_test.py --port="$$2" --host="$$1"\""'})
+probe:
+	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py --port=${PORT} --host=${HOST}"
+clean_coverage:
+	rm -rf cov_html
+clean: clean_coverage clean_deploy clean_cluster
+style:
+	pycodestyle --config=.
+coverage:
+	pytest --cov-report html:cov_html --cov=mishards
+test:
+	pytest
diff --git a/shards/Tutorial_CN.md b/shards/Tutorial_CN.md
new file mode 100644
index 0000000000..192a0fd285
--- /dev/null
+++ b/shards/Tutorial_CN.md
@@ -0,0 +1,147 @@
+# Mishards User Guide
+---
+Milvus aims to help users run approximate search and analytics over massive amounts of unstructured data. A single Milvus instance handles billion-scale datasets; for tens or hundreds of billions of vectors a Milvus cluster is needed, and that cluster should look like a single-node instance to upstream applications while keeping latency low under high concurrency. Mishards is this cluster middleware: internally it handles request forwarding, read/write splitting, horizontal scaling and dynamic capacity expansion, presenting users with one Milvus instance whose memory and compute can be scaled out without limit.
+
+## Runtime environment
+---
+
+### Quick start on a single machine
+**Requires `python >= 3.4`**
+
+```
+1. cd milvus/shards
+2. pip install -r requirements.txt
+3. nvidia-docker run --rm -d -p 19530:19530 -v /tmp/milvus/db:/opt/milvus/db milvusdb/milvus:0.5.0-d102119-ede20b
+4. sudo chown -R $USER:$USER /tmp/milvus
+5. cp mishards/.env.example mishards/.env
+6. python mishards/main.py    # configure mishards to listen on port 19532 in .env
+7. make probe port=19532      # health check
+```
+
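The `make probe` target in the Makefile above is only a thin wrapper around a client connect call (see `all_in_one/probe_test.py` added later in this patch). A minimal sketch of such a health probe follows, assuming the legacy `pymilvus` client API that probe_test.py itself uses (`Milvus().connect(host, port)` returning a status object):

```python
from milvus import Milvus  # legacy pymilvus client, same API probe_test.py relies on


def probe(host='127.0.0.1', port=19532):
    """Return 0 when a mishards/Milvus endpoint accepts a connection, 1 otherwise."""
    client = Milvus()
    try:
        status = client.connect(host=host, port=port)
    except Exception as exc:  # connection refused, bad address, ...
        print('Error: {}'.format(exc))
        return 1
    if status.OK():
        print('Pass: connected to {}:{}'.format(host, port))
        return 0
    print('Error: {}'.format(status))
    return 1


if __name__ == '__main__':
    raise SystemExit(probe())
```

Port 19532 here matches the quick-start configuration above; pass a different port when probing the docker-compose (19531) or Kubernetes deployments.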
+### Quick start with containers
+`all_in_one` starts two Milvus instances, one mishards instance and one Jaeger tracing instance on the host.
+
+**Start**
+```
+cd milvus/shards
+1. install docker-compose
+2. make build
+3. make deploy          # listens on port 19531
+4. make clean_deploy    # tear the services down
+5. make probe_deploy    # health check
+```
+
+**Open the Jaeger UI**
+```
+Open "http://127.0.0.1:16686/" in a browser
+```
+
+### Quick start on Kubernetes
+**Prerequisites**
+```
+- a Kubernetes cluster
+- nvidia-docker installed
+- shared storage
+- kubectl installed and able to reach the cluster
+```
+
+**Steps**
+```
+cd milvus/shards
+1. make cluster          # start the cluster
+2. make probe_cluster    # health check
+3. make clean_cluster    # shut the cluster down
+```
+
+**Scale out compute (read-only) instances**
+```
+cd milvus/shards/kubernetes_demo/
+./start.sh scale-ro-server 2    # scale the compute instances to 2
+```
+
+**Scale out proxy instances**
+```
+cd milvus/shards/kubernetes_demo/
+./start.sh scale-proxy 2    # scale the proxy instances to 2
+```
+
+**View logs**
+```
+kubectl logs -f --tail=1000 -n milvus milvus-ro-servers-0    # logs of compute node milvus-ro-servers-0
+```
+
+## Testing
+
+**Run the unit tests**
+```
+1. cd milvus/shards
+2. make test
+```
+
+**Unit test coverage**
+```
+1. cd milvus/shards
+2. make coverage
+```
+
+**Code style check**
+```
+1. cd milvus/shards
+2. make style
+```
+
+## Mishards configuration reference
+
+### Global
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| Debug | No | bool | True | whether to run in debug mode |
+| TIMEZONE | No | string | "UTC" | time zone |
+| MAX_RETRY | No | int | 3 | maximum number of connection retries |
+| SERVER_PORT | No | int | 19530 | service listening port |
+| WOSERVER | **Yes** | str | - | address of the writable Milvus instance. Currently only a static address is supported, e.g. "tcp://127.0.0.1:19530" |
+
+### Metadata
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| SQLALCHEMY_DATABASE_URI | **Yes** | string | - | URI of the metadata database |
+| SQL_ECHO | No | bool | False | whether to print detailed SQL statements |
+| SQLALCHEMY_DATABASE_TEST_URI | No | string | - | URI of the metadata database in the test environment |
+| SQL_TEST_ECHO | No | bool | False | whether to print detailed SQL statements in the test environment |
+
+### Service discovery
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| DISCOVERY_PLUGIN_PATH | No | string | - | search path for user-defined service discovery plugins; defaults to the system search path |
+| DISCOVERY_CLASS_NAME | No | string | static | class looked up under the plugin search path and instantiated. The system currently provides **static** and **kubernetes**; defaults to **static** |
+| DISCOVERY_STATIC_HOSTS | No | list | [] | when **DISCOVERY_CLASS_NAME** is **static**, the list of service addresses, e.g. "192.168.1.188,192.168.1.190" |
+| DISCOVERY_STATIC_PORT | No | int | 19530 | when **DISCOVERY_CLASS_NAME** is **static**, the port the hosts listen on |
+| DISCOVERY_KUBERNETES_NAMESPACE | No | string | - | when **DISCOVERY_CLASS_NAME** is **kubernetes**, the cluster namespace |
+| DISCOVERY_KUBERNETES_IN_CLUSTER | No | bool | False | when **DISCOVERY_CLASS_NAME** is **kubernetes**, whether service discovery itself runs inside the cluster |
+| DISCOVERY_KUBERNETES_POLL_INTERVAL | No | int | 5 | when **DISCOVERY_CLASS_NAME** is **kubernetes**, how often the service list is polled, in seconds |
+| DISCOVERY_KUBERNETES_POD_PATT | No | string | - | when **DISCOVERY_CLASS_NAME** is **kubernetes**, regular expression matching readable Milvus instances |
+| DISCOVERY_KUBERNETES_LABEL_SELECTOR | No | string | - | when **DISCOVERY_CLASS_NAME** is **kubernetes**, label selector matching readable Milvus instances |
+
+### Tracing
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| TRACER_PLUGIN_PATH | No | string | - | search path for user-defined tracing plugins; defaults to the system search path |
+| TRACER_CLASS_NAME | No | string | "" | tracing backend; currently only **Jaeger** is implemented, and tracing is disabled by default |
+| TRACING_SERVICE_NAME | No | string | "mishards" | when the tracer is **Jaeger**, the tracing service name |
+| TRACING_SAMPLER_TYPE | No | string | "const" | when the tracer is **Jaeger**, the trace sampler type |
+| TRACING_SAMPLER_PARAM | No | int | 1 | when the tracer is **Jaeger**, the trace sampling rate |
+| TRACING_LOG_PAYLOAD | No | bool | False | when the tracer is **Jaeger**, whether traces capture payloads |
+
+### Logging
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| LOG_LEVEL | No | string | "DEBUG" if Debug is ON else "INFO" | log level |
+| LOG_PATH | No | string | "/tmp/mishards" | log path |
+| LOG_NAME | No | string | "logfile" | log file name |
+
+### Routing
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| ROUTER_PLUGIN_PATH | No | string | - | search path for user-defined router plugins; defaults to the system search path |
+| ROUTER_CLASS_NAME | No | string | FileBasedHashRingRouter | class that routes requests; custom classes can be registered. Currently only **FileBasedHashRingRouter** is provided |
+| ROUTER_CLASS_TEST_NAME | No | string | FileBasedHashRingRouter | router class used in the test environment; custom classes can be registered |
diff --git a/shards/all_in_one/all_in_one.yml b/shards/all_in_one/all_in_one.yml new
file mode 100644 index 0000000000..40473fe8b9 --- /dev/null +++ b/shards/all_in_one/all_in_one.yml @@ -0,0 +1,53 @@ +version: "2.3" +services: + milvus_wr: + runtime: nvidia + restart: always + image: milvusdb/milvus:0.5.0-d102119-ede20b + volumes: + - /tmp/milvus/db:/opt/milvus/db + + milvus_ro: + runtime: nvidia + restart: always + image: milvusdb/milvus:0.5.0-d102119-ede20b + volumes: + - /tmp/milvus/db:/opt/milvus/db + - ./ro_server.yml:/opt/milvus/conf/server_config.yaml + + jaeger: + restart: always + image: jaegertracing/all-in-one:1.14 + ports: + - "0.0.0.0:5775:5775/udp" + - "0.0.0.0:16686:16686" + - "0.0.0.0:9441:9441" + environment: + COLLECTOR_ZIPKIN_HTTP_PORT: 9411 + + mishards: + restart: always + image: milvusdb/mishards + ports: + - "0.0.0.0:19531:19531" + - "0.0.0.0:19532:19532" + volumes: + - /tmp/milvus/db:/tmp/milvus/db + # - /tmp/mishards_env:/source/mishards/.env + command: ["python", "mishards/main.py"] + environment: + FROM_EXAMPLE: 'true' + DEBUG: 'true' + SERVER_PORT: 19531 + WOSERVER: tcp://milvus_wr:19530 + DISCOVERY_PLUGIN_PATH: static + DISCOVERY_STATIC_HOSTS: milvus_wr,milvus_ro + TRACER_CLASS_NAME: jaeger + TRACING_SERVICE_NAME: mishards-demo + TRACING_REPORTING_HOST: jaeger + TRACING_REPORTING_PORT: 5775 + + depends_on: + - milvus_wr + - milvus_ro + - jaeger diff --git a/shards/all_in_one/probe_test.py b/shards/all_in_one/probe_test.py new file mode 100644 index 0000000000..6250465910 --- /dev/null +++ b/shards/all_in_one/probe_test.py @@ -0,0 +1,25 @@ +from milvus import Milvus + +RED = '\033[0;31m' +GREEN = '\033[0;32m' +ENDC = '' + + +def test(host='127.0.0.1', port=19531): + client = Milvus() + try: + status = client.connect(host=host, port=port) + if status.OK(): + print('{}Pass: Connected{}'.format(GREEN, ENDC)) + return 0 + else: + print('{}Error: {}{}'.format(RED, status, ENDC)) + return 1 + except Exception as exc: + print('{}Error: {}{}'.format(RED, exc, ENDC)) + return 1 + + +if __name__ == '__main__': + import fire + fire.Fire(test) diff --git a/shards/all_in_one/ro_server.yml b/shards/all_in_one/ro_server.yml new file mode 100644 index 0000000000..10cf695448 --- /dev/null +++ b/shards/all_in_one/ro_server.yml @@ -0,0 +1,41 @@ +server_config: + address: 0.0.0.0 # milvus server ip address (IPv4) + port: 19530 # port range: 1025 ~ 65534 + deploy_mode: cluster_readonly # deployment type: single, cluster_readonly, cluster_writable + time_zone: UTC+8 + +db_config: + primary_path: /opt/milvus # path used to store data and meta + secondary_path: # path used to store data only, split by semicolon + + backend_url: sqlite://:@:/ # URI format: dialect://username:password@host:port/database + # Keep 'dialect://:@:/', and replace other texts with real values + # Replace 'dialect' with 'mysql' or 'sqlite' + + insert_buffer_size: 4 # GB, maximum insert buffer size allowed + # sum of insert_buffer_size and cpu_cache_capacity cannot exceed total memory + + preload_table: # preload data at startup, '*' means load all tables, empty value means no preload + # you can specify preload tables like this: table1,table2,table3 + +metric_config: + enable_monitor: false # enable monitoring or not + collector: prometheus # prometheus + prometheus_config: + port: 8080 # port prometheus uses to fetch metrics + +cache_config: + cpu_cache_capacity: 16 # GB, CPU memory used for cache + cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered + gpu_cache_capacity: 4 # GB, GPU memory used for cache + gpu_cache_threshold: 0.85 # percentage of 
data that will be kept when cache cleanup is triggered + cache_insert_data: false # whether to load inserted data into cache + +engine_config: + use_blas_threshold: 20 # if nq < use_blas_threshold, use SSE, faster with fluctuated response times + # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times + +resource_config: + search_resources: # define the GPUs used for search computation, valid value: gpux + - gpu0 + index_build_device: gpu0 # GPU used for building index diff --git a/shards/conftest.py b/shards/conftest.py new file mode 100644 index 0000000000..4cdcbdbe0c --- /dev/null +++ b/shards/conftest.py @@ -0,0 +1,39 @@ +import os +import logging +import pytest +import grpc +import tempfile +import shutil +from mishards import settings, db, create_app + +logger = logging.getLogger(__name__) + +tpath = tempfile.mkdtemp() +dirpath = '{}/db'.format(tpath) +filepath = '{}/meta.sqlite'.format(dirpath) +os.makedirs(dirpath, 0o777) +settings.TestingConfig.SQLALCHEMY_DATABASE_URI = 'sqlite:///{}?check_same_thread=False'.format( + filepath) + + +@pytest.fixture +def app(request): + app = create_app(settings.TestingConfig) + db.drop_all() + db.create_all() + + yield app + + db.drop_all() + app.stop() + # shutil.rmtree(tpath) + + +@pytest.fixture +def started_app(app): + app.on_pre_run() + app.start(settings.SERVER_TEST_PORT) + + yield app + + app.stop() diff --git a/shards/discovery/__init__.py b/shards/discovery/__init__.py new file mode 100644 index 0000000000..a591d1cc1c --- /dev/null +++ b/shards/discovery/__init__.py @@ -0,0 +1,37 @@ +import os +import os +import sys +if __name__ == '__main__': + sys.path.append(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) + +import logging +from utils import dotdict + +logger = logging.getLogger(__name__) + + +class DiscoveryConfig(dotdict): + CONFIG_PREFIX = 'DISCOVERY_' + + def dump(self): + logger.info('----------- DiscoveryConfig -----------------') + for k, v in self.items(): + logger.info('{}: {}'.format(k, v)) + if len(self) <= 0: + logger.error(' Empty DiscoveryConfig Found! 
') + logger.info('---------------------------------------------') + + @classmethod + def Create(cls, **kwargs): + o = cls() + + for k, v in os.environ.items(): + if not k.startswith(cls.CONFIG_PREFIX): + continue + o[k] = v + for k, v in kwargs.items(): + o[k] = v + + o.dump() + return o diff --git a/shards/discovery/factory.py b/shards/discovery/factory.py new file mode 100644 index 0000000000..5f5c7fcf95 --- /dev/null +++ b/shards/discovery/factory.py @@ -0,0 +1,22 @@ +import logging +from discovery import DiscoveryConfig +from utils.plugins import BaseMixin + +logger = logging.getLogger(__name__) +PLUGIN_PACKAGE_NAME = 'discovery.plugins' + + +class DiscoveryFactory(BaseMixin): + PLUGIN_TYPE = 'Discovery' + + def __init__(self, searchpath=None): + super().__init__(searchpath=searchpath, package_name=PLUGIN_PACKAGE_NAME) + + def _create(self, plugin_class, **kwargs): + conn_mgr = kwargs.pop('conn_mgr', None) + if not conn_mgr: + raise RuntimeError('Please pass conn_mgr to create discovery!') + + plugin_config = DiscoveryConfig.Create() + plugin = plugin_class.Create(plugin_config=plugin_config, conn_mgr=conn_mgr, **kwargs) + return plugin diff --git a/shards/discovery/plugins/kubernetes_provider.py b/shards/discovery/plugins/kubernetes_provider.py new file mode 100644 index 0000000000..aaf6091f83 --- /dev/null +++ b/shards/discovery/plugins/kubernetes_provider.py @@ -0,0 +1,346 @@ +import os +import sys +if __name__ == '__main__': + sys.path.append(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) + +import re +import logging +import time +import copy +import threading +import queue +import enum +from kubernetes import client, config, watch + +logger = logging.getLogger(__name__) + +INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' + + +class EventType(enum.Enum): + PodHeartBeat = 1 + Watch = 2 + + +class K8SMixin: + def __init__(self, namespace, in_cluster=False, **kwargs): + self.namespace = namespace + self.in_cluster = in_cluster + self.kwargs = kwargs + self.v1 = kwargs.get('v1', None) + if not self.namespace: + self.namespace = open(INCLUSTER_NAMESPACE_PATH).read() + + if not self.v1: + config.load_incluster_config( + ) if self.in_cluster else config.load_kube_config() + self.v1 = client.CoreV1Api() + + +class K8SHeartbeatHandler(threading.Thread, K8SMixin): + name = 'kubernetes' + + def __init__(self, + message_queue, + namespace, + label_selector, + in_cluster=False, + **kwargs): + K8SMixin.__init__(self, + namespace=namespace, + in_cluster=in_cluster, + **kwargs) + threading.Thread.__init__(self) + self.queue = message_queue + self.terminate = False + self.label_selector = label_selector + self.poll_interval = kwargs.get('poll_interval', 5) + + def run(self): + while not self.terminate: + try: + pods = self.v1.list_namespaced_pod( + namespace=self.namespace, + label_selector=self.label_selector) + event_message = {'eType': EventType.PodHeartBeat, 'events': []} + for item in pods.items: + pod = self.v1.read_namespaced_pod(name=item.metadata.name, + namespace=self.namespace) + name = pod.metadata.name + ip = pod.status.pod_ip + phase = pod.status.phase + reason = pod.status.reason + message = pod.status.message + ready = True if phase == 'Running' else False + + pod_event = dict(pod=name, + ip=ip, + ready=ready, + reason=reason, + message=message) + + event_message['events'].append(pod_event) + + self.queue.put(event_message) + + except Exception as exc: + logger.error(exc) + + time.sleep(self.poll_interval) + + def stop(self): + 
self.terminate = True + + +class K8SEventListener(threading.Thread, K8SMixin): + def __init__(self, message_queue, namespace, in_cluster=False, **kwargs): + K8SMixin.__init__(self, + namespace=namespace, + in_cluster=in_cluster, + **kwargs) + threading.Thread.__init__(self) + self.queue = message_queue + self.terminate = False + self.at_start_up = True + self._stop_event = threading.Event() + + def stop(self): + self.terminate = True + self._stop_event.set() + + def run(self): + resource_version = '' + w = watch.Watch() + for event in w.stream(self.v1.list_namespaced_event, + namespace=self.namespace, + field_selector='involvedObject.kind=Pod'): + if self.terminate: + break + + resource_version = int(event['object'].metadata.resource_version) + + info = dict( + eType=EventType.Watch, + pod=event['object'].involved_object.name, + reason=event['object'].reason, + message=event['object'].message, + start_up=self.at_start_up, + ) + self.at_start_up = False + # logger.info('Received event: {}'.format(info)) + self.queue.put(info) + + +class EventHandler(threading.Thread): + def __init__(self, mgr, message_queue, namespace, pod_patt, **kwargs): + threading.Thread.__init__(self) + self.mgr = mgr + self.queue = message_queue + self.kwargs = kwargs + self.terminate = False + self.pod_patt = re.compile(pod_patt) + self.namespace = namespace + + def stop(self): + self.terminate = True + + def on_drop(self, event, **kwargs): + pass + + def on_pod_started(self, event, **kwargs): + try_cnt = 3 + pod = None + while try_cnt > 0: + try_cnt -= 1 + try: + pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], + namespace=self.namespace) + if not pod.status.pod_ip: + time.sleep(0.5) + continue + break + except client.rest.ApiException as exc: + time.sleep(0.5) + + if try_cnt <= 0 and not pod: + if not event['start_up']: + logger.error('Pod {} is started but cannot read pod'.format( + event['pod'])) + return + elif try_cnt <= 0 and not pod.status.pod_ip: + logger.warning('NoPodIPFoundError') + return + + logger.info('Register POD {} with IP {}'.format( + pod.metadata.name, pod.status.pod_ip)) + self.mgr.add_pod(name=pod.metadata.name, ip=pod.status.pod_ip) + + def on_pod_killing(self, event, **kwargs): + logger.info('Unregister POD {}'.format(event['pod'])) + self.mgr.delete_pod(name=event['pod']) + + def on_pod_heartbeat(self, event, **kwargs): + names = self.mgr.conn_mgr.conn_names + + running_names = set() + for each_event in event['events']: + if each_event['ready']: + self.mgr.add_pod(name=each_event['pod'], ip=each_event['ip']) + running_names.add(each_event['pod']) + else: + self.mgr.delete_pod(name=each_event['pod']) + + to_delete = names - running_names + for name in to_delete: + self.mgr.delete_pod(name) + + logger.info(self.mgr.conn_mgr.conn_names) + + def handle_event(self, event): + if event['eType'] == EventType.PodHeartBeat: + return self.on_pod_heartbeat(event) + + if not event or (event['reason'] not in ('Started', 'Killing')): + return self.on_drop(event) + + if not re.match(self.pod_patt, event['pod']): + return self.on_drop(event) + + logger.info('Handling event: {}'.format(event)) + + if event['reason'] == 'Started': + return self.on_pod_started(event) + + return self.on_pod_killing(event) + + def run(self): + while not self.terminate: + try: + event = self.queue.get(timeout=1) + self.handle_event(event) + except queue.Empty: + continue + + +class KubernetesProviderSettings: + def __init__(self, namespace, pod_patt, label_selector, in_cluster, + poll_interval, port=None, **kwargs): + 
self.namespace = namespace + self.pod_patt = pod_patt + self.label_selector = label_selector + self.in_cluster = in_cluster + self.poll_interval = poll_interval + self.port = int(port) if port else 19530 + + +class KubernetesProvider(object): + name = 'kubernetes' + + def __init__(self, plugin_config, conn_mgr, **kwargs): + self.namespace = plugin_config.DISCOVERY_KUBERNETES_NAMESPACE + self.pod_patt = plugin_config.DISCOVERY_KUBERNETES_POD_PATT + self.label_selector = plugin_config.DISCOVERY_KUBERNETES_LABEL_SELECTOR + self.in_cluster = plugin_config.DISCOVERY_KUBERNETES_IN_CLUSTER.lower() + self.in_cluster = self.in_cluster == 'true' + self.poll_interval = plugin_config.DISCOVERY_KUBERNETES_POLL_INTERVAL + self.poll_interval = int(self.poll_interval) if self.poll_interval else 5 + self.port = plugin_config.DISCOVERY_KUBERNETES_PORT + self.port = int(self.port) if self.port else 19530 + self.kwargs = kwargs + self.queue = queue.Queue() + + self.conn_mgr = conn_mgr + + if not self.namespace: + self.namespace = open(incluster_namespace_path).read() + + config.load_incluster_config( + ) if self.in_cluster else config.load_kube_config() + self.v1 = client.CoreV1Api() + + self.listener = K8SEventListener(message_queue=self.queue, + namespace=self.namespace, + in_cluster=self.in_cluster, + v1=self.v1, + **kwargs) + + self.pod_heartbeater = K8SHeartbeatHandler( + message_queue=self.queue, + namespace=self.namespace, + label_selector=self.label_selector, + in_cluster=self.in_cluster, + v1=self.v1, + poll_interval=self.poll_interval, + **kwargs) + + self.event_handler = EventHandler(mgr=self, + message_queue=self.queue, + namespace=self.namespace, + pod_patt=self.pod_patt, + **kwargs) + + def add_pod(self, name, ip): + self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) + + def delete_pod(self, name): + self.conn_mgr.unregister(name) + + def start(self): + self.listener.daemon = True + self.listener.start() + self.event_handler.start() + + self.pod_heartbeater.start() + + def stop(self): + self.listener.stop() + self.pod_heartbeater.stop() + self.event_handler.stop() + + @classmethod + def Create(cls, conn_mgr, plugin_config, **kwargs): + discovery = cls(plugin_config=plugin_config, conn_mgr=conn_mgr, **kwargs) + return discovery + + +def setup(app): + logger.info('Plugin \'{}\' Installed In Package: {}'.format(__file__, app.plugin_package_name)) + app.on_plugin_setup(KubernetesProvider) + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))))) + sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__))))) + + class Connect: + def register(self, name, value): + logger.error('Register: {} - {}'.format(name, value)) + + def unregister(self, name): + logger.error('Unregister: {}'.format(name)) + + @property + def conn_names(self): + return set() + + connect_mgr = Connect() + + from discovery import DiscoveryConfig + settings = DiscoveryConfig(DISCOVERY_KUBERNETES_NAMESPACE='xp', + DISCOVERY_KUBERNETES_POD_PATT=".*-ro-servers-.*", + DISCOVERY_KUBERNETES_LABEL_SELECTOR='tier=ro-servers', + DISCOVERY_KUBERNETES_POLL_INTERVAL=5, + DISCOVERY_KUBERNETES_IN_CLUSTER=False) + + provider_class = KubernetesProvider + t = provider_class(conn_mgr=connect_mgr, plugin_config=settings) + t.start() + cnt = 100 + while cnt > 0: + time.sleep(2) + cnt -= 1 + t.stop() diff --git a/shards/discovery/plugins/static_provider.py 
b/shards/discovery/plugins/static_provider.py new file mode 100644 index 0000000000..fca8c717db --- /dev/null +++ b/shards/discovery/plugins/static_provider.py @@ -0,0 +1,45 @@ +import os +import sys +if __name__ == '__main__': + sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +import logging +import socket +from environs import Env + +logger = logging.getLogger(__name__) +env = Env() + + +class StaticDiscovery(object): + name = 'static' + + def __init__(self, config, conn_mgr, **kwargs): + self.conn_mgr = conn_mgr + hosts = env.list('DISCOVERY_STATIC_HOSTS', []) + self.port = env.int('DISCOVERY_STATIC_PORT', 19530) + self.hosts = [socket.gethostbyname(host) for host in hosts] + + def start(self): + for host in self.hosts: + self.add_pod(host, host) + + def stop(self): + for host in self.hosts: + self.delete_pod(host) + + def add_pod(self, name, ip): + self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) + + def delete_pod(self, name): + self.conn_mgr.unregister(name) + + @classmethod + def Create(cls, conn_mgr, plugin_config, **kwargs): + discovery = cls(config=plugin_config, conn_mgr=conn_mgr, **kwargs) + return discovery + + +def setup(app): + logger.info('Plugin \'{}\' Installed In Package: {}'.format(__file__, app.plugin_package_name)) + app.on_plugin_setup(StaticDiscovery) diff --git a/shards/kubernetes_demo/milvus_auxiliary.yaml b/shards/kubernetes_demo/milvus_auxiliary.yaml new file mode 100644 index 0000000000..fff27adc6f --- /dev/null +++ b/shards/kubernetes_demo/milvus_auxiliary.yaml @@ -0,0 +1,67 @@ +kind: Service +apiVersion: v1 +metadata: + name: milvus-mysql + namespace: milvus +spec: + type: ClusterIP + selector: + app: milvus + tier: mysql + ports: + - protocol: TCP + port: 3306 + targetPort: 3306 + name: mysql + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: milvus-mysql + namespace: milvus +spec: + selector: + matchLabels: + app: milvus + tier: mysql + replicas: 1 + template: + metadata: + labels: + app: milvus + tier: mysql + spec: + containers: + - name: milvus-mysql + image: mysql:5.7 + imagePullPolicy: IfNotPresent + # lifecycle: + # postStart: + # exec: + # command: ["/bin/sh", "-c", "mysql -h milvus-mysql -uroot -p${MYSQL_ROOT_PASSWORD} -e \"CREATE DATABASE IF NOT EXISTS ${DATABASE};\"; \ + # mysql -uroot -p${MYSQL_ROOT_PASSWORD} -e \"GRANT ALL PRIVILEGES ON ${DATABASE}.* TO 'root'@'%';\""] + env: + - name: MYSQL_ROOT_PASSWORD + value: milvusroot + - name: DATABASE + value: milvus + ports: + - name: mysql-port + containerPort: 3306 + volumeMounts: + - name: milvus-mysql-disk + mountPath: /data + subPath: mysql + - name: milvus-mysql-configmap + mountPath: /etc/mysql/mysql.conf.d/mysqld.cnf + subPath: milvus_mysql_config.yml + + volumes: + - name: milvus-mysql-disk + persistentVolumeClaim: + claimName: milvus-mysql-disk + - name: milvus-mysql-configmap + configMap: + name: milvus-mysql-configmap diff --git a/shards/kubernetes_demo/milvus_configmap.yaml b/shards/kubernetes_demo/milvus_configmap.yaml new file mode 100644 index 0000000000..cb751c02f1 --- /dev/null +++ b/shards/kubernetes_demo/milvus_configmap.yaml @@ -0,0 +1,185 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: milvus-mysql-configmap + namespace: milvus +data: + milvus_mysql_config.yml: | + [mysqld] + pid-file = /var/run/mysqld/mysqld.pid + socket = /var/run/mysqld/mysqld.sock + datadir = /data + log-error = /var/log/mysql/error.log # mount out to host + # By default we only accept connections from localhost + bind-address = 0.0.0.0 + # 
Disabling symbolic-links is recommended to prevent assorted security risks + symbolic-links=0 + character-set-server = utf8mb4 + collation-server = utf8mb4_unicode_ci + init_connect='SET NAMES utf8mb4' + skip-character-set-client-handshake = true + max_connections = 1000 + wait_timeout = 31536000 + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: milvus-proxy-configmap + namespace: milvus +data: + milvus_proxy_config.yml: | + DEBUG=True + TESTING=False + + WOSERVER=tcp://milvus-wo-servers:19530 + SERVER_PORT=19530 + + DISCOVERY_CLASS_NAME=kubernetes + DISCOVERY_KUBERNETES_NAMESPACE=milvus + DISCOVERY_KUBERNETES_POD_PATT=.*-ro-servers-.* + DISCOVERY_KUBERNETES_LABEL_SELECTOR=tier=ro-servers + DISCOVERY_KUBERNETES_POLL_INTERVAL=10 + DISCOVERY_KUBERNETES_IN_CLUSTER=True + + SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:milvusroot@milvus-mysql:3306/milvus?charset=utf8mb4 + SQLALCHEMY_POOL_SIZE=50 + SQLALCHEMY_POOL_RECYCLE=7200 + + LOG_PATH=/var/log/milvus + TIMEZONE=Asia/Shanghai +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: milvus-roserver-configmap + namespace: milvus +data: + config.yml: | + server_config: + address: 0.0.0.0 + port: 19530 + mode: cluster_readonly + + db_config: + primary_path: /var/milvus + backend_url: mysql://root:milvusroot@milvus-mysql:3306/milvus + insert_buffer_size: 2 + + metric_config: + enable_monitor: off # true is on, false is off + + cache_config: + cpu_cache_capacity: 12 # memory pool to hold index data, unit: GB + cpu_cache_free_percent: 0.85 + insert_cache_immediately: false + # gpu_cache_capacity: 4 + # gpu_cache_free_percent: 0.85 + # gpu_ids: + # - 0 + + engine_config: + use_blas_threshold: 800 + + resource_config: + search_resources: + - gpu0 + + log.conf: | + * GLOBAL: + FORMAT = "%datetime | %level | %logger | %msg" + FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-global.log" + ENABLED = true + TO_FILE = true + TO_STANDARD_OUTPUT = true + SUBSECOND_PRECISION = 3 + PERFORMANCE_TRACKING = false + MAX_LOG_FILE_SIZE = 2097152 ## Throw log files away after 2MB + * DEBUG: + FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-debug.log" + ENABLED = true + * WARNING: + FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-warning.log" + * TRACE: + FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-trace.log" + * VERBOSE: + FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg" + TO_FILE = true + TO_STANDARD_OUTPUT = true + ## Error logs + * ERROR: + ENABLED = true + FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-error.log" + * FATAL: + ENABLED = true + FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-fatal.log" + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: milvus-woserver-configmap + namespace: milvus +data: + config.yml: | + server_config: + address: 0.0.0.0 + port: 19530 + mode: cluster_writable + + db_config: + primary_path: /var/milvus + backend_url: mysql://root:milvusroot@milvus-mysql:3306/milvus + insert_buffer_size: 2 + + metric_config: + enable_monitor: off # true is on, false is off + + cache_config: + cpu_cache_capacity: 2 # memory pool to hold index data, unit: GB + cpu_cache_free_percent: 0.85 + insert_cache_immediately: false + # gpu_cache_capacity: 4 + # gpu_cache_free_percent: 0.85 + # gpu_ids: + # - 0 + + engine_config: + use_blas_threshold: 800 + + resource_config: + search_resources: + - gpu0 + + + log.conf: | + * GLOBAL: + FORMAT = "%datetime | %level | %logger | %msg" + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-global.log" + ENABLED = true + TO_FILE = 
true + TO_STANDARD_OUTPUT = true + SUBSECOND_PRECISION = 3 + PERFORMANCE_TRACKING = false + MAX_LOG_FILE_SIZE = 2097152 ## Throw log files away after 2MB + * DEBUG: + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-debug.log" + ENABLED = true + * WARNING: + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-warning.log" + * TRACE: + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-trace.log" + * VERBOSE: + FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg" + TO_FILE = true + TO_STANDARD_OUTPUT = true + ## Error logs + * ERROR: + ENABLED = true + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-error.log" + * FATAL: + ENABLED = true + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-fatal.log" diff --git a/shards/kubernetes_demo/milvus_data_pvc.yaml b/shards/kubernetes_demo/milvus_data_pvc.yaml new file mode 100644 index 0000000000..480354507d --- /dev/null +++ b/shards/kubernetes_demo/milvus_data_pvc.yaml @@ -0,0 +1,57 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: milvus-db-disk + namespace: milvus +spec: + accessModes: + - ReadWriteMany + storageClassName: default + resources: + requests: + storage: 50Gi + +--- + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: milvus-log-disk + namespace: milvus +spec: + accessModes: + - ReadWriteMany + storageClassName: default + resources: + requests: + storage: 50Gi + +--- + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: milvus-mysql-disk + namespace: milvus +spec: + accessModes: + - ReadWriteMany + storageClassName: default + resources: + requests: + storage: 50Gi + +--- + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: milvus-redis-disk + namespace: milvus +spec: + accessModes: + - ReadWriteOnce + storageClassName: default + resources: + requests: + storage: 5Gi diff --git a/shards/kubernetes_demo/milvus_proxy.yaml b/shards/kubernetes_demo/milvus_proxy.yaml new file mode 100644 index 0000000000..13916b7b2b --- /dev/null +++ b/shards/kubernetes_demo/milvus_proxy.yaml @@ -0,0 +1,88 @@ +kind: Service +apiVersion: v1 +metadata: + name: milvus-proxy-servers + namespace: milvus +spec: + type: LoadBalancer + selector: + app: milvus + tier: proxy + ports: + - name: tcp + protocol: TCP + port: 19530 + targetPort: 19530 + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: milvus-proxy + namespace: milvus +spec: + selector: + matchLabels: + app: milvus + tier: proxy + replicas: 1 + template: + metadata: + labels: + app: milvus + tier: proxy + spec: + containers: + - name: milvus-proxy + image: milvusdb/mishards:0.1.0-rc0 + imagePullPolicy: Always + command: ["python", "mishards/main.py"] + resources: + limits: + memory: "3Gi" + cpu: "4" + requests: + memory: "2Gi" + ports: + - name: tcp + containerPort: 5000 + env: + # - name: SQL_ECHO + # value: "True" + - name: DEBUG + value: "False" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MILVUS_CLIENT + value: "False" + - name: LOG_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: LOG_PATH + value: /var/log/milvus + - name: SD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SD_ROSERVER_POD_PATT + value: ".*-ro-servers-.*" + volumeMounts: + - name: milvus-proxy-configmap + mountPath: /source/mishards/.env + subPath: milvus_proxy_config.yml + - name: milvus-log-disk + mountPath: /var/log/milvus + subPath: proxylog + # imagePullSecrets: + # - name: regcred + volumes: + - name: milvus-proxy-configmap + configMap: + name: 
milvus-proxy-configmap + - name: milvus-log-disk + persistentVolumeClaim: + claimName: milvus-log-disk diff --git a/shards/kubernetes_demo/milvus_rbac.yaml b/shards/kubernetes_demo/milvus_rbac.yaml new file mode 100644 index 0000000000..e6f302be15 --- /dev/null +++ b/shards/kubernetes_demo/milvus_rbac.yaml @@ -0,0 +1,24 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pods-list +rules: +- apiGroups: [""] + resources: ["pods", "events"] + verbs: ["list", "get", "watch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pods-list +subjects: +- kind: ServiceAccount + name: default + namespace: milvus +roleRef: + kind: ClusterRole + name: pods-list + apiGroup: rbac.authorization.k8s.io +--- diff --git a/shards/kubernetes_demo/milvus_stateful_servers.yaml b/shards/kubernetes_demo/milvus_stateful_servers.yaml new file mode 100644 index 0000000000..4ff5045599 --- /dev/null +++ b/shards/kubernetes_demo/milvus_stateful_servers.yaml @@ -0,0 +1,68 @@ +kind: Service +apiVersion: v1 +metadata: + name: milvus-ro-servers + namespace: milvus +spec: + type: ClusterIP + selector: + app: milvus + tier: ro-servers + ports: + - protocol: TCP + port: 19530 + targetPort: 19530 + +--- + +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: milvus-ro-servers + namespace: milvus +spec: + serviceName: "milvus-ro-servers" + replicas: 1 + template: + metadata: + labels: + app: milvus + tier: ro-servers + spec: + terminationGracePeriodSeconds: 11 + containers: + - name: milvus-ro-server + image: milvusdb/milvus:0.5.0-d102119-ede20b + imagePullPolicy: Always + ports: + - containerPort: 19530 + resources: + limits: + memory: "16Gi" + cpu: "8.0" + requests: + memory: "14Gi" + volumeMounts: + - name: milvus-db-disk + mountPath: /var/milvus + subPath: dbdata + - name: milvus-roserver-configmap + mountPath: /opt/milvus/conf/server_config.yaml + subPath: config.yml + - name: milvus-roserver-configmap + mountPath: /opt/milvus/conf/log_config.conf + subPath: log.conf + # imagePullSecrets: + # - name: regcred + # tolerations: + # - key: "worker" + # operator: "Equal" + # value: "performance" + # effect: "NoSchedule" + volumes: + - name: milvus-roserver-configmap + configMap: + name: milvus-roserver-configmap + - name: milvus-db-disk + persistentVolumeClaim: + claimName: milvus-db-disk diff --git a/shards/kubernetes_demo/milvus_write_servers.yaml b/shards/kubernetes_demo/milvus_write_servers.yaml new file mode 100644 index 0000000000..6aec4b0373 --- /dev/null +++ b/shards/kubernetes_demo/milvus_write_servers.yaml @@ -0,0 +1,70 @@ +kind: Service +apiVersion: v1 +metadata: + name: milvus-wo-servers + namespace: milvus +spec: + type: ClusterIP + selector: + app: milvus + tier: wo-servers + ports: + - protocol: TCP + port: 19530 + targetPort: 19530 + +--- + +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: milvus-wo-servers + namespace: milvus +spec: + selector: + matchLabels: + app: milvus + tier: wo-servers + replicas: 1 + template: + metadata: + labels: + app: milvus + tier: wo-servers + spec: + containers: + - name: milvus-wo-server + image: milvusdb/milvus:0.5.0-d102119-ede20b + imagePullPolicy: Always + ports: + - containerPort: 19530 + resources: + limits: + memory: "5Gi" + cpu: "1.0" + requests: + memory: "4Gi" + volumeMounts: + - name: milvus-db-disk + mountPath: /var/milvus + subPath: dbdata + - name: milvus-woserver-configmap + mountPath: /opt/milvus/conf/server_config.yaml + subPath: config.yml + - name: 
milvus-woserver-configmap + mountPath: /opt/milvus/conf/log_config.conf + subPath: log.conf + # imagePullSecrets: + # - name: regcred + # tolerations: + # - key: "worker" + # operator: "Equal" + # value: "performance" + # effect: "NoSchedule" + volumes: + - name: milvus-woserver-configmap + configMap: + name: milvus-woserver-configmap + - name: milvus-db-disk + persistentVolumeClaim: + claimName: milvus-db-disk diff --git a/shards/kubernetes_demo/start.sh b/shards/kubernetes_demo/start.sh new file mode 100755 index 0000000000..7441aa5d70 --- /dev/null +++ b/shards/kubernetes_demo/start.sh @@ -0,0 +1,368 @@ +#!/bin/bash + +UL=`tput smul` +NOUL=`tput rmul` +BOLD=`tput bold` +NORMAL=`tput sgr0` +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +ENDC='\033[0m' + +function showHelpMessage () { + echo -e "${BOLD}Usage:${NORMAL} ${RED}$0${ENDC} [option...] {cleanup${GREEN}|${ENDC}baseup${GREEN}|${ENDC}appup${GREEN}|${ENDC}appdown${GREEN}|${ENDC}allup}" >&2 + echo + echo " -h, --help show help message" + echo " ${BOLD}cleanup, delete all resources${NORMAL}" + echo " ${BOLD}baseup, start all required base resources${NORMAL}" + echo " ${BOLD}appup, start all pods${NORMAL}" + echo " ${BOLD}appdown, remove all pods${NORMAL}" + echo " ${BOLD}allup, start all base resources and pods${NORMAL}" + echo " ${BOLD}scale-proxy, scale proxy${NORMAL}" + echo " ${BOLD}scale-ro-server, scale readonly servers${NORMAL}" + echo " ${BOLD}scale-worker, scale calculation workers${NORMAL}" +} + +function showscaleHelpMessage () { + echo -e "${BOLD}Usage:${NORMAL} ${RED}$0 $1${ENDC} [option...] {1|2|3|4|...}" >&2 + echo + echo " -h, --help show help message" + echo " ${BOLD}number, (int) target scale number" +} + +function PrintScaleSuccessMessage() { + echo -e "${BLUE}${BOLD}Successfully Scaled: ${1} --> ${2}${ENDC}" +} + +function PrintPodStatusMessage() { + echo -e "${BOLD}${1}${NORMAL}" +} + +timeout=60 + +function setUpMysql () { + mysqlUserName=$(kubectl describe configmap -n milvus milvus-roserver-configmap | + grep backend_url | + awk '{print $2}' | + awk '{split($0, level1, ":"); + split(level1[2], level2, "/"); + print level2[3]}') + mysqlPassword=$(kubectl describe configmap -n milvus milvus-roserver-configmap | + grep backend_url | + awk '{print $2}' | + awk '{split($0, level1, ":"); + split(level1[3], level3, "@"); + print level3[1]}') + mysqlDBName=$(kubectl describe configmap -n milvus milvus-roserver-configmap | + grep backend_url | + awk '{print $2}' | + awk '{split($0, level1, ":"); + split(level1[4], level4, "/"); + print level4[2]}') + mysqlContainer=$(kubectl get pods -n milvus | grep milvus-mysql | awk '{print $1}') + + kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "CREATE DATABASE IF NOT EXISTS $mysqlDBName;" + + checkDBExists=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SELECT schema_name FROM information_schema.schemata WHERE schema_name = '$mysqlDBName';" | grep -o $mysqlDBName | wc -l) + counter=0 + while [ $checkDBExists -lt 1 ]; do + sleep 1 + let counter=counter+1 + if [ $counter == $timeout ]; then + echo "Creating MySQL database $mysqlDBName timeout" + return 1 + fi + checkDBExists=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SELECT schema_name FROM information_schema.schemata WHERE schema_name = '$mysqlDBName';" | grep -o $mysqlDBName | wc -l) + done; + + kubectl exec -n milvus 
$mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "GRANT ALL PRIVILEGES ON $mysqlDBName.* TO '$mysqlUserName'@'%';" + kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "FLUSH PRIVILEGES;" + checkGrant=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SHOW GRANTS for $mysqlUserName;" | grep -o "GRANT ALL PRIVILEGES ON \`$mysqlDBName\`\.\*" | wc -l) + counter=0 + while [ $checkGrant -lt 1 ]; do + sleep 1 + let counter=counter+1 + if [ $counter == $timeout ]; then + echo "Granting all privileges on $mysqlDBName to $mysqlUserName timeout" + return 1 + fi + checkGrant=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SHOW GRANTS for $mysqlUserName;" | grep -o "GRANT ALL PRIVILEGES ON \`$mysqlDBName\`\.\*" | wc -l) + done; +} + +function checkStatefulSevers() { + stateful_replicas=$(kubectl describe statefulset -n milvus milvus-ro-servers | grep "Replicas:" | awk '{print $2}') + stateful_running_pods=$(kubectl describe statefulset -n milvus milvus-ro-servers | grep "Pods Status:" | awk '{print $3}') + + counter=0 + prev=$stateful_running_pods + PrintPodStatusMessage "Running milvus-ro-servers Pods: $stateful_running_pods/$stateful_replicas" + while [ $stateful_replicas != $stateful_running_pods ]; do + echo -e "${YELLOW}Wait another 1 sec --- ${counter}${ENDC}" + sleep 1; + + let counter=counter+1 + if [ $counter -eq $timeout ]; then + return 1; + fi + + stateful_running_pods=$(kubectl describe statefulset -n milvus milvus-ro-servers | grep "Pods Status:" | awk '{print $3}') + if [ $stateful_running_pods -ne $prev ]; then + PrintPodStatusMessage "Running milvus-ro-servers Pods: $stateful_running_pods/$stateful_replicas" + fi + prev=$stateful_running_pods + done; + return 0; +} + +function checkDeployment() { + deployment_name=$1 + replicas=$(kubectl describe deployment -n milvus $deployment_name | grep "Replicas:" | awk '{print $2}') + running=$(kubectl get pods -n milvus | grep $deployment_name | grep Running | wc -l) + + counter=0 + prev=$running + PrintPodStatusMessage "Running $deployment_name Pods: $running/$replicas" + while [ $replicas != $running ]; do + echo -e "${YELLOW}Wait another 1 sec --- ${counter}${ENDC}" + sleep 1; + + let counter=counter+1 + if [ $counter == $timeout ]; then + return 1 + fi + + running=$(kubectl get pods -n milvus | grep "$deployment_name" | grep Running | wc -l) + if [ $running -ne $prev ]; then + PrintPodStatusMessage "Running $deployment_name Pods: $running/$replicas" + fi + prev=$running + done +} + + +function startDependencies() { + kubectl apply -f milvus_data_pvc.yaml + kubectl apply -f milvus_configmap.yaml + kubectl apply -f milvus_auxiliary.yaml + + counter=0 + while [ $(kubectl get pvc -n milvus | grep Bound | wc -l) != 4 ]; do + sleep 1; + let counter=counter+1 + if [ $counter == $timeout ]; then + echo "baseup timeout" + return 1 + fi + done + checkDeployment "milvus-mysql" +} + +function startApps() { + counter=0 + errmsg="" + echo -e "${GREEN}${BOLD}Checking required resouces...${NORMAL}${ENDC}" + while [ $counter -lt $timeout ]; do + sleep 1; + if [ $(kubectl get pvc -n milvus 2>/dev/null | grep Bound | wc -l) != 4 ]; then + echo -e "${YELLOW}No pvc. Wait another sec... 
$counter${ENDC}"; + errmsg='No pvc'; + let counter=counter+1; + continue + fi + if [ $(kubectl get configmap -n milvus 2>/dev/null | grep milvus | wc -l) != 4 ]; then + echo -e "${YELLOW}No configmap. Wait another sec... $counter${ENDC}"; + errmsg='No configmap'; + let counter=counter+1; + continue + fi + if [ $(kubectl get ep -n milvus 2>/dev/null | grep milvus-mysql | awk '{print $2}') == "" ]; then + echo -e "${YELLOW}No mysql. Wait another sec... $counter${ENDC}"; + errmsg='No mysql'; + let counter=counter+1; + continue + fi + # if [ $(kubectl get ep -n milvus 2>/dev/null | grep milvus-redis | awk '{print $2}') == "" ]; then + # echo -e "${NORMAL}${YELLOW}No redis. Wait another sec... $counter${ENDC}"; + # errmsg='No redis'; + # let counter=counter+1; + # continue + # fi + break; + done + + if [ $counter -ge $timeout ]; then + echo -e "${RED}${BOLD}Start APP Error: $errmsg${NORMAL}${ENDC}" + exit 1; + fi + + echo -e "${GREEN}${BOLD}Setup requried database ...${NORMAL}${ENDC}" + setUpMysql + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Setup MySQL database timeout${NORMAL}${ENDC}" + exit 1 + fi + + echo -e "${GREEN}${BOLD}Start servers ...${NORMAL}${ENDC}" + kubectl apply -f milvus_stateful_servers.yaml + kubectl apply -f milvus_write_servers.yaml + + checkStatefulSevers + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Starting milvus-ro-servers timeout${NORMAL}${ENDC}" + exit 1 + fi + + checkDeployment "milvus-wo-servers" + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Starting milvus-wo-servers timeout${NORMAL}${ENDC}" + exit 1 + fi + + echo -e "${GREEN}${BOLD}Start rolebinding ...${NORMAL}${ENDC}" + kubectl apply -f milvus_rbac.yaml + + echo -e "${GREEN}${BOLD}Start proxies ...${NORMAL}${ENDC}" + kubectl apply -f milvus_proxy.yaml + + checkDeployment "milvus-proxy" + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Starting milvus-proxy timeout${NORMAL}${ENDC}" + exit 1 + fi + + # echo -e "${GREEN}${BOLD}Start flower ...${NORMAL}${ENDC}" + # kubectl apply -f milvus_flower.yaml + # checkDeployment "milvus-flower" + # if [ $? -ne 0 ]; then + # echo -e "${RED}${BOLD}Starting milvus-flower timeout${NORMAL}${ENDC}" + # exit 1 + # fi + +} + +function removeApps () { + # kubectl delete -f milvus_flower.yaml 2>/dev/null + kubectl delete -f milvus_proxy.yaml 2>/dev/null + kubectl delete -f milvus_stateful_servers.yaml 2>/dev/null + kubectl delete -f milvus_write_servers.yaml 2>/dev/null + kubectl delete -f milvus_rbac.yaml 2>/dev/null + # kubectl delete -f milvus_monitor.yaml 2>/dev/null +} + +function scaleDeployment() { + deployment_name=$1 + subcommand=$2 + des=$3 + + case $des in + -h|--help|"") + showscaleHelpMessage $subcommand + exit 3 + ;; + esac + + cur=$(kubectl get deployment -n milvus $deployment_name |grep $deployment_name |awk '{split($2, status, "/"); print status[2];}') + echo -e "${GREEN}Current Running ${BOLD}$cur ${GREEN}${deployment_name}, Scaling to ${BOLD}$des ...${ENDC}"; + scalecmd="kubectl scale deployment -n milvus ${deployment_name} --replicas=${des}" + ${scalecmd} + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale Error: ${GREEN}${scalecmd}${ENDC}" + exit 1 + fi + + checkDeployment $deployment_name + + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale ${deployment_name} timeout${NORMAL}${ENDC}" + scalecmd="kubectl scale deployment -n milvus ${deployment_name} --replicas=${cur}" + ${scalecmd} + if [ $? 
-ne 0 ]; then + echo -e "${RED}${BOLD}Scale Rollback Error: ${GREEN}${scalecmd}${ENDC}" + exit 2 + fi + echo -e "${BLUE}${BOLD}Scale Rollback to ${cur}${ENDC}" + exit 1 + fi + PrintScaleSuccessMessage $cur $des +} + +function scaleROServers() { + subcommand=$1 + des=$2 + case $des in + -h|--help|"") + showscaleHelpMessage $subcommand + exit 3 + ;; + esac + + cur=$(kubectl get statefulset -n milvus milvus-ro-servers |tail -n 1 |awk '{split($2, status, "/"); print status[2];}') + echo -e "${GREEN}Current Running ${BOLD}$cur ${GREEN}Readonly Servers, Scaling to ${BOLD}$des ...${ENDC}"; + scalecmd="kubectl scale sts milvus-ro-servers -n milvus --replicas=${des}" + ${scalecmd} + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale Error: ${GREEN}${scalecmd}${ENDC}" + exit 1 + fi + + checkStatefulSevers + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale milvus-ro-servers timeout${NORMAL}${ENDC}" + scalecmd="kubectl scale sts milvus-ro-servers -n milvus --replicas=${cur}" + ${scalecmd} + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale Rollback Error: ${GREEN}${scalecmd}${ENDC}" + exit 2 + fi + echo -e "${BLUE}${BOLD}Scale Rollback to ${cur}${ENDC}" + exit 1 + fi + + PrintScaleSuccessMessage $cur $des +} + + +case "$1" in + +cleanup) + kubectl delete -f . 2>/dev/null + echo -e "${BLUE}${BOLD}All resources are removed${NORMAL}${ENDC}" + ;; + +appdown) + removeApps; + echo -e "${BLUE}${BOLD}All pods are removed${NORMAL}${ENDC}" + ;; + +baseup) + startDependencies; + echo -e "${BLUE}${BOLD}All pvc, configmap and services up${NORMAL}${ENDC}" + ;; + +appup) + startApps; + echo -e "${BLUE}${BOLD}All pods up${NORMAL}${ENDC}" + ;; + +allup) + startDependencies; + sleep 2 + startApps; + echo -e "${BLUE}${BOLD}All resources and pods up${NORMAL}${ENDC}" + ;; + +scale-ro-server) + scaleROServers $1 $2 + ;; + +scale-proxy) + scaleDeployment "milvus-proxy" $1 $2 + ;; + +-h|--help|*) + showHelpMessage + ;; + +esac diff --git a/shards/manager.py b/shards/manager.py new file mode 100644 index 0000000000..4157b9343e --- /dev/null +++ b/shards/manager.py @@ -0,0 +1,18 @@ +import fire +from mishards import db, settings + + +class DBHandler: + @classmethod + def create_all(cls): + db.create_all() + + @classmethod + def drop_all(cls): + db.drop_all() + + +if __name__ == '__main__': + db.init_db(settings.DefaultConfig.SQLALCHEMY_DATABASE_URI) + from mishards import models + fire.Fire(DBHandler) diff --git a/shards/mishards/.env.example b/shards/mishards/.env.example new file mode 100644 index 0000000000..f1c812a269 --- /dev/null +++ b/shards/mishards/.env.example @@ -0,0 +1,36 @@ +DEBUG=True + +WOSERVER=tcp://127.0.0.1:19530 +SERVER_PORT=19532 +SERVER_TEST_PORT=19888 + +#SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False +SQL_ECHO=False + +#SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False +SQL_TEST_ECHO=False + +TRACER_PLUGIN_PATH=/tmp/plugins + +# TRACING_TEST_TYPE=jaeger +TRACER_CLASS_NAME=jaeger +TRACING_SERVICE_NAME=fortest +TRACING_SAMPLER_TYPE=const +TRACING_SAMPLER_PARAM=1 +TRACING_LOG_PAYLOAD=True +#TRACING_SAMPLER_TYPE=probabilistic +#TRACING_SAMPLER_PARAM=0.5 + +#DISCOVERY_PLUGIN_PATH= +#DISCOVERY_CLASS_NAME=kubernetes + +DISCOVERY_STATIC_HOSTS=127.0.0.1 +DISCOVERY_STATIC_PORT=19530 + +DISCOVERY_KUBERNETES_NAMESPACE=xp 
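The DISCOVERY_KUBERNETES_* settings here configure the optional kubernetes discovery plugin (selected with DISCOVERY_CLASS_NAME=kubernetes), which is loaded through DiscoveryFactory in mishards/__init__.py but is not part of this hunk. As a rough sketch of how such a plugin could turn the namespace, label selector and pod-name pattern into readonly-server addresses using the official kubernetes Python client; the helper name list_ro_server_endpoints and its defaults are invented for illustration, and the pods-list ClusterRole in milvus_rbac.yaml grants the list/get/watch permissions this kind of lookup needs:

    # Illustration only, not the shipped discovery plugin.
    import re
    from kubernetes import client, config

    def list_ro_server_endpoints(namespace='milvus',
                                 label_selector='tier=ro-servers',
                                 pod_patt=r'.*-ro-servers-.*',
                                 port=19530,
                                 in_cluster=False):
        # Use the pod's service account inside the cluster, a local kubeconfig otherwise.
        if in_cluster:
            config.load_incluster_config()
        else:
            config.load_kube_config()
        pods = client.CoreV1Api().list_namespaced_pod(namespace, label_selector=label_selector)
        endpoints = {}
        for pod in pods.items:
            # Keep running pods whose names match the configured pattern.
            if pod.status.phase == 'Running' and re.match(pod_patt, pod.metadata.name):
                endpoints[pod.metadata.name] = 'tcp://{}:{}'.format(pod.status.pod_ip, port)
        return endpoints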
+DISCOVERY_KUBERNETES_POD_PATT=.*-ro-servers-.* +DISCOVERY_KUBERNETES_LABEL_SELECTOR=tier=ro-servers +DISCOVERY_KUBERNETES_POLL_INTERVAL=5 +DISCOVERY_KUBERNETES_IN_CLUSTER=False diff --git a/shards/mishards/__init__.py b/shards/mishards/__init__.py new file mode 100644 index 0000000000..a3c55c4ae3 --- /dev/null +++ b/shards/mishards/__init__.py @@ -0,0 +1,40 @@ +import logging +from mishards import settings +logger = logging.getLogger() + +from mishards.db_base import DB +db = DB() + +from mishards.server import Server +grpc_server = Server() + + +def create_app(testing_config=None): + config = testing_config if testing_config else settings.DefaultConfig + db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) + + from mishards.connections import ConnectionMgr + connect_mgr = ConnectionMgr() + + from discovery.factory import DiscoveryFactory + discover = DiscoveryFactory(config.DISCOVERY_PLUGIN_PATH).create(config.DISCOVERY_CLASS_NAME, + conn_mgr=connect_mgr) + + from mishards.grpc_utils import GrpcSpanDecorator + from tracer.factory import TracerFactory + tracer = TracerFactory(config.TRACER_PLUGIN_PATH).create(config.TRACER_CLASS_NAME, + plugin_config=settings.TracingConfig, + span_decorator=GrpcSpanDecorator()) + + from mishards.router.factory import RouterFactory + router = RouterFactory(config.ROUTER_PLUGIN_PATH).create(config.ROUTER_CLASS_NAME, + conn_mgr=connect_mgr) + + grpc_server.init_app(conn_mgr=connect_mgr, + tracer=tracer, + router=router, + discover=discover) + + from mishards import exception_handlers + + return grpc_server diff --git a/shards/mishards/connections.py b/shards/mishards/connections.py new file mode 100644 index 0000000000..618690a099 --- /dev/null +++ b/shards/mishards/connections.py @@ -0,0 +1,154 @@ +import logging +import threading +from functools import wraps +from milvus import Milvus + +from mishards import (settings, exceptions) +from utils import singleton + +logger = logging.getLogger(__name__) + + +class Connection: + def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs): + self.name = name + self.uri = uri + self.max_retry = max_retry + self.retried = 0 + self.conn = Milvus() + self.error_handlers = [] if not error_handlers else error_handlers + self.on_retry_func = kwargs.get('on_retry_func', None) + # self._connect() + + def __str__(self): + return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) + + def _connect(self, metadata=None): + try: + self.conn.connect(uri=self.uri) + except Exception as e: + if not self.error_handlers: + raise exceptions.ConnectionConnectError(message=str(e), metadata=metadata) + for handler in self.error_handlers: + handler(e, metadata=metadata) + + @property + def can_retry(self): + return self.retried < self.max_retry + + @property + def connected(self): + return self.conn.connected() + + def on_retry(self): + if self.on_retry_func: + self.on_retry_func(self) + else: + self.retried > 1 and logger.warning('{} is retrying {}'.format(self, self.retried)) + + def on_connect(self, metadata=None): + while not self.connected and self.can_retry: + self.retried += 1 + self.on_retry() + self._connect(metadata=metadata) + + if not self.can_retry and not self.connected: + raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry, + metadata=metadata)) + + self.retried = 0 + + def connect(self, func, exception_handler=None): + @wraps(func) + def inner(*args, **kwargs): + self.on_connect() + try: + return func(*args, **kwargs) + except Exception 
as e: + if exception_handler: + exception_handler(e) + else: + raise e + return inner + + +@singleton +class ConnectionMgr: + def __init__(self): + self.metas = {} + self.conns = {} + + @property + def conn_names(self): + return set(self.metas.keys()) - set(['WOSERVER']) + + def conn(self, name, metadata, throw=False): + c = self.conns.get(name, None) + if not c: + url = self.metas.get(name, None) + if not url: + if not throw: + return None + raise exceptions.ConnectionNotFoundError(message='Connection {} not found'.format(name), + metadata=metadata) + this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) + threaded = { + threading.get_ident(): this_conn + } + self.conns[name] = threaded + return this_conn + + tid = threading.get_ident() + rconn = c.get(tid, None) + if not rconn: + url = self.metas.get(name, None) + if not url: + if not throw: + return None + raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name), + metadata=metadata) + this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) + c[tid] = this_conn + return this_conn + + return rconn + + def on_new_meta(self, name, url): + logger.info('Register Connection: name={};url={}'.format(name, url)) + self.metas[name] = url + + def on_duplicate_meta(self, name, url): + if self.metas[name] == url: + return self.on_same_meta(name, url) + + return self.on_diff_meta(name, url) + + def on_same_meta(self, name, url): + # logger.warning('Register same meta: {}:{}'.format(name, url)) + pass + + def on_diff_meta(self, name, url): + logger.warning('Received {} with diff url={}'.format(name, url)) + self.metas[name] = url + self.conns[name] = {} + + def on_unregister_meta(self, name, url): + logger.info('Unregister name={};url={}'.format(name, url)) + self.conns.pop(name, None) + + def on_nonexisted_meta(self, name): + logger.warning('Non-existed meta: {}'.format(name)) + + def register(self, name, url): + meta = self.metas.get(name) + if not meta: + return self.on_new_meta(name, url) + else: + return self.on_duplicate_meta(name, url) + + def unregister(self, name): + logger.info('Unregister Connection: name={}'.format(name)) + url = self.metas.pop(name, None) + if url is None: + return self.on_nonexisted_meta(name) + return self.on_unregister_meta(name, url) diff --git a/shards/mishards/db_base.py b/shards/mishards/db_base.py new file mode 100644 index 0000000000..5f2eee9ba1 --- /dev/null +++ b/shards/mishards/db_base.py @@ -0,0 +1,52 @@ +import logging +from sqlalchemy import create_engine +from sqlalchemy.engine.url import make_url +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker, scoped_session +from sqlalchemy.orm.session import Session as SessionBase + +logger = logging.getLogger(__name__) + + +class LocalSession(SessionBase): + def __init__(self, db, autocommit=False, autoflush=True, **options): + self.db = db + bind = options.pop('bind', None) or db.engine + SessionBase.__init__(self, autocommit=autocommit, autoflush=autoflush, bind=bind, **options) + + +class DB: + Model = declarative_base() + + def __init__(self, uri=None, echo=False): + self.echo = echo + uri and self.init_db(uri, echo) + self.session_factory = scoped_session(sessionmaker(class_=LocalSession, db=self)) + + def init_db(self, uri, echo=False): + url = make_url(uri) + if url.get_backend_name() == 'sqlite': + self.engine = create_engine(url) + else: + self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, + pool_pre_ping=True, + 
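+                                        # Pool tuning for networked backends such as MySQL: pre-ping
+                                        # discards stale connections at checkout, recycle keeps them
+                                        # short-lived, and max_overflow=0 caps the pool at pool_size.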
echo=echo,
+                                        max_overflow=0)
+        self.uri = uri
+        self.url = url
+
+    def __str__(self):
+        return '<DB: backend={};database={}>'.format(self.url.get_backend_name(), self.url.database)
+
+    @property
+    def Session(self):
+        return self.session_factory()
+
+    def remove_session(self):
+        self.session_factory.remove()
+
+    def drop_all(self):
+        self.Model.metadata.drop_all(self.engine)
+
+    def create_all(self):
+        self.Model.metadata.create_all(self.engine)
diff --git a/shards/mishards/exception_codes.py b/shards/mishards/exception_codes.py new file mode 100644 index 0000000000..bdd4572dd5 --- /dev/null +++ b/shards/mishards/exception_codes.py @@ -0,0 +1,10 @@
+INVALID_CODE = -1
+
+CONNECT_ERROR_CODE = 10001
+CONNECTTION_NOT_FOUND_CODE = 10002
+DB_ERROR_CODE = 10003
+
+TABLE_NOT_FOUND_CODE = 20001
+INVALID_ARGUMENT_CODE = 20002
+INVALID_DATE_RANGE_CODE = 20003
+INVALID_TOPK_CODE = 20004
diff --git a/shards/mishards/exception_handlers.py b/shards/mishards/exception_handlers.py new file mode 100644 index 0000000000..c79a6db5a3 --- /dev/null +++ b/shards/mishards/exception_handlers.py @@ -0,0 +1,82 @@
+import logging
+from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2
+from mishards import grpc_server as server, exceptions
+
+logger = logging.getLogger(__name__)
+
+
+def resp_handler(err, error_code):
+    if not isinstance(err, exceptions.BaseException):
+        return status_pb2.Status(error_code=error_code, reason=str(err))
+
+    status = status_pb2.Status(error_code=error_code, reason=err.message)
+
+    if err.metadata is None:
+        return status
+
+    resp_class = err.metadata.get('resp_class', None)
+    if not resp_class:
+        return status
+
+    if resp_class == milvus_pb2.BoolReply:
+        return resp_class(status=status, bool_reply=False)
+
+    if resp_class == milvus_pb2.VectorIds:
+        return resp_class(status=status, vector_id_array=[])
+
+    if resp_class == milvus_pb2.TopKQueryResultList:
+        return resp_class(status=status, topk_query_result=[])
+
+    if resp_class == milvus_pb2.TableRowCount:
+        return resp_class(status=status, table_row_count=-1)
+
+    if resp_class == milvus_pb2.TableName:
+        return resp_class(status=status, table_name=[])
+
+    if resp_class == milvus_pb2.StringReply:
+        return resp_class(status=status, string_reply='')
+
+    if resp_class == milvus_pb2.TableSchema:
+        return milvus_pb2.TableSchema(
+            status=status
+        )
+
+    if resp_class == milvus_pb2.IndexParam:
+        return milvus_pb2.IndexParam(
+            table_name=milvus_pb2.TableName(
+                status=status
+            )
+        )
+
+    status.error_code = status_pb2.UNEXPECTED_ERROR
+    return status
+
+
+@server.errorhandler(exceptions.TableNotFoundError)
+def TableNotFoundErrorHandler(err):
+    logger.error(err)
+    return resp_handler(err, status_pb2.TABLE_NOT_EXISTS)
+
+
+@server.errorhandler(exceptions.InvalidTopKError)
+def InvalidTopKErrorHandler(err):
+    logger.error(err)
+    return resp_handler(err, status_pb2.ILLEGAL_TOPK)
+
+
+@server.errorhandler(exceptions.InvalidArgumentError)
+def InvalidArgumentErrorHandler(err):
+    logger.error(err)
+    return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT)
+
+
+@server.errorhandler(exceptions.DBError)
+def DBErrorHandler(err):
+    logger.error(err)
+    return resp_handler(err, status_pb2.UNEXPECTED_ERROR)
+
+
+@server.errorhandler(exceptions.InvalidRangeError)
+def InvalidRangeErrorHandler(err):
+    logger.error(err)
+    return resp_handler(err, status_pb2.ILLEGAL_RANGE)
diff --git a/shards/mishards/exceptions.py b/shards/mishards/exceptions.py new file mode 100644 index 0000000000..72839f88d2 --- /dev/null +++ b/shards/mishards/exceptions.py @@ -0,0 +1,38 @@
+import 
mishards.exception_codes as codes + + +class BaseException(Exception): + code = codes.INVALID_CODE + message = 'BaseException' + + def __init__(self, message='', metadata=None): + self.message = self.__class__.__name__ if not message else message + self.metadata = metadata + + +class ConnectionConnectError(BaseException): + code = codes.CONNECT_ERROR_CODE + + +class ConnectionNotFoundError(BaseException): + code = codes.CONNECTTION_NOT_FOUND_CODE + + +class DBError(BaseException): + code = codes.DB_ERROR_CODE + + +class TableNotFoundError(BaseException): + code = codes.TABLE_NOT_FOUND_CODE + + +class InvalidTopKError(BaseException): + code = codes.INVALID_TOPK_CODE + + +class InvalidArgumentError(BaseException): + code = codes.INVALID_ARGUMENT_CODE + + +class InvalidRangeError(BaseException): + code = codes.INVALID_DATE_RANGE_CODE diff --git a/shards/mishards/factories.py b/shards/mishards/factories.py new file mode 100644 index 0000000000..52c0253b39 --- /dev/null +++ b/shards/mishards/factories.py @@ -0,0 +1,54 @@ +import time +import datetime +import random +import factory +from factory.alchemy import SQLAlchemyModelFactory +from faker import Faker +from faker.providers import BaseProvider + +from milvus.client.types import MetricType +from mishards import db +from mishards.models import Tables, TableFiles + + +class FakerProvider(BaseProvider): + def this_date(self): + t = datetime.datetime.today() + return (t.year - 1900) * 10000 + (t.month - 1) * 100 + t.day + + +factory.Faker.add_provider(FakerProvider) + + +class TablesFactory(SQLAlchemyModelFactory): + class Meta: + model = Tables + sqlalchemy_session = db.session_factory + sqlalchemy_session_persistence = 'commit' + + id = factory.Faker('random_number', digits=16, fix_len=True) + table_id = factory.Faker('uuid4') + state = factory.Faker('random_element', elements=(0, 1)) + dimension = factory.Faker('random_element', elements=(256, 512)) + created_on = int(time.time()) + index_file_size = 0 + engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) + metric_type = factory.Faker('random_element', elements=(MetricType.L2, MetricType.IP)) + nlist = 16384 + + +class TableFilesFactory(SQLAlchemyModelFactory): + class Meta: + model = TableFiles + sqlalchemy_session = db.session_factory + sqlalchemy_session_persistence = 'commit' + + id = factory.Faker('random_number', digits=16, fix_len=True) + table = factory.SubFactory(TablesFactory) + engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) + file_id = factory.Faker('uuid4') + file_type = factory.Faker('random_element', elements=(0, 1, 2, 3, 4)) + file_size = factory.Faker('random_number') + updated_time = int(time.time()) + created_on = int(time.time()) + date = factory.Faker('this_date') diff --git a/shards/mishards/grpc_utils/__init__.py b/shards/mishards/grpc_utils/__init__.py new file mode 100644 index 0000000000..f5225b2a66 --- /dev/null +++ b/shards/mishards/grpc_utils/__init__.py @@ -0,0 +1,37 @@ +from grpc_opentracing import SpanDecorator +from milvus.grpc_gen import status_pb2 + + +class GrpcSpanDecorator(SpanDecorator): + def __call__(self, span, rpc_info): + status = None + if not rpc_info.response: + return + if isinstance(rpc_info.response, status_pb2.Status): + status = rpc_info.response + else: + try: + status = rpc_info.response.status + except Exception as e: + status = status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, + reason='Should not happen') + + if status.error_code == 0: + return + error_log = {'event': 'error', + 'request': 
rpc_info.request, + 'response': rpc_info.response + } + span.set_tag('error', True) + span.log_kv(error_log) + + +def mark_grpc_method(func): + setattr(func, 'grpc_method', True) + return func + + +def is_grpc_method(func): + if not func: + return False + return getattr(func, 'grpc_method', False) diff --git a/shards/mishards/grpc_utils/grpc_args_parser.py b/shards/mishards/grpc_utils/grpc_args_parser.py new file mode 100644 index 0000000000..039299803d --- /dev/null +++ b/shards/mishards/grpc_utils/grpc_args_parser.py @@ -0,0 +1,102 @@ +from milvus import Status +from functools import wraps + + +def error_status(func): + @wraps(func) + def inner(*args, **kwargs): + try: + results = func(*args, **kwargs) + except Exception as e: + return Status(code=Status.UNEXPECTED_ERROR, message=str(e)), None + + return Status(code=0, message="Success"), results + + return inner + + +class GrpcArgsParser(object): + + @classmethod + @error_status + def parse_proto_TableSchema(cls, param): + _table_schema = { + 'status': param.status, + 'table_name': param.table_name, + 'dimension': param.dimension, + 'index_file_size': param.index_file_size, + 'metric_type': param.metric_type + } + + return _table_schema + + @classmethod + @error_status + def parse_proto_TableName(cls, param): + return param.table_name + + @classmethod + @error_status + def parse_proto_Index(cls, param): + _index = { + 'index_type': param.index_type, + 'nlist': param.nlist + } + + return _index + + @classmethod + @error_status + def parse_proto_IndexParam(cls, param): + _table_name = param.table_name + _status, _index = cls.parse_proto_Index(param.index) + + if not _status.OK(): + raise Exception("Argument parse error") + + return _table_name, _index + + @classmethod + @error_status + def parse_proto_Command(cls, param): + _cmd = param.cmd + + return _cmd + + @classmethod + @error_status + def parse_proto_Range(cls, param): + _start_value = param.start_value + _end_value = param.end_value + + return _start_value, _end_value + + @classmethod + @error_status + def parse_proto_RowRecord(cls, param): + return list(param.vector_data) + + @classmethod + @error_status + def parse_proto_SearchParam(cls, param): + _table_name = param.table_name + _topk = param.topk + _nprobe = param.nprobe + _status, _range = cls.parse_proto_Range(param.query_range_array) + + if not _status.OK(): + raise Exception("Argument parse error") + + _row_record = param.query_record_array + + return _table_name, _row_record, _range, _topk + + @classmethod + @error_status + def parse_proto_DeleteByRangeParam(cls, param): + _table_name = param.table_name + _range = param.range + _start_value = _range.start_value + _end_value = _range.end_value + + return _table_name, _start_value, _end_value diff --git a/shards/mishards/grpc_utils/grpc_args_wrapper.py b/shards/mishards/grpc_utils/grpc_args_wrapper.py new file mode 100644 index 0000000000..7447dbd995 --- /dev/null +++ b/shards/mishards/grpc_utils/grpc_args_wrapper.py @@ -0,0 +1,4 @@ +# class GrpcArgsWrapper(object): + +# @classmethod +# def proto_TableName(cls): diff --git a/shards/mishards/grpc_utils/test_grpc.py b/shards/mishards/grpc_utils/test_grpc.py new file mode 100644 index 0000000000..9af09e5d0d --- /dev/null +++ b/shards/mishards/grpc_utils/test_grpc.py @@ -0,0 +1,75 @@ +import logging +import opentracing +from mishards.grpc_utils import GrpcSpanDecorator, is_grpc_method +from milvus.grpc_gen import status_pb2, milvus_pb2 + +logger = logging.getLogger(__name__) + + +class FakeTracer(opentracing.Tracer): + pass + + 
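A note on the error_status decorator added in grpc_args_parser.py above: it converts any exception raised by the wrapped parser into a failed milvus Status, so callers always unpack a (Status, payload) pair and only trust the payload when status.OK() is true. A minimal sketch, where parse_positive is a made-up stand-in for the real parsers:

    from milvus import Status
    from mishards.grpc_utils.grpc_args_parser import error_status

    @error_status
    def parse_positive(value):
        # Any exception raised here is returned to the caller as a failed Status.
        if value <= 0:
            raise ValueError('value must be positive')
        return value * 2

    ok, doubled = parse_positive(21)   # ok.OK() is True, doubled == 42
    bad, nothing = parse_positive(-1)  # bad.OK() is False, nothing is None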
+class FakeSpan(opentracing.Span): + def __init__(self, context, tracer, **kwargs): + super(FakeSpan, self).__init__(tracer, context) + self.reset() + + def set_tag(self, key, value): + self.tags.append({key: value}) + + def log_kv(self, key_values, timestamp=None): + self.logs.append(key_values) + + def reset(self): + self.tags = [] + self.logs = [] + + +class FakeRpcInfo: + def __init__(self, request, response): + self.request = request + self.response = response + + +class TestGrpcUtils: + def test_span_deco(self): + request = 'request' + OK = status_pb2.Status(error_code=status_pb2.SUCCESS, reason='Success') + response = OK + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + response = milvus_pb2.BoolReply(status=OK, bool_reply=False) + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + response = 1 + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 1 + assert len(span.tags) == 1 + + response = 0 + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + def test_is_grpc_method(self): + target = 1 + assert not is_grpc_method(target) + target = None + assert not is_grpc_method(target) diff --git a/shards/mishards/hash_ring.py b/shards/mishards/hash_ring.py new file mode 100644 index 0000000000..a97f3f580e --- /dev/null +++ b/shards/mishards/hash_ring.py @@ -0,0 +1,150 @@ +import math +import sys +from bisect import bisect + +if sys.version_info >= (2, 5): + import hashlib + md5_constructor = hashlib.md5 +else: + import md5 + md5_constructor = md5.new + + +class HashRing(object): + def __init__(self, nodes=None, weights=None): + """`nodes` is a list of objects that have a proper __str__ representation. + `weights` is dictionary that sets weights to the nodes. The default + weight is that all nodes are equal. + """ + self.ring = dict() + self._sorted_keys = [] + + self.nodes = nodes + + if not weights: + weights = {} + self.weights = weights + + self._generate_circle() + + def _generate_circle(self): + """Generates the circle. + """ + total_weight = 0 + for node in self.nodes: + total_weight += self.weights.get(node, 1) + + for node in self.nodes: + weight = 1 + + if node in self.weights: + weight = self.weights.get(node) + + factor = math.floor((40 * len(self.nodes) * weight) / total_weight) + + for j in range(0, int(factor)): + b_key = self._hash_digest('%s-%s' % (node, j)) + + for i in range(0, 3): + key = self._hash_val(b_key, lambda x: x + i * 4) + self.ring[key] = node + self._sorted_keys.append(key) + + self._sorted_keys.sort() + + def get_node(self, string_key): + """Given a string key a corresponding node in the hash ring is returned. + + If the hash ring is empty, `None` is returned. 
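+
+        Lookup is a binary search (see get_node_pos) over the sorted list of
+        hashed virtual-node keys; when the key hashes past the last entry on
+        the ring, the search wraps around to the first node.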
+ """ + pos = self.get_node_pos(string_key) + if pos is None: + return None + return self.ring[self._sorted_keys[pos]] + + def get_node_pos(self, string_key): + """Given a string key a corresponding node in the hash ring is returned + along with it's position in the ring. + + If the hash ring is empty, (`None`, `None`) is returned. + """ + if not self.ring: + return None + + key = self.gen_key(string_key) + + nodes = self._sorted_keys + pos = bisect(nodes, key) + + if pos == len(nodes): + return 0 + else: + return pos + + def iterate_nodes(self, string_key, distinct=True): + """Given a string key it returns the nodes as a generator that can hold the key. + + The generator iterates one time through the ring + starting at the correct position. + + if `distinct` is set, then the nodes returned will be unique, + i.e. no virtual copies will be returned. + """ + if not self.ring: + yield None, None + + returned_values = set() + + def distinct_filter(value): + if str(value) not in returned_values: + returned_values.add(str(value)) + return value + + pos = self.get_node_pos(string_key) + for key in self._sorted_keys[pos:]: + val = distinct_filter(self.ring[key]) + if val: + yield val + + for i, key in enumerate(self._sorted_keys): + if i < pos: + val = distinct_filter(self.ring[key]) + if val: + yield val + + def gen_key(self, key): + """Given a string key it returns a long value, + this long value represents a place on the hash ring. + + md5 is currently used because it mixes well. + """ + b_key = self._hash_digest(key) + return self._hash_val(b_key, lambda x: x) + + def _hash_val(self, b_key, entry_fn): + return (b_key[entry_fn(3)] << 24) | (b_key[entry_fn(2)] << 16) | ( + b_key[entry_fn(1)] << 8) | b_key[entry_fn(0)] + + def _hash_digest(self, key): + m = md5_constructor() + key = key.encode() + m.update(key) + return m.digest() + + +if __name__ == '__main__': + from collections import defaultdict + servers = [ + '192.168.0.246:11212', '192.168.0.247:11212', '192.168.0.248:11212', + '192.168.0.249:11212' + ] + + ring = HashRing(servers) + keys = ['{}'.format(i) for i in range(100)] + mapped = defaultdict(list) + for k in keys: + server = ring.get_node(k) + mapped[server].append(k) + + for k, v in mapped.items(): + print(k, v) diff --git a/shards/mishards/main.py b/shards/mishards/main.py new file mode 100644 index 0000000000..c0d142607b --- /dev/null +++ b/shards/mishards/main.py @@ -0,0 +1,15 @@ +import os +import sys +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from mishards import (settings, create_app) + + +def main(): + server = create_app(settings.DefaultConfig) + server.run(port=settings.SERVER_PORT) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/shards/mishards/models.py b/shards/mishards/models.py new file mode 100644 index 0000000000..4b6c8f9ef4 --- /dev/null +++ b/shards/mishards/models.py @@ -0,0 +1,76 @@ +import logging +from sqlalchemy import (Integer, Boolean, Text, + String, BigInteger, and_, or_, + Column) +from sqlalchemy.orm import relationship, backref + +from mishards import db + +logger = logging.getLogger(__name__) + + +class TableFiles(db.Model): + FILE_TYPE_NEW = 0 + FILE_TYPE_RAW = 1 + FILE_TYPE_TO_INDEX = 2 + FILE_TYPE_INDEX = 3 + FILE_TYPE_TO_DELETE = 4 + FILE_TYPE_NEW_MERGE = 5 + FILE_TYPE_NEW_INDEX = 6 + FILE_TYPE_BACKUP = 7 + + __tablename__ = 'TableFiles' + + id = Column(BigInteger, primary_key=True, autoincrement=True) + table_id = Column(String(50)) + engine_type = Column(Integer) + file_id = 
Column(String(50)) + file_type = Column(Integer) + file_size = Column(Integer, default=0) + row_count = Column(Integer, default=0) + updated_time = Column(BigInteger) + created_on = Column(BigInteger) + date = Column(Integer) + + table = relationship( + 'Tables', + primaryjoin='and_(foreign(TableFiles.table_id) == Tables.table_id)', + backref=backref('files', uselist=True, lazy='dynamic') + ) + + +class Tables(db.Model): + TO_DELETE = 1 + NORMAL = 0 + + __tablename__ = 'Tables' + + id = Column(BigInteger, primary_key=True, autoincrement=True) + table_id = Column(String(50), unique=True) + state = Column(Integer) + dimension = Column(Integer) + created_on = Column(Integer) + flag = Column(Integer, default=0) + index_file_size = Column(Integer) + engine_type = Column(Integer) + nlist = Column(Integer) + metric_type = Column(Integer) + + def files_to_search(self, date_range=None): + cond = or_( + TableFiles.file_type == TableFiles.FILE_TYPE_RAW, + TableFiles.file_type == TableFiles.FILE_TYPE_TO_INDEX, + TableFiles.file_type == TableFiles.FILE_TYPE_INDEX, + ) + if date_range: + cond = and_( + cond, + or_( + and_(TableFiles.date >= d[0], TableFiles.date < d[1]) for d in date_range + ) + ) + + files = self.files.filter(cond) + + logger.debug('DATE_RANGE: {}'.format(date_range)) + return files diff --git a/shards/mishards/router/__init__.py b/shards/mishards/router/__init__.py new file mode 100644 index 0000000000..4150f3b736 --- /dev/null +++ b/shards/mishards/router/__init__.py @@ -0,0 +1,22 @@ +from mishards import exceptions + + +class RouterMixin: + def __init__(self, conn_mgr): + self.conn_mgr = conn_mgr + + def routing(self, table_name, metadata=None, **kwargs): + raise NotImplemented() + + def connection(self, metadata=None): + conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) + if conn: + conn.on_connect(metadata=metadata) + return conn.conn + + def query_conn(self, name, metadata=None): + conn = self.conn_mgr.conn(name, metadata=metadata) + if not conn: + raise exceptions.ConnectionNotFoundError(name, metadata=metadata) + conn.on_connect(metadata=metadata) + return conn.conn diff --git a/shards/mishards/router/factory.py b/shards/mishards/router/factory.py new file mode 100644 index 0000000000..a8f85c0df8 --- /dev/null +++ b/shards/mishards/router/factory.py @@ -0,0 +1,17 @@ +import os +import logging +from utils.plugins import BaseMixin + +logger = logging.getLogger(__name__) +PLUGIN_PACKAGE_NAME = 'mishards.router.plugins' + + +class RouterFactory(BaseMixin): + PLUGIN_TYPE = 'Router' + + def __init__(self, searchpath=None): + super().__init__(searchpath=searchpath, package_name=PLUGIN_PACKAGE_NAME) + + def _create(self, plugin_class, **kwargs): + router = plugin_class.Create(**kwargs) + return router diff --git a/shards/mishards/router/plugins/file_based_hash_ring_router.py b/shards/mishards/router/plugins/file_based_hash_ring_router.py new file mode 100644 index 0000000000..b90935129e --- /dev/null +++ b/shards/mishards/router/plugins/file_based_hash_ring_router.py @@ -0,0 +1,64 @@ +import logging +from sqlalchemy import exc as sqlalchemy_exc +from sqlalchemy import and_ +from mishards.models import Tables +from mishards.router import RouterMixin +from mishards import exceptions, db +from mishards.hash_ring import HashRing + +logger = logging.getLogger(__name__) + + +class Factory(RouterMixin): + name = 'FileBasedHashRingRouter' + + def __init__(self, conn_mgr, **kwargs): + super(Factory, self).__init__(conn_mgr) + + def routing(self, table_name, metadata=None, **kwargs): + 
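+        # Build the scatter plan for one search request: fetch the table's
+        # searchable files, then assign each file id to a backend with
+        # consistent hashing over the currently registered readonly servers.
+        # Returns {server_addr: {'table_id': ..., 'file_ids': [...]}}, which
+        # ServiceHandler._do_query fans out over.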
range_array = kwargs.pop('range_array', None) + return self._route(table_name, range_array, metadata, **kwargs) + + def _route(self, table_name, range_array, metadata=None, **kwargs): + # PXU TODO: Implement Thread-local Context + # PXU TODO: Session life mgt + try: + table = db.Session.query(Tables).filter( + and_(Tables.table_id == table_name, + Tables.state != Tables.TO_DELETE)).first() + except sqlalchemy_exc.SQLAlchemyError as e: + raise exceptions.DBError(message=str(e), metadata=metadata) + + if not table: + raise exceptions.TableNotFoundError(table_name, metadata=metadata) + files = table.files_to_search(range_array) + db.remove_session() + + servers = self.conn_mgr.conn_names + logger.info('Available servers: {}'.format(servers)) + + ring = HashRing(servers) + + routing = {} + + for f in files: + target_host = ring.get_node(str(f.id)) + sub = routing.get(target_host, None) + if not sub: + routing[target_host] = {'table_id': table_name, 'file_ids': []} + routing[target_host]['file_ids'].append(str(f.id)) + + return routing + + @classmethod + def Create(cls, **kwargs): + conn_mgr = kwargs.pop('conn_mgr', None) + if not conn_mgr: + raise RuntimeError('Cannot find \'conn_mgr\' to initialize \'{}\''.format(self.name)) + router = cls(conn_mgr, **kwargs) + return router + + +def setup(app): + logger.info('Plugin \'{}\' Installed In Package: {}'.format(__file__, app.plugin_package_name)) + app.on_plugin_setup(Factory) diff --git a/shards/mishards/server.py b/shards/mishards/server.py new file mode 100644 index 0000000000..599a00e455 --- /dev/null +++ b/shards/mishards/server.py @@ -0,0 +1,122 @@ +import logging +import grpc +import time +import socket +import inspect +from urllib.parse import urlparse +from functools import wraps +from concurrent import futures +from grpc._cython import cygrpc +from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server +from mishards.grpc_utils import is_grpc_method +from mishards.service_handler import ServiceHandler +from mishards import settings + +logger = logging.getLogger(__name__) + + +class Server: + def __init__(self): + self.pre_run_handlers = set() + self.grpc_methods = set() + self.error_handlers = {} + self.exit_flag = False + + def init_app(self, + conn_mgr, + tracer, + router, + discover, + port=19530, + max_workers=10, + **kwargs): + self.port = int(port) + self.conn_mgr = conn_mgr + self.tracer = tracer + self.router = router + self.discover = discover + + self.server_impl = grpc.server( + thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), + options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), + (cygrpc.ChannelArgKey.max_receive_message_length, -1)]) + + self.server_impl = self.tracer.decorate(self.server_impl) + + self.register_pre_run_handler(self.pre_run_handler) + + def pre_run_handler(self): + woserver = settings.WOSERVER + url = urlparse(woserver) + ip = socket.gethostbyname(url.hostname) + socket.inet_pton(socket.AF_INET, ip) + self.conn_mgr.register( + 'WOSERVER', '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) + + def register_pre_run_handler(self, func): + logger.info('Regiterring {} into server pre_run_handlers'.format(func)) + self.pre_run_handlers.add(func) + return func + + def wrap_method_with_errorhandler(self, func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + if e.__class__ in self.error_handlers: + return self.error_handlers[e.__class__](e) + raise + + return wrapper + + def errorhandler(self, exception): 
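+        # Decorator for registering a handler for one exception class, e.g.
+        # @server.errorhandler(exceptions.DBError) in exception_handlers.py.
+        # decorate_handler() then wraps every marked gRPC method so a raised
+        # exception of a registered class is answered by its handler instead
+        # of propagating out of the servicer.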
+ if inspect.isclass(exception) and issubclass(exception, Exception): + + def wrapper(func): + self.error_handlers[exception] = func + return func + + return wrapper + return exception + + def on_pre_run(self): + for handler in self.pre_run_handlers: + handler() + self.discover.start() + + def start(self, port=None): + handler_class = self.decorate_handler(ServiceHandler) + add_MilvusServiceServicer_to_server( + handler_class(tracer=self.tracer, + router=self.router), self.server_impl) + self.server_impl.add_insecure_port("[::]:{}".format( + str(port or self.port))) + self.server_impl.start() + + def run(self, port): + logger.info('Milvus server start ......') + port = port or self.port + self.on_pre_run() + + self.start(port) + logger.info('Listening on port {}'.format(port)) + + try: + while not self.exit_flag: + time.sleep(5) + except KeyboardInterrupt: + self.stop() + + def stop(self): + logger.info('Server is shuting down ......') + self.exit_flag = True + self.server_impl.stop(0) + self.tracer.close() + logger.info('Server is closed') + + def decorate_handler(self, handler): + for key, attr in handler.__dict__.items(): + if is_grpc_method(attr): + setattr(handler, key, self.wrap_method_with_errorhandler(attr)) + return handler diff --git a/shards/mishards/service_handler.py b/shards/mishards/service_handler.py new file mode 100644 index 0000000000..2f19152ae6 --- /dev/null +++ b/shards/mishards/service_handler.py @@ -0,0 +1,475 @@ +import logging +import time +import datetime +from collections import defaultdict + +import multiprocessing +from concurrent.futures import ThreadPoolExecutor +from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 +from milvus.grpc_gen.milvus_pb2 import TopKQueryResult +from milvus.client.abstract import Range +from milvus.client import types as Types + +from mishards import (db, settings, exceptions) +from mishards.grpc_utils import mark_grpc_method +from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser +from mishards import utilities + +logger = logging.getLogger(__name__) + + +class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): + MAX_NPROBE = 2048 + MAX_TOPK = 2048 + + def __init__(self, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs): + self.table_meta = {} + self.error_handlers = {} + self.tracer = tracer + self.router = router + self.max_workers = max_workers + + def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): + status = status_pb2.Status(error_code=status_pb2.SUCCESS, + reason="Success") + if not files_n_topk_results: + return status, [] + + request_results = defaultdict(list) + + calc_time = time.time() + for files_collection in files_n_topk_results: + if isinstance(files_collection, tuple): + status, _ = files_collection + return status, [] + for request_pos, each_request_results in enumerate( + files_collection.topk_query_result): + request_results[request_pos].extend( + each_request_results.query_result_arrays) + request_results[request_pos] = sorted( + request_results[request_pos], + key=lambda x: x.distance, + reverse=reverse)[:topk] + + calc_time = time.time() - calc_time + logger.info('Merge takes {}'.format(calc_time)) + + results = sorted(request_results.items()) + topk_query_result = [] + + for result in results: + query_result = TopKQueryResult(query_result_arrays=result[1]) + topk_query_result.append(query_result) + + return status, topk_query_result + + def _do_query(self, + context, + table_id, + table_meta, + vectors, + topk, + nprobe, + 
range_array=None, + **kwargs): + metadata = kwargs.get('metadata', None) + range_array = [ + utilities.range_to_date(r, metadata=metadata) for r in range_array + ] if range_array else None + + routing = {} + p_span = None if self.tracer.empty else context.get_active_span( + ).context + with self.tracer.start_span('get_routing', child_of=p_span): + routing = self.router.routing(table_id, + range_array=range_array, + metadata=metadata) + logger.info('Routing: {}'.format(routing)) + + metadata = kwargs.get('metadata', None) + + rs = [] + all_topk_results = [] + + def search(addr, query_params, vectors, topk, nprobe, **kwargs): + logger.info( + 'Send Search Request: addr={};params={};nq={};topk={};nprobe={}' + .format(addr, query_params, len(vectors), topk, nprobe)) + + conn = self.router.query_conn(addr, metadata=metadata) + start = time.time() + span = kwargs.get('span', None) + span = span if span else (None if self.tracer.empty else + context.get_active_span().context) + + with self.tracer.start_span('search_{}'.format(addr), + child_of=span): + ret = conn.search_vectors_in_files( + table_name=query_params['table_id'], + file_ids=query_params['file_ids'], + query_records=vectors, + top_k=topk, + nprobe=nprobe, + lazy_=True) + end = time.time() + logger.info('search_vectors_in_files takes: {}'.format(end - start)) + + all_topk_results.append(ret) + + with self.tracer.start_span('do_search', child_of=p_span) as span: + with ThreadPoolExecutor(max_workers=self.max_workers) as pool: + for addr, params in routing.items(): + res = pool.submit(search, + addr, + params, + vectors, + topk, + nprobe, + span=span) + rs.append(res) + + for res in rs: + res.result() + + reverse = table_meta.metric_type == Types.MetricType.IP + with self.tracer.start_span('do_merge', child_of=p_span): + return self._do_merge(all_topk_results, + topk, + reverse=reverse, + metadata=metadata) + + def _create_table(self, table_schema): + return self.router.connection().create_table(table_schema) + + @mark_grpc_method + def CreateTable(self, request, context): + _status, _table_schema = Parser.parse_proto_TableSchema(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + logger.info('CreateTable {}'.format(_table_schema['table_name'])) + + _status = self._create_table(_table_schema) + + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + def _has_table(self, table_name, metadata=None): + return self.router.connection(metadata=metadata).has_table(table_name) + + @mark_grpc_method + def HasTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return milvus_pb2.BoolReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + bool_reply=False) + + logger.info('HasTable {}'.format(_table_name)) + + _status, _bool = self._has_table(_table_name, + metadata={'resp_class': milvus_pb2.BoolReply}) + + return milvus_pb2.BoolReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + bool_reply=_bool) + + def _delete_table(self, table_name): + return self.router.connection().delete_table(table_name) + + @mark_grpc_method + def DropTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + logger.info('DropTable {}'.format(_table_name)) + + _status = self._delete_table(_table_name) + + return 
status_pb2.Status(error_code=_status.code, + reason=_status.message) + + def _create_index(self, table_name, index): + return self.router.connection().create_index(table_name, index) + + @mark_grpc_method + def CreateIndex(self, request, context): + _status, unpacks = Parser.parse_proto_IndexParam(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + _table_name, _index = unpacks + + logger.info('CreateIndex {}'.format(_table_name)) + + # TODO: interface create_table incompleted + _status = self._create_index(_table_name, _index) + + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + def _add_vectors(self, param, metadata=None): + return self.router.connection(metadata=metadata).add_vectors( + None, None, insert_param=param) + + @mark_grpc_method + def Insert(self, request, context): + logger.info('Insert') + # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' + _status, _ids = self._add_vectors( + metadata={'resp_class': milvus_pb2.VectorIds}, param=request) + return milvus_pb2.VectorIds(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + vector_id_array=_ids) + + @mark_grpc_method + def Search(self, request, context): + + table_name = request.table_name + + topk = request.topk + nprobe = request.nprobe + + logger.info('Search {}: topk={} nprobe={}'.format( + table_name, topk, nprobe)) + + metadata = {'resp_class': milvus_pb2.TopKQueryResultList} + + if nprobe > self.MAX_NPROBE or nprobe <= 0: + raise exceptions.InvalidArgumentError( + message='Invalid nprobe: {}'.format(nprobe), metadata=metadata) + + if topk > self.MAX_TOPK or topk <= 0: + raise exceptions.InvalidTopKError( + message='Invalid topk: {}'.format(topk), metadata=metadata) + + table_meta = self.table_meta.get(table_name, None) + + if not table_meta: + status, info = self.router.connection( + metadata=metadata).describe_table(table_name) + if not status.OK(): + raise exceptions.TableNotFoundError(table_name, + metadata=metadata) + + self.table_meta[table_name] = info + table_meta = info + + start = time.time() + + query_record_array = [] + + for query_record in request.query_record_array: + query_record_array.append(list(query_record.vector_data)) + + query_range_array = [] + for query_range in request.query_range_array: + query_range_array.append( + Range(query_range.start_value, query_range.end_value)) + + status, results = self._do_query(context, + table_name, + table_meta, + query_record_array, + topk, + nprobe, + query_range_array, + metadata=metadata) + + now = time.time() + logger.info('SearchVector takes: {}'.format(now - start)) + + topk_result_list = milvus_pb2.TopKQueryResultList( + status=status_pb2.Status(error_code=status.error_code, + reason=status.reason), + topk_query_result=results) + return topk_result_list + + @mark_grpc_method + def SearchInFiles(self, request, context): + raise NotImplemented() + + def _describe_table(self, table_name, metadata=None): + return self.router.connection(metadata=metadata).describe_table(table_name) + + @mark_grpc_method + def DescribeTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return milvus_pb2.TableSchema(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), ) + + metadata = {'resp_class': milvus_pb2.TableSchema} + + logger.info('DescribeTable {}'.format(_table_name)) + _status, _table = self._describe_table(metadata=metadata, + 
table_name=_table_name) + + if _status.OK(): + return milvus_pb2.TableSchema( + table_name=_table_name, + index_file_size=_table.index_file_size, + dimension=_table.dimension, + metric_type=_table.metric_type, + status=status_pb2.Status(error_code=_status.code, + reason=_status.message), + ) + + return milvus_pb2.TableSchema( + table_name=_table_name, + status=status_pb2.Status(error_code=_status.code, + reason=_status.message), + ) + + def _count_table(self, table_name, metadata=None): + return self.router.connection( + metadata=metadata).get_table_row_count(table_name) + + @mark_grpc_method + def CountTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + status = status_pb2.Status(error_code=_status.code, + reason=_status.message) + + return milvus_pb2.TableRowCount(status=status) + + logger.info('CountTable {}'.format(_table_name)) + + metadata = {'resp_class': milvus_pb2.TableRowCount} + _status, _count = self._count_table(_table_name, metadata=metadata) + + return milvus_pb2.TableRowCount( + status=status_pb2.Status(error_code=_status.code, + reason=_status.message), + table_row_count=_count if isinstance(_count, int) else -1) + + def _get_server_version(self, metadata=None): + return self.router.connection(metadata=metadata).server_version() + + @mark_grpc_method + def Cmd(self, request, context): + _status, _cmd = Parser.parse_proto_Command(request) + logger.info('Cmd: {}'.format(_cmd)) + + if not _status.OK(): + return milvus_pb2.StringReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) + + metadata = {'resp_class': milvus_pb2.StringReply} + + if _cmd == 'version': + _status, _reply = self._get_server_version(metadata=metadata) + else: + _status, _reply = self.router.connection( + metadata=metadata).server_status() + + return milvus_pb2.StringReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + string_reply=_reply) + + def _show_tables(self, metadata=None): + return self.router.connection(metadata=metadata).show_tables() + + @mark_grpc_method + def ShowTables(self, request, context): + logger.info('ShowTables') + metadata = {'resp_class': milvus_pb2.TableName} + _status, _results = self._show_tables(metadata=metadata) + + return milvus_pb2.TableNameList(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + table_names=_results) + + def _delete_by_range(self, table_name, start_date, end_date): + return self.router.connection().delete_vectors_by_range(table_name, + start_date, + end_date) + + @mark_grpc_method + def DeleteByRange(self, request, context): + _status, unpacks = \ + Parser.parse_proto_DeleteByRangeParam(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + _table_name, _start_date, _end_date = unpacks + + logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, + _end_date)) + _status = self._delete_by_range(_table_name, _start_date, _end_date) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + def _preload_table(self, table_name): + return self.router.connection().preload_table(table_name) + + @mark_grpc_method + def PreloadTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + logger.info('PreloadTable {}'.format(_table_name)) + _status = 
self._preload_table(_table_name) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + def _describe_index(self, table_name, metadata=None): + return self.router.connection(metadata=metadata).describe_index(table_name) + + @mark_grpc_method + def DescribeIndex(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) + + metadata = {'resp_class': milvus_pb2.IndexParam} + + logger.info('DescribeIndex {}'.format(_table_name)) + _status, _index_param = self._describe_index(table_name=_table_name, + metadata=metadata) + + if not _index_param: + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) + + _index = milvus_pb2.Index(index_type=_index_param._index_type, + nlist=_index_param._nlist) + + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + table_name=_table_name, + index=_index) + + def _drop_index(self, table_name): + return self.router.connection().drop_index(table_name) + + @mark_grpc_method + def DropIndex(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, + reason=_status.message) + + logger.info('DropIndex {}'.format(_table_name)) + _status = self._drop_index(_table_name) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) diff --git a/shards/mishards/settings.py b/shards/mishards/settings.py new file mode 100644 index 0000000000..8d7361dddc --- /dev/null +++ b/shards/mishards/settings.py @@ -0,0 +1,69 @@ +import sys +import os + +from environs import Env +env = Env() + +FROM_EXAMPLE = env.bool('FROM_EXAMPLE', False) +if FROM_EXAMPLE: + from dotenv import load_dotenv + load_dotenv('./mishards/.env.example') +else: + env.read_env() + + +DEBUG = env.bool('DEBUG', False) +MAX_RETRY = env.int('MAX_RETRY', 3) + +LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO') +LOG_PATH = env.str('LOG_PATH', '/tmp/mishards') +LOG_NAME = env.str('LOG_NAME', 'logfile') +TIMEZONE = env.str('TIMEZONE', 'UTC') + +from utils.logger_helper import config +config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) + +SERVER_PORT = env.int('SERVER_PORT', 19530) +SERVER_TEST_PORT = env.int('SERVER_TEST_PORT', 19530) +WOSERVER = env.str('WOSERVER') + + +class TracingConfig: + TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') + TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) + TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', False) + TRACING_CONFIG = { + 'sampler': { + 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), + 'param': env.str('TRACING_SAMPLER_PARAM', "1"), + }, + 'local_agent': { + 'reporting_host': env.str('TRACING_REPORTING_HOST', '127.0.0.1'), + 'reporting_port': env.str('TRACING_REPORTING_PORT', '5775') + }, + 'logging': env.bool('TRACING_LOGGING', True) + } + DEFAULT_TRACING_CONFIG = { + 'sampler': { + 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), + 'param': env.str('TRACING_SAMPLER_PARAM', "0"), + } + } + + +class DefaultConfig: + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') + SQL_ECHO = env.bool('SQL_ECHO', False) + TRACER_PLUGIN_PATH = env.str('TRACER_PLUGIN_PATH', '') + TRACER_CLASS_NAME = env.str('TRACER_CLASS_NAME', '') + ROUTER_PLUGIN_PATH = env.str('ROUTER_PLUGIN_PATH', '') + ROUTER_CLASS_NAME = 
env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter') + DISCOVERY_PLUGIN_PATH = env.str('DISCOVERY_PLUGIN_PATH', '') + DISCOVERY_CLASS_NAME = env.str('DISCOVERY_CLASS_NAME', 'static') + + +class TestingConfig(DefaultConfig): + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI', '') + SQL_ECHO = env.bool('SQL_TEST_ECHO', False) + TRACER_CLASS_NAME = env.str('TRACER_CLASS_TEST_NAME', '') + ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter') diff --git a/shards/mishards/test_connections.py b/shards/mishards/test_connections.py new file mode 100644 index 0000000000..819d2e03da --- /dev/null +++ b/shards/mishards/test_connections.py @@ -0,0 +1,101 @@ +import logging +import pytest +import mock + +from milvus import Milvus +from mishards.connections import (ConnectionMgr, Connection) +from mishards import exceptions + +logger = logging.getLogger(__name__) + + +@pytest.mark.usefixtures('app') +class TestConnection: + def test_manager(self): + mgr = ConnectionMgr() + + mgr.register('pod1', '111') + mgr.register('pod2', '222') + mgr.register('pod2', '222') + mgr.register('pod2', '2222') + assert len(mgr.conn_names) == 2 + + mgr.unregister('pod1') + assert len(mgr.conn_names) == 1 + + mgr.unregister('pod2') + assert len(mgr.conn_names) == 0 + + mgr.register('WOSERVER', 'xxxx') + assert len(mgr.conn_names) == 0 + + assert not mgr.conn('XXXX', None) + with pytest.raises(exceptions.ConnectionNotFoundError): + mgr.conn('XXXX', None, True) + + mgr.conn('WOSERVER', None) + + def test_connection(self): + class Conn: + def __init__(self, state): + self.state = state + + def connect(self, uri): + return self.state + + def connected(self): + return self.state + + FAIL_CONN = Conn(False) + PASS_CONN = Conn(True) + + class Retry: + def __init__(self): + self.times = 0 + + def __call__(self, conn): + self.times += 1 + logger.info('Retrying {}'.format(self.times)) + + class Func(): + def __init__(self): + self.executed = False + + def __call__(self): + self.executed = True + + max_retry = 3 + + RetryObj = Retry() + + c = Connection('client', + uri='xx', + max_retry=max_retry, + on_retry_func=RetryObj) + c.conn = FAIL_CONN + ff = Func() + this_connect = c.connect(func=ff) + with pytest.raises(exceptions.ConnectionConnectError): + this_connect() + assert RetryObj.times == max_retry + assert not ff.executed + RetryObj = Retry() + + c.conn = PASS_CONN + this_connect = c.connect(func=ff) + this_connect() + assert ff.executed + assert RetryObj.times == 0 + + this_connect = c.connect(func=None) + with pytest.raises(TypeError): + this_connect() + + errors = [] + + def error_handler(err): + errors.append(err) + + this_connect = c.connect(func=None, exception_handler=error_handler) + this_connect() + assert len(errors) == 1 diff --git a/shards/mishards/test_models.py b/shards/mishards/test_models.py new file mode 100644 index 0000000000..d60b62713e --- /dev/null +++ b/shards/mishards/test_models.py @@ -0,0 +1,39 @@ +import logging +import pytest +from mishards.factories import TableFiles, Tables, TableFilesFactory, TablesFactory +from mishards import db, create_app, settings +from mishards.factories import ( + Tables, TableFiles, + TablesFactory, TableFilesFactory +) + +logger = logging.getLogger(__name__) + + +@pytest.mark.usefixtures('app') +class TestModels: + def test_files_to_search(self): + table = TablesFactory() + new_files_cnt = 5 + to_index_cnt = 10 + raw_cnt = 20 + backup_cnt = 12 + to_delete_cnt = 9 + index_cnt = 8 + new_index_cnt = 6 + new_merge_cnt = 11 + + 
new_files = TableFilesFactory.create_batch(new_files_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW, date=110) + to_index_files = TableFilesFactory.create_batch(to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX, date=110) + raw_files = TableFilesFactory.create_batch(raw_cnt, table=table, file_type=TableFiles.FILE_TYPE_RAW, date=120) + backup_files = TableFilesFactory.create_batch(backup_cnt, table=table, file_type=TableFiles.FILE_TYPE_BACKUP, date=110) + index_files = TableFilesFactory.create_batch(index_cnt, table=table, file_type=TableFiles.FILE_TYPE_INDEX, date=110) + new_index_files = TableFilesFactory.create_batch(new_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_INDEX, date=110) + new_merge_files = TableFilesFactory.create_batch(new_merge_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_MERGE, date=110) + to_delete_files = TableFilesFactory.create_batch(to_delete_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_DELETE, date=110) + assert table.files_to_search().count() == raw_cnt + index_cnt + to_index_cnt + + assert table.files_to_search([(100, 115)]).count() == index_cnt + to_index_cnt + assert table.files_to_search([(111, 120)]).count() == 0 + assert table.files_to_search([(111, 121)]).count() == raw_cnt + assert table.files_to_search([(110, 121)]).count() == raw_cnt + index_cnt + to_index_cnt diff --git a/shards/mishards/test_server.py b/shards/mishards/test_server.py new file mode 100644 index 0000000000..f0cde2184c --- /dev/null +++ b/shards/mishards/test_server.py @@ -0,0 +1,279 @@ +import logging +import pytest +import mock +import datetime +import random +import faker +import inspect +from milvus import Milvus +from milvus.client.types import Status, IndexType, MetricType +from milvus.client.abstract import IndexParam, TableSchema +from milvus.grpc_gen import status_pb2, milvus_pb2 +from mishards import db, create_app, settings +from mishards.service_handler import ServiceHandler +from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser +from mishards.factories import TableFilesFactory, TablesFactory, TableFiles, Tables +from mishards.router import RouterMixin + +logger = logging.getLogger(__name__) + +OK = Status(code=Status.SUCCESS, message='Success') +BAD = Status(code=Status.PERMISSION_DENIED, message='Fail') + + +@pytest.mark.usefixtures('started_app') +class TestServer: + @property + def client(self): + m = Milvus() + m.connect(host='localhost', port=settings.SERVER_TEST_PORT) + return m + + def test_server_start(self, started_app): + assert started_app.conn_mgr.metas.get('WOSERVER') == settings.WOSERVER + + def test_cmd(self, started_app): + ServiceHandler._get_server_version = mock.MagicMock(return_value=(OK, + '')) + status, _ = self.client.server_version() + assert status.OK() + + Parser.parse_proto_Command = mock.MagicMock(return_value=(BAD, 'cmd')) + status, _ = self.client.server_version() + assert not status.OK() + + def test_drop_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + ServiceHandler._drop_index = mock.MagicMock(return_value=OK) + status = self.client.drop_index(table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client.drop_index(table_name) + assert not status.OK() + + def test_describe_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + index_type = IndexType.FLAT + nlist = 1 + index_param = IndexParam(table_name=table_name, + index_type=index_type, 
+ nlist=nlist) + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._describe_index = mock.MagicMock( + return_value=(OK, index_param)) + status, ret = self.client.describe_index(table_name) + assert status.OK() + assert ret._table_name == index_param._table_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status, _ = self.client.describe_index(table_name) + assert not status.OK() + + def test_preload(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._preload_table = mock.MagicMock(return_value=OK) + status = self.client.preload_table(table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client.preload_table(table_name) + assert not status.OK() + + @pytest.mark.skip + def test_delete_by_range(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + unpacked = table_name, datetime.datetime.today( + ), datetime.datetime.today() + + Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( + return_value=(OK, unpacked)) + ServiceHandler._delete_by_range = mock.MagicMock(return_value=OK) + status = self.client.delete_vectors_by_range( + *unpacked) + assert status.OK() + + Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( + return_value=(BAD, unpacked)) + status = self.client.delete_vectors_by_range( + *unpacked) + assert not status.OK() + + def test_count_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + count = random.randint(100, 200) + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._count_table = mock.MagicMock(return_value=(OK, count)) + status, ret = self.client.get_table_row_count(table_name) + assert status.OK() + assert ret == count + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status, _ = self.client.get_table_row_count(table_name) + assert not status.OK() + + def test_show_tables(self, started_app): + tables = ['t1', 't2'] + ServiceHandler._show_tables = mock.MagicMock(return_value=(OK, tables)) + status, ret = self.client.show_tables() + assert status.OK() + assert ret == tables + + def test_describe_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + dimension = 128 + nlist = 1 + table_schema = TableSchema(table_name=table_name, + index_file_size=100, + metric_type=MetricType.L2, + dimension=dimension) + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_schema.table_name)) + ServiceHandler._describe_table = mock.MagicMock( + return_value=(OK, table_schema)) + status, _ = self.client.describe_table(table_name) + assert status.OK() + + ServiceHandler._describe_table = mock.MagicMock( + return_value=(BAD, table_schema)) + status, _ = self.client.describe_table(table_name) + assert not status.OK() + + Parser.parse_proto_TableName = mock.MagicMock(return_value=(BAD, + 'cmd')) + status, ret = self.client.describe_table(table_name) + assert not status.OK() + + def test_insert(self, started_app): + table_name = inspect.currentframe().f_code.co_name + vectors = [[random.random() for _ in range(16)] for _ in range(10)] + ids = [random.randint(1000000, 20000000) for _ in range(10)] + ServiceHandler._add_vectors = mock.MagicMock(return_value=(OK, ids)) + status, ret = self.client.add_vectors( + 
table_name=table_name, records=vectors) + assert status.OK() + assert ids == ret + + def test_create_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + unpacks = table_name, None + Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(OK, + unpacks)) + ServiceHandler._create_index = mock.MagicMock(return_value=OK) + status = self.client.create_index(table_name=table_name) + assert status.OK() + + Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(BAD, + None)) + status = self.client.create_index(table_name=table_name) + assert not status.OK() + + def test_drop_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._delete_table = mock.MagicMock(return_value=OK) + status = self.client.delete_table(table_name=table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client.delete_table(table_name=table_name) + assert not status.OK() + + def test_has_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._has_table = mock.MagicMock(return_value=(OK, True)) + has = self.client.has_table(table_name=table_name) + assert has + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status, has = self.client.has_table(table_name=table_name) + assert not status.OK() + assert not has + + def test_create_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + dimension = 128 + table_schema = dict(table_name=table_name, + index_file_size=100, + metric_type=MetricType.L2, + dimension=dimension) + + ServiceHandler._create_table = mock.MagicMock(return_value=OK) + status = self.client.create_table(table_schema) + assert status.OK() + + Parser.parse_proto_TableSchema = mock.MagicMock(return_value=(BAD, + None)) + status = self.client.create_table(table_schema) + assert not status.OK() + + def random_data(self, n, dimension): + return [[random.random() for _ in range(dimension)] for _ in range(n)] + + def test_search(self, started_app): + table_name = inspect.currentframe().f_code.co_name + to_index_cnt = random.randint(10, 20) + table = TablesFactory(table_id=table_name, state=Tables.NORMAL) + to_index_files = TableFilesFactory.create_batch( + to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX) + topk = random.randint(5, 10) + nq = random.randint(5, 10) + param = { + 'table_name': table_name, + 'query_records': self.random_data(nq, table.dimension), + 'top_k': topk, + 'nprobe': 2049 + } + + result = [ + milvus_pb2.TopKQueryResult(query_result_arrays=[ + milvus_pb2.QueryResult(id=i, distance=random.random()) + for i in range(topk) + ]) for i in range(nq) + ] + + mock_results = milvus_pb2.TopKQueryResultList(status=status_pb2.Status( + error_code=status_pb2.SUCCESS, reason="Success"), + topk_query_result=result) + + table_schema = TableSchema(table_name=table_name, + index_file_size=table.index_file_size, + metric_type=table.metric_type, + dimension=table.dimension) + + status, _ = self.client.search_vectors(**param) + assert status.code == Status.ILLEGAL_ARGUMENT + + param['nprobe'] = 2048 + RouterMixin.connection = mock.MagicMock(return_value=Milvus()) + RouterMixin.query_conn = mock.MagicMock(return_value=Milvus()) + Milvus.describe_table = 
mock.MagicMock(return_value=(BAD, + table_schema)) + status, ret = self.client.search_vectors(**param) + assert status.code == Status.TABLE_NOT_EXISTS + + Milvus.describe_table = mock.MagicMock(return_value=(OK, table_schema)) + Milvus.search_vectors_in_files = mock.MagicMock( + return_value=mock_results) + + status, ret = self.client.search_vectors(**param) + assert status.OK() + assert len(ret) == nq diff --git a/shards/mishards/utilities.py b/shards/mishards/utilities.py new file mode 100644 index 0000000000..42e982b5f1 --- /dev/null +++ b/shards/mishards/utilities.py @@ -0,0 +1,20 @@ +import datetime +from mishards import exceptions + + +def format_date(start, end): + return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, + (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) + + +def range_to_date(range_obj, metadata=None): + try: + start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') + end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') + assert start < end + except (ValueError, AssertionError): + raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( + range_obj.start_date, range_obj.end_date), + metadata=metadata) + + return format_date(start, end) diff --git a/shards/requirements.txt b/shards/requirements.txt new file mode 100644 index 0000000000..14bdde2a06 --- /dev/null +++ b/shards/requirements.txt @@ -0,0 +1,37 @@ +environs==4.2.0 +factory-boy==2.12.0 +Faker==1.0.7 +fire==0.1.3 +google-auth==1.6.3 +grpcio==1.22.0 +grpcio-tools==1.22.0 +kubernetes==10.0.1 +MarkupSafe==1.1.1 +marshmallow==2.19.5 +pymysql==0.9.3 +protobuf==3.9.1 +py==1.8.0 +pyasn1==0.4.7 +pyasn1-modules==0.2.6 +pylint==2.3.1 +pymilvus-test==0.2.28 +#pymilvus==0.2.0 +pyparsing==2.4.0 +pytest==4.6.3 +pytest-level==0.1.1 +pytest-print==0.1.2 +pytest-repeat==0.8.0 +pytest-timeout==1.3.3 +python-dateutil==2.8.0 +python-dotenv==0.10.3 +pytz==2019.1 +requests==2.22.0 +requests-oauthlib==1.2.0 +rsa==4.0 +six==1.12.0 +SQLAlchemy==1.3.5 +urllib3==1.25.3 +jaeger-client>=3.4.0 +grpcio-opentracing>=1.0 +mock==2.0.0 +pluginbase==1.0.0 diff --git a/shards/setup.cfg b/shards/setup.cfg new file mode 100644 index 0000000000..4a88432914 --- /dev/null +++ b/shards/setup.cfg @@ -0,0 +1,4 @@ +[tool:pytest] +testpaths = mishards +log_cli=true +log_cli_level=info diff --git a/shards/tracer/__init__.py b/shards/tracer/__init__.py new file mode 100644 index 0000000000..64a5b50d15 --- /dev/null +++ b/shards/tracer/__init__.py @@ -0,0 +1,43 @@ +from contextlib import contextmanager + + +def empty_server_interceptor_decorator(target_server, interceptor): + return target_server + + +@contextmanager +def EmptySpan(*args, **kwargs): + yield None + return + + +class Tracer: + def __init__(self, + tracer=None, + interceptor=None, + server_decorator=empty_server_interceptor_decorator): + self.tracer = tracer + self.interceptor = interceptor + self.server_decorator = server_decorator + + def decorate(self, server): + return self.server_decorator(server, self.interceptor) + + @property + def empty(self): + return self.tracer is None + + def close(self): + self.tracer and self.tracer.close() + + def start_span(self, + operation_name=None, + child_of=None, + references=None, + tags=None, + start_time=None, + ignore_active_span=False): + if self.empty: + return EmptySpan() + return self.tracer.start_span(operation_name, child_of, references, + tags, start_time, ignore_active_span) diff --git a/shards/tracer/factory.py b/shards/tracer/factory.py new file mode 100644 index 
0000000000..0e54a5aeb6 --- /dev/null +++ b/shards/tracer/factory.py @@ -0,0 +1,27 @@ +import os +import logging +from tracer import Tracer +from utils.plugins import BaseMixin + +logger = logging.getLogger(__name__) +PLUGIN_PACKAGE_NAME = 'tracer.plugins' + + +class TracerFactory(BaseMixin): + PLUGIN_TYPE = 'Tracer' + + def __init__(self, searchpath=None): + super().__init__(searchpath=searchpath, package_name=PLUGIN_PACKAGE_NAME) + + def create(self, class_name, **kwargs): + if not class_name: + return Tracer() + return super().create(class_name, **kwargs) + + def _create(self, plugin_class, **kwargs): + plugin_config = kwargs.pop('plugin_config', None) + if not plugin_config: + raise RuntimeError('\'{}\' Plugin Config is Required!'.format(self.PLUGIN_TYPE)) + + plugin = plugin_class.Create(plugin_config=plugin_config, **kwargs) + return plugin diff --git a/shards/tracer/plugins/jaeger_factory.py b/shards/tracer/plugins/jaeger_factory.py new file mode 100644 index 0000000000..923f2f805d --- /dev/null +++ b/shards/tracer/plugins/jaeger_factory.py @@ -0,0 +1,35 @@ +import logging +from jaeger_client import Config +from grpc_opentracing.grpcext import intercept_server +from grpc_opentracing import open_tracing_server_interceptor +from tracer import Tracer + +logger = logging.getLogger(__name__) + +PLUGIN_NAME = __file__ + + +class JaegerFactory: + name = 'jaeger' + @classmethod + def Create(cls, plugin_config, **kwargs): + tracing_config = plugin_config.TRACING_CONFIG + span_decorator = kwargs.pop('span_decorator', None) + service_name = plugin_config.TRACING_SERVICE_NAME + validate = plugin_config.TRACING_VALIDATE + config = Config(config=tracing_config, + service_name=service_name, + validate=validate) + + tracer = config.initialize_tracer() + tracer_interceptor = open_tracing_server_interceptor( + tracer, + log_payloads=plugin_config.TRACING_LOG_PAYLOAD, + span_decorator=span_decorator) + + return Tracer(tracer, tracer_interceptor, intercept_server) + + +def setup(app): + logger.info('Plugin \'{}\' Installed In Package: {}'.format(PLUGIN_NAME, app.plugin_package_name)) + app.on_plugin_setup(JaegerFactory) diff --git a/shards/utils/__init__.py b/shards/utils/__init__.py new file mode 100644 index 0000000000..cf444c0680 --- /dev/null +++ b/shards/utils/__init__.py @@ -0,0 +1,18 @@ +from functools import wraps + + +def singleton(cls): + instances = {} + @wraps(cls) + def getinstance(*args, **kw): + if cls not in instances: + instances[cls] = cls(*args, **kw) + return instances[cls] + return getinstance + + +class dotdict(dict): + """dot.notation access to dictionary attributes""" + __getattr__ = dict.get + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ diff --git a/shards/utils/logger_helper.py b/shards/utils/logger_helper.py new file mode 100644 index 0000000000..b4e3b9c5b6 --- /dev/null +++ b/shards/utils/logger_helper.py @@ -0,0 +1,152 @@ +import os +import datetime +from pytz import timezone +from logging import Filter +import logging.config + + +class InfoFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.INFO + + +class DebugFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.DEBUG + + +class WarnFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.WARN + + +class ErrorFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.ERROR + + +class CriticalFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.CRITICAL + + +COLORS = { + 
'HEADER': '\033[95m', + 'INFO': '\033[92m', + 'DEBUG': '\033[94m', + 'WARNING': '\033[93m', + 'ERROR': '\033[95m', + 'CRITICAL': '\033[91m', + 'ENDC': '\033[0m', +} + + +class ColorFulFormatColMixin: + def format_col(self, message_str, level_name): + if level_name in COLORS.keys(): + message_str = COLORS.get(level_name) + message_str + COLORS.get( + 'ENDC') + return message_str + + +class ColorfulFormatter(logging.Formatter, ColorFulFormatColMixin): + def format(self, record): + message_str = super(ColorfulFormatter, self).format(record) + + return self.format_col(message_str, level_name=record.levelname) + + +def config(log_level, log_path, name, tz='UTC'): + def build_log_file(level, log_path, name, tz): + utc_now = datetime.datetime.utcnow() + utc_tz = timezone('UTC') + local_tz = timezone(tz) + tznow = utc_now.replace(tzinfo=utc_tz).astimezone(local_tz) + return '{}-{}-{}.log'.format(os.path.join(log_path, name), tznow.strftime("%m-%d-%Y-%H:%M:%S"), + level) + + if not os.path.exists(log_path): + os.makedirs(log_path) + + LOGGING = { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'default': { + 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', + }, + 'colorful_console': { + 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', + '()': ColorfulFormatter, + }, + }, + 'filters': { + 'InfoFilter': { + '()': InfoFilter, + }, + 'DebugFilter': { + '()': DebugFilter, + }, + 'WarnFilter': { + '()': WarnFilter, + }, + 'ErrorFilter': { + '()': ErrorFilter, + }, + 'CriticalFilter': { + '()': CriticalFilter, + }, + }, + 'handlers': { + 'milvus_celery_console': { + 'class': 'logging.StreamHandler', + 'formatter': 'colorful_console', + }, + 'milvus_debug_file': { + 'level': 'DEBUG', + 'filters': ['DebugFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('debug', log_path, name, tz) + }, + 'milvus_info_file': { + 'level': 'INFO', + 'filters': ['InfoFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('info', log_path, name, tz) + }, + 'milvus_warn_file': { + 'level': 'WARN', + 'filters': ['WarnFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('warn', log_path, name, tz) + }, + 'milvus_error_file': { + 'level': 'ERROR', + 'filters': ['ErrorFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('error', log_path, name, tz) + }, + 'milvus_critical_file': { + 'level': 'CRITICAL', + 'filters': ['CriticalFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('critical', log_path, name, tz) + }, + }, + 'loggers': { + '': { + 'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', + 'milvus_error_file', 'milvus_critical_file'], + 'level': log_level, + 'propagate': False + }, + }, + 'propagate': False, + } + + logging.config.dictConfig(LOGGING) diff --git a/shards/utils/pluginextension.py b/shards/utils/pluginextension.py new file mode 100644 index 0000000000..68413a4e55 --- /dev/null +++ b/shards/utils/pluginextension.py @@ -0,0 +1,16 @@ +import importlib.util +from pluginbase import PluginBase, PluginSource + + +class MiPluginSource(PluginSource): + def load_plugin(self, name): + plugin = super().load_plugin(name) + spec 
= importlib.util.spec_from_file_location(self.base.package + '.' + name, plugin.__file__) + plugin = importlib.util.module_from_spec(spec) + spec.loader.exec_module(plugin) + return plugin + + +class MiPluginBase(PluginBase): + def make_plugin_source(self, *args, **kwargs): + return MiPluginSource(self, *args, **kwargs) diff --git a/shards/utils/plugins/__init__.py b/shards/utils/plugins/__init__.py new file mode 100644 index 0000000000..633f1164a7 --- /dev/null +++ b/shards/utils/plugins/__init__.py @@ -0,0 +1,40 @@ +import os +import inspect +from functools import partial +from utils.pluginextension import MiPluginBase as PluginBase + + +class BaseMixin(object): + + def __init__(self, package_name, searchpath=None): + self.plugin_package_name = package_name + caller_path = os.path.dirname(inspect.stack()[1][1]) + get_path = partial(os.path.join, caller_path) + plugin_base = PluginBase(package=self.plugin_package_name, + searchpath=[get_path('./plugins')]) + self.class_map = {} + searchpath = searchpath if searchpath else [] + searchpath = [searchpath] if isinstance(searchpath, str) else searchpath + self.source = plugin_base.make_plugin_source(searchpath=searchpath, + identifier=self.__class__.__name__) + + for plugin_name in self.source.list_plugins(): + plugin = self.source.load_plugin(plugin_name) + plugin.setup(self) + + def on_plugin_setup(self, plugin_class): + name = getattr(plugin_class, 'name', plugin_class.__name__) + self.class_map[name.lower()] = plugin_class + + def plugin(self, name): + return self.class_map.get(name, None) + + def create(self, class_name, **kwargs): + if not class_name: + raise RuntimeError('Please specify \'{}\' class_name first!'.format(self.PLUGIN_TYPE)) + + plugin_class = self.plugin(class_name.lower()) + if not plugin_class: + raise RuntimeError('{} Plugin \'{}\' Not Installed!'.format(self.PLUGIN_TYPE, class_name)) + + return self._create(plugin_class, **kwargs)
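
Note: the plugin machinery added above (utils/plugins.BaseMixin, tracer.factory.TracerFactory and the bundled jaeger plugin) is wired into the mishards server outside of this patch, so the following is only a minimal usage sketch. It assumes the shards/ directory is on PYTHONPATH and the dependencies from requirements.txt (notably jaeger-client and grpcio-opentracing) are installed; grpc_server and the settings import in the last comment are illustrative placeholders, not part of this diff.

    from tracer.factory import TracerFactory

    # Instantiating the factory scans tracer/plugins and registers every
    # plugin whose setup() hook calls app.on_plugin_setup().
    factory = TracerFactory()

    # An empty class name falls back to a no-op Tracer: start_span() yields
    # EmptySpan (i.e. None) and decorate() returns the server unchanged.
    tracer = factory.create(class_name='')
    assert tracer.empty
    with tracer.start_span('noop-span') as span:
        assert span is None
    tracer.close()

    # Selecting the bundled jaeger plugin requires a plugin_config, e.g. the
    # TracingConfig class from mishards/settings.py (hypothetical wiring):
    #   from mishards import settings
    #   tracer = factory.create('jaeger', plugin_config=settings.TracingConfig)
    #   server = tracer.decorate(grpc_server)

Because TracerFactory.create() returns the no-op Tracer whenever the class name is empty, leaving TRACER_CLASS_NAME unset in the environment presumably disables tracing without any change to the service code.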