From f00ca24f1b5ae4f7a32bfde28aad74a37c817bf4 Mon Sep 17 00:00:00 2001 From: zhiru Date: Wed, 3 Jul 2019 14:25:02 +0800 Subject: [PATCH 01/29] Disable cleanup if mode is read only Former-commit-id: 09655e2bb8a52466c1732c829519742867cf07a3 --- cpp/CHANGELOG.md | 1 + cpp/src/db/MySQLMetaImpl.cpp | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/cpp/CHANGELOG.md b/cpp/CHANGELOG.md index ac3b2c51f6..19fbbe9c0c 100644 --- a/cpp/CHANGELOG.md +++ b/cpp/CHANGELOG.md @@ -25,6 +25,7 @@ Please mark all change in change log and use the ticket from JIRA. - MS-90 - Fix arch match incorrect on ARM - MS-99 - Fix compilation bug - MS-110 - Avoid huge file size +- MS-148 - Disable cleanup if mode is read only ## Improvement - MS-82 - Update server startup welcome message diff --git a/cpp/src/db/MySQLMetaImpl.cpp b/cpp/src/db/MySQLMetaImpl.cpp index 92bb17168a..20b0cef0c6 100644 --- a/cpp/src/db/MySQLMetaImpl.cpp +++ b/cpp/src/db/MySQLMetaImpl.cpp @@ -162,7 +162,9 @@ namespace meta { ENGINE_LOG_DEBUG << "MySQL connection pool: maximum pool size = " << std::to_string(maxPoolSize); try { - CleanUp(); + if (mode_ != Options::MODE::READ_ONLY) { + CleanUp(); + } { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); @@ -457,7 +459,7 @@ namespace meta { } //Scoped Connection - if (mode_ != Options::MODE::SINGLE) { + if (mode_ == Options::MODE::CLUSTER) { DeleteTableFiles(table_id); } From cba93cf25db6ecda1b527d0f2b023f32dec5b078 Mon Sep 17 00:00:00 2001 From: zhiru Date: Wed, 3 Jul 2019 14:27:19 +0800 Subject: [PATCH 02/29] Disable cleanup if mode is read only Former-commit-id: 8e0119a612c7da87f77c27453144d6666896bdf9 --- cpp/src/db/MySQLMetaImpl.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cpp/src/db/MySQLMetaImpl.cpp b/cpp/src/db/MySQLMetaImpl.cpp index 20b0cef0c6..f2e032dac5 100644 --- a/cpp/src/db/MySQLMetaImpl.cpp +++ b/cpp/src/db/MySQLMetaImpl.cpp @@ -1814,7 +1814,9 @@ namespace meta { MySQLMetaImpl::~MySQLMetaImpl() { // std::lock_guard lock(mysql_mutex); - CleanUp(); + if (mode_ != Options::MODE::READ_ONLY) { + CleanUp(); + } } } // namespace meta From e89f9b18c10f5ee0460ffc2e7b192a2bcce44b91 Mon Sep 17 00:00:00 2001 From: zhiru Date: Wed, 3 Jul 2019 15:48:21 +0800 Subject: [PATCH 03/29] update Former-commit-id: b8255e6455c40e0ee843a56787502b65312b2a4a --- cpp/src/server/RequestTask.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/cpp/src/server/RequestTask.cpp b/cpp/src/server/RequestTask.cpp index 1b91883af5..925bc601cc 100644 --- a/cpp/src/server/RequestTask.cpp +++ b/cpp/src/server/RequestTask.cpp @@ -482,6 +482,7 @@ ServerError SearchVectorTask::OnExecute() { engine::QueryResults results; uint64_t record_count = (uint64_t)record_array_.size(); + SERVER_LOG_DEBUG << file_id_array_ << std::endl; if(file_id_array_.empty()) { stat = DBWrapper::DB()->Query(table_name_, (size_t) top_k_, record_count, vec_f.data(), dates, results); } else { From 0b003144a5f34296701db804a3586ac99839c446 Mon Sep 17 00:00:00 2001 From: zhiru Date: Wed, 3 Jul 2019 16:04:00 +0800 Subject: [PATCH 04/29] update Former-commit-id: b75d6fad2d4165cc08ea1b528a9cff36791344b1 --- cpp/src/server/RequestTask.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cpp/src/server/RequestTask.cpp b/cpp/src/server/RequestTask.cpp index 925bc601cc..d2c2474999 100644 --- a/cpp/src/server/RequestTask.cpp +++ b/cpp/src/server/RequestTask.cpp @@ -482,7 +482,11 @@ ServerError SearchVectorTask::OnExecute() { engine::QueryResults results; uint64_t record_count = 
(uint64_t)record_array_.size(); - SERVER_LOG_DEBUG << file_id_array_ << std::endl; + SERVER_LOG_DEBUG << "file_id_array_: "; + for (auto& file_id : file_id_array_) { + SERVER_LOG_DEBUG << file_id; + } + if(file_id_array_.empty()) { stat = DBWrapper::DB()->Query(table_name_, (size_t) top_k_, record_count, vec_f.data(), dates, results); } else { From fd6d77d62716e771c64b6f0e1eb0b7a3a7fdfed2 Mon Sep 17 00:00:00 2001 From: zhiru Date: Wed, 3 Jul 2019 17:24:36 +0800 Subject: [PATCH 05/29] update Former-commit-id: eb53fcbf6029d7c061d5c87835700b14a1213e63 --- cpp/src/db/DBImpl.cpp | 4 ++++ cpp/src/db/MySQLMetaImpl.cpp | 10 ++++++---- cpp/src/server/RequestHandler.cpp | 2 ++ cpp/src/server/RequestTask.cpp | 5 ----- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/cpp/src/db/DBImpl.cpp b/cpp/src/db/DBImpl.cpp index 0a1e8651e1..352fcae7d0 100644 --- a/cpp/src/db/DBImpl.cpp +++ b/cpp/src/db/DBImpl.cpp @@ -191,6 +191,10 @@ Status DBImpl::Query(const std::string& table_id, const std::vector return status; } + for (auto& file_schema : files_array) { + ENGINE_LOG_DEBUG << "file_id: " << file_schema.file_id_; + } + if(files_array.empty()) { return Status::Error("Invalid file id"); } diff --git a/cpp/src/db/MySQLMetaImpl.cpp b/cpp/src/db/MySQLMetaImpl.cpp index f2e032dac5..5bef070337 100644 --- a/cpp/src/db/MySQLMetaImpl.cpp +++ b/cpp/src/db/MySQLMetaImpl.cpp @@ -1083,10 +1083,10 @@ namespace meta { // } Query getTableFileQuery = connectionPtr->query(); - getTableFileQuery << "SELECT engine_type, file_id, file_type, size, date " << - "FROM TableFiles " << - "WHERE table_id = " << quote << table_id << " AND " << - "(" << idStr << ");"; + getTableFileQuery << "SELECT id, engine_type, file_id, file_type, size, date " << + "FROM TableFiles " << + "WHERE table_id = " << quote << table_id << " AND " << + "(" << idStr << ");"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetTableFiles: " << getTableFileQuery.str(); @@ -1106,6 +1106,8 @@ namespace meta { TableFileSchema file_schema; + file_schema.id_ = resRow["id"]; + file_schema.table_id_ = table_id; file_schema.engine_type_ = resRow["engine_type"]; diff --git a/cpp/src/server/RequestHandler.cpp b/cpp/src/server/RequestHandler.cpp index 037f80e0db..a4dc182c35 100644 --- a/cpp/src/server/RequestHandler.cpp +++ b/cpp/src/server/RequestHandler.cpp @@ -53,6 +53,7 @@ RequestHandler::SearchVector(std::vector &_return, const std::vector &query_record_array, const std::vector &query_range_array, const int64_t topk) { +// SERVER_LOG_DEBUG << "Entering RequestHandler::SearchVector"; BaseTaskPtr task_ptr = SearchVectorTask::Create(table_name, std::vector(), query_record_array, query_range_array, topk, _return); RequestScheduler::ExecTask(task_ptr); @@ -65,6 +66,7 @@ RequestHandler::SearchVectorInFiles(std::vector<::milvus::thrift::TopKQueryResul const std::vector<::milvus::thrift::RowRecord> &query_record_array, const std::vector<::milvus::thrift::Range> &query_range_array, const int64_t topk) { +// SERVER_LOG_DEBUG << "Entering RequestHandler::SearchVectorInFiles. 
file_id_array size = " << std::to_string(file_id_array.size()); BaseTaskPtr task_ptr = SearchVectorTask::Create(table_name, file_id_array, query_record_array, query_range_array, topk, _return); RequestScheduler::ExecTask(task_ptr); diff --git a/cpp/src/server/RequestTask.cpp b/cpp/src/server/RequestTask.cpp index d2c2474999..1b91883af5 100644 --- a/cpp/src/server/RequestTask.cpp +++ b/cpp/src/server/RequestTask.cpp @@ -482,11 +482,6 @@ ServerError SearchVectorTask::OnExecute() { engine::QueryResults results; uint64_t record_count = (uint64_t)record_array_.size(); - SERVER_LOG_DEBUG << "file_id_array_: "; - for (auto& file_id : file_id_array_) { - SERVER_LOG_DEBUG << file_id; - } - if(file_id_array_.empty()) { stat = DBWrapper::DB()->Query(table_name_, (size_t) top_k_, record_count, vec_f.data(), dates, results); } else { From ff9a3140c5bf08a8db78058fe9987eee8fed969d Mon Sep 17 00:00:00 2001 From: zhiru Date: Wed, 3 Jul 2019 18:31:45 +0800 Subject: [PATCH 06/29] update Former-commit-id: 2543b4f174931936a829eba157aec1fab6c0ef84 --- cpp/src/db/DBImpl.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cpp/src/db/DBImpl.cpp b/cpp/src/db/DBImpl.cpp index 352fcae7d0..0a1e8651e1 100644 --- a/cpp/src/db/DBImpl.cpp +++ b/cpp/src/db/DBImpl.cpp @@ -191,10 +191,6 @@ Status DBImpl::Query(const std::string& table_id, const std::vector return status; } - for (auto& file_schema : files_array) { - ENGINE_LOG_DEBUG << "file_id: " << file_schema.file_id_; - } - if(files_array.empty()) { return Status::Error("Invalid file id"); } From 2586e4bf6e74153d83a079e19f2e77b9fe476842 Mon Sep 17 00:00:00 2001 From: zhiru Date: Wed, 3 Jul 2019 18:44:29 +0800 Subject: [PATCH 07/29] update Former-commit-id: 68476662b5f24c76c79ec63f6a9dd556e3008f74 --- cpp/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/cpp/CHANGELOG.md b/cpp/CHANGELOG.md index 8d7186911b..630b86d735 100644 --- a/cpp/CHANGELOG.md +++ b/cpp/CHANGELOG.md @@ -26,6 +26,7 @@ Please mark all change in change log and use the ticket from JIRA. 
- MS-99 - Fix compilation bug - MS-110 - Avoid huge file size - MS-148 - Disable cleanup if mode is read only +- MS-149 - Fixed searching only one index file issue in distributed mode ## Improvement - MS-82 - Update server startup welcome message From 7512cc1e062aa3d920fda2be33949f0dd4851eb6 Mon Sep 17 00:00:00 2001 From: yu yunfeng Date: Wed, 3 Jul 2019 20:07:11 +0800 Subject: [PATCH 08/29] acc test Former-commit-id: a7803568fa785d403e7c92b83c66f9f47f7c8f13 --- cpp/src/db/DBMetaImpl.cpp | 14 ++++++++------ cpp/src/db/FaissExecutionEngine.cpp | 1 + 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/cpp/src/db/DBMetaImpl.cpp b/cpp/src/db/DBMetaImpl.cpp index 8c56c863e7..d13899dca0 100644 --- a/cpp/src/db/DBMetaImpl.cpp +++ b/cpp/src/db/DBMetaImpl.cpp @@ -612,7 +612,8 @@ Status DBMetaImpl::GetTableFiles(const std::string& table_id, TableFilesSchema& table_files) { try { table_files.clear(); - auto files = ConnectorPtr->select(columns(&TableFileSchema::file_id_, + auto files = ConnectorPtr->select(columns(&TableFileSchema::id_, + &TableFileSchema::file_id_, &TableFileSchema::file_type_, &TableFileSchema::size_, &TableFileSchema::date_, @@ -631,11 +632,12 @@ Status DBMetaImpl::GetTableFiles(const std::string& table_id, for (auto &file : files) { TableFileSchema file_schema; file_schema.table_id_ = table_id; - file_schema.file_id_ = std::get<0>(file); - file_schema.file_type_ = std::get<1>(file); - file_schema.size_ = std::get<2>(file); - file_schema.date_ = std::get<3>(file); - file_schema.engine_type_ = std::get<4>(file); + file_schema.id_ = std::get<0>(file); + file_schema.file_id_ = std::get<1>(file); + file_schema.file_type_ = std::get<2>(file); + file_schema.size_ = std::get<3>(file); + file_schema.date_ = std::get<4>(file); + file_schema.engine_type_ = std::get<5>(file); file_schema.dimension_ = table_schema.dimension_; GetTableFilePath(file_schema); diff --git a/cpp/src/db/FaissExecutionEngine.cpp b/cpp/src/db/FaissExecutionEngine.cpp index 20bd530e78..201c07dbcf 100644 --- a/cpp/src/db/FaissExecutionEngine.cpp +++ b/cpp/src/db/FaissExecutionEngine.cpp @@ -138,6 +138,7 @@ Status FaissExecutionEngine::Search(long n, auto start_time = METRICS_NOW_TIME; std::shared_ptr ivf_index = std::dynamic_pointer_cast(pIndex_); + //ENGINE_LOG_DEBUG << "Index nlist: " << ivf_index->nlist << ", ntotal: "<< ivf_index->ntotal; if(ivf_index) { ENGINE_LOG_DEBUG << "Index type: IVFFLAT nProbe: " << nprobe_; ivf_index->nprobe = nprobe_; From 6092b1be997c27d89f4858d4e5b239046b0889e8 Mon Sep 17 00:00:00 2001 From: jinhai Date: Wed, 3 Jul 2019 20:23:21 +0800 Subject: [PATCH 09/29] Remove unused code Former-commit-id: 5be2fd19cb451599de0c242b9023c5a15b45e204 --- cpp/src/server/MilvusServer.cpp | 33 ++++++++++++--------------------- cpp/src/server/ServerConfig.h | 1 - 2 files changed, 12 insertions(+), 22 deletions(-) diff --git a/cpp/src/server/MilvusServer.cpp b/cpp/src/server/MilvusServer.cpp index 322460968f..452ee3af88 100644 --- a/cpp/src/server/MilvusServer.cpp +++ b/cpp/src/server/MilvusServer.cpp @@ -50,7 +50,6 @@ MilvusServer::StartService() { std::string address = server_config.GetValue(CONFIG_SERVER_ADDRESS, "127.0.0.1"); int32_t port = server_config.GetInt32Value(CONFIG_SERVER_PORT, 19530); std::string protocol = server_config.GetValue(CONFIG_SERVER_PROTOCOL, "binary"); - std::string mode = server_config.GetValue(CONFIG_SERVER_MODE, "thread_pool"); try { DBWrapper::DB();//initialize db @@ -68,30 +67,22 @@ MilvusServer::StartService() { } else if (protocol == "compact") { 
protocol_factory.reset(new TCompactProtocolFactory()); } else { - //SERVER_LOG_INFO << "Service protocol: " << protocol << " is not supported currently"; + // SERVER_LOG_INFO << "Service protocol: " << protocol << " is not supported currently"; return; } - std::string mode = "thread_pool"; - if (mode == "simple") { - s_server.reset(new TSimpleServer(processor, server_transport, transport_factory, protocol_factory)); - s_server->serve(); - } else if (mode == "thread_pool") { - stdcxx::shared_ptr threadManager(ThreadManager::newSimpleThreadManager()); - stdcxx::shared_ptr threadFactory(new PosixThreadFactory()); - threadManager->threadFactory(threadFactory); - threadManager->start(); + stdcxx::shared_ptr threadManager(ThreadManager::newSimpleThreadManager()); + stdcxx::shared_ptr threadFactory(new PosixThreadFactory()); + threadManager->threadFactory(threadFactory); + threadManager->start(); + + s_server.reset(new ThreadPoolServer(processor, + server_transport, + transport_factory, + protocol_factory, + threadManager)); + s_server->serve(); - s_server.reset(new ThreadPoolServer(processor, - server_transport, - transport_factory, - protocol_factory, - threadManager)); - s_server->serve(); - } else { - //SERVER_LOG_INFO << "Service mode: " << mode << " is not supported currently"; - return; - } } catch (apache::thrift::TException& ex) { std::cout << "ERROR! " << ex.what() << std::endl; kill(0, SIGUSR1); diff --git a/cpp/src/server/ServerConfig.h b/cpp/src/server/ServerConfig.h index 412581bc1f..0ec04eed8c 100644 --- a/cpp/src/server/ServerConfig.h +++ b/cpp/src/server/ServerConfig.h @@ -18,7 +18,6 @@ static const std::string CONFIG_SERVER = "server_config"; static const std::string CONFIG_SERVER_ADDRESS = "address"; static const std::string CONFIG_SERVER_PORT = "port"; static const std::string CONFIG_SERVER_PROTOCOL = "transfer_protocol"; -static const std::string CONFIG_SERVER_MODE = "server_mode"; static const std::string CONFIG_CLUSTER_MODE = "mode"; static const std::string CONFIG_DB = "db_config"; From 55afa772d3dc34ebbbdd093dacc4bd3ad900ad58 Mon Sep 17 00:00:00 2001 From: yu yunfeng Date: Wed, 3 Jul 2019 20:59:36 +0800 Subject: [PATCH 10/29] ADD LOG Former-commit-id: 282768be582212cd6bf364ea1903a9ad716106c0 --- cpp/src/db/scheduler/task/SearchTask.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cpp/src/db/scheduler/task/SearchTask.cpp b/cpp/src/db/scheduler/task/SearchTask.cpp index d04f270331..80c7291753 100644 --- a/cpp/src/db/scheduler/task/SearchTask.cpp +++ b/cpp/src/db/scheduler/task/SearchTask.cpp @@ -167,6 +167,14 @@ std::shared_ptr SearchTask::Execute() { ClusterResult(output_ids, output_distence, context->nq(), inner_k, result_set); rc.Record("cluster result"); + + SERVER_LOG_DEBUG << "Query Result: "; + for(auto& id2score_vector: result_set) { + for(auto& pair: id2score_vector) { + SERVER_LOG_DEBUG << "id: " << pair.first << ", distance: " << pair.second; + } + } + //step 4: pick up topk result TopkResult(result_set, inner_k, context->GetResult()); rc.Record("reduce topk"); From 45c172543f3ccdc05bfd55ec0fe6e271819e8b7c Mon Sep 17 00:00:00 2001 From: yu yunfeng Date: Thu, 4 Jul 2019 12:41:24 +0800 Subject: [PATCH 11/29] MS-151 Fix topk problem Former-commit-id: eeac049095172303c32f4f26ee13b9c8724870b0 --- cpp/src/db/FaissExecutionEngine.cpp | 1 - cpp/src/db/scheduler/task/SearchTask.cpp | 13 +++---------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/cpp/src/db/FaissExecutionEngine.cpp b/cpp/src/db/FaissExecutionEngine.cpp index 
201c07dbcf..20bd530e78 100644 --- a/cpp/src/db/FaissExecutionEngine.cpp +++ b/cpp/src/db/FaissExecutionEngine.cpp @@ -138,7 +138,6 @@ Status FaissExecutionEngine::Search(long n, auto start_time = METRICS_NOW_TIME; std::shared_ptr ivf_index = std::dynamic_pointer_cast(pIndex_); - //ENGINE_LOG_DEBUG << "Index nlist: " << ivf_index->nlist << ", ntotal: "<< ivf_index->ntotal; if(ivf_index) { ENGINE_LOG_DEBUG << "Index type: IVFFLAT nProbe: " << nprobe_; ivf_index->nprobe = nprobe_; diff --git a/cpp/src/db/scheduler/task/SearchTask.cpp b/cpp/src/db/scheduler/task/SearchTask.cpp index 80c7291753..2bfac90e20 100644 --- a/cpp/src/db/scheduler/task/SearchTask.cpp +++ b/cpp/src/db/scheduler/task/SearchTask.cpp @@ -151,7 +151,7 @@ std::shared_ptr SearchTask::Execute() { std::vector output_distence; for(auto& context : search_contexts_) { //step 1: allocate memory - auto inner_k = index_engine_->Count() < context->topk() ? index_engine_->Count() : context->topk(); + auto inner_k = context->topk(); output_ids.resize(inner_k*context->nq()); output_distence.resize(inner_k*context->nq()); @@ -164,17 +164,10 @@ std::shared_ptr SearchTask::Execute() { //step 3: cluster result SearchContext::ResultSet result_set; - ClusterResult(output_ids, output_distence, context->nq(), inner_k, result_set); + auto spec_k = index_engine_->Count() < context->topk() ? index_engine_->Count() : context->topk(); + ClusterResult(output_ids, output_distence, context->nq(), spec_k, result_set); rc.Record("cluster result"); - - SERVER_LOG_DEBUG << "Query Result: "; - for(auto& id2score_vector: result_set) { - for(auto& pair: id2score_vector) { - SERVER_LOG_DEBUG << "id: " << pair.first << ", distance: " << pair.second; - } - } - //step 4: pick up topk result TopkResult(result_set, inner_k, context->GetResult()); rc.Record("reduce topk"); From 0b2721dd7d76de6d5a1077b9917c9629962766ad Mon Sep 17 00:00:00 2001 From: starlord Date: Thu, 4 Jul 2019 13:02:47 +0800 Subject: [PATCH 12/29] reduce unittest time cost Former-commit-id: 55e4eb5912766465cb3cd864994f71c08fd971a9 --- cpp/unittest/db/db_tests.cpp | 2 +- cpp/unittest/db/mysql_db_test.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cpp/unittest/db/db_tests.cpp b/cpp/unittest/db/db_tests.cpp index d505320e86..bd17081af8 100644 --- a/cpp/unittest/db/db_tests.cpp +++ b/cpp/unittest/db/db_tests.cpp @@ -21,7 +21,7 @@ namespace { static const std::string TABLE_NAME = "test_group"; static constexpr int64_t TABLE_DIM = 256; static constexpr int64_t VECTOR_COUNT = 250000; - static constexpr int64_t INSERT_LOOP = 100000; + static constexpr int64_t INSERT_LOOP = 10000; engine::meta::TableSchema BuildTableSchema() { engine::meta::TableSchema table_info; diff --git a/cpp/unittest/db/mysql_db_test.cpp b/cpp/unittest/db/mysql_db_test.cpp index 907aa8a0c4..7fdb30a204 100644 --- a/cpp/unittest/db/mysql_db_test.cpp +++ b/cpp/unittest/db/mysql_db_test.cpp @@ -21,7 +21,7 @@ namespace { static const std::string TABLE_NAME = "test_group"; static constexpr int64_t TABLE_DIM = 256; static constexpr int64_t VECTOR_COUNT = 250000; - static constexpr int64_t INSERT_LOOP = 100000; + static constexpr int64_t INSERT_LOOP = 10000; engine::meta::TableSchema BuildTableSchema() { engine::meta::TableSchema table_info; From cf8ee20f20005ce2849a389a7f0f54cc31a29c3d Mon Sep 17 00:00:00 2001 From: zhiru Date: Thu, 4 Jul 2019 14:04:57 +0800 Subject: [PATCH 13/29] update Former-commit-id: 31aeb1f531676aeb9e9655a169a80ff508f6ae00 --- cpp/coverage.sh | 1 + cpp/src/db/MySQLConnectionPool.cpp | 78 
+++++++++++++++++++ cpp/src/db/MySQLConnectionPool.h | 75 +++++-------------- cpp/src/db/MySQLMetaImpl.cpp | 116 ++++++++++++++++++++++++----- 4 files changed, 198 insertions(+), 72 deletions(-) create mode 100644 cpp/src/db/MySQLConnectionPool.cpp diff --git a/cpp/coverage.sh b/cpp/coverage.sh index a0a4cfd03f..7a48e5d451 100755 --- a/cpp/coverage.sh +++ b/cpp/coverage.sh @@ -33,6 +33,7 @@ function mysql_exc() mysql_exc "CREATE DATABASE IF NOT EXISTS ${MYSQL_DB_NAME};" mysql_exc "GRANT ALL PRIVILEGES ON ${MYSQL_DB_NAME}.* TO '${MYSQL_USER_NAME}'@'%';" mysql_exc "FLUSH PRIVILEGES;" +mysql_exc "USE ${MYSQL_DB_NAME};" # get baseline ${LCOV_CMD} -c -i -d ${DIR_GCNO} -o "${FILE_INFO_BASE}" diff --git a/cpp/src/db/MySQLConnectionPool.cpp b/cpp/src/db/MySQLConnectionPool.cpp new file mode 100644 index 0000000000..b43126920e --- /dev/null +++ b/cpp/src/db/MySQLConnectionPool.cpp @@ -0,0 +1,78 @@ +#include "MySQLConnectionPool.h" + +namespace zilliz { +namespace milvus { +namespace engine { +namespace meta { + + // Do a simple form of in-use connection limiting: wait to return + // a connection until there are a reasonably low number in use + // already. Can't do this in create() because we're interested in + // connections actually in use, not those created. Also note that + // we keep our own count; ConnectionPool::size() isn't the same! + mysqlpp::Connection *MySQLConnectionPool::grab() { + while (conns_in_use_ > max_pool_size_) { + sleep(1); + } + + ++conns_in_use_; + return mysqlpp::ConnectionPool::grab(); + } + + // Other half of in-use conn count limit + void MySQLConnectionPool::release(const mysqlpp::Connection *pc) { + mysqlpp::ConnectionPool::release(pc); + + if (conns_in_use_ <= 0) { + ENGINE_LOG_WARNING << "MySQLConnetionPool::release: conns_in_use_ is less than zero. conns_in_use_ = " << conns_in_use_; + } else { + --conns_in_use_; + } + } + + int MySQLConnectionPool::getConnectionsInUse() { + return conns_in_use_; + } + + void MySQLConnectionPool::set_max_idle_time(int max_idle) { + max_idle_time_ = max_idle; + } + + std::string MySQLConnectionPool::getDB() { + return db_; + } + + // Superclass overrides + mysqlpp::Connection *MySQLConnectionPool::create() { + + try { + // Create connection using the parameters we were passed upon + // creation. + mysqlpp::Connection *conn = new mysqlpp::Connection(); + conn->set_option(new mysqlpp::ReconnectOption(true)); + conn->connect(db_.empty() ? 0 : db_.c_str(), + server_.empty() ? 0 : server_.c_str(), + user_.empty() ? 0 : user_.c_str(), + password_.empty() ? 0 : password_.c_str(), + port_); + return conn; + } catch (const mysqlpp::ConnectionFailed& er) { + ENGINE_LOG_ERROR << "Failed to connect to database server" << ": " << er.what(); + return nullptr; + } + } + + void MySQLConnectionPool::destroy(mysqlpp::Connection *cp) { + // Our superclass can't know how we created the Connection, so + // it delegates destruction to us, to be safe. 
+ delete cp; + } + + unsigned int MySQLConnectionPool::max_idle_time() { + return max_idle_time_; + } + +} // namespace meta +} // namespace engine +} // namespace milvus +} // namespace zilliz diff --git a/cpp/src/db/MySQLConnectionPool.h b/cpp/src/db/MySQLConnectionPool.h index 6a763a9729..5112993b94 100644 --- a/cpp/src/db/MySQLConnectionPool.h +++ b/cpp/src/db/MySQLConnectionPool.h @@ -6,6 +6,11 @@ #include "Log.h" +namespace zilliz { +namespace milvus { +namespace engine { +namespace meta { + class MySQLConnectionPool : public mysqlpp::ConnectionPool { public: @@ -21,8 +26,7 @@ public: password_(passWord), server_(serverIp), port_(port), - max_pool_size_(maxPoolSize) - { + max_pool_size_(maxPoolSize) { conns_in_use_ = 0; @@ -35,69 +39,25 @@ public: clear(); } - // Do a simple form of in-use connection limiting: wait to return - // a connection until there are a reasonably low number in use - // already. Can't do this in create() because we're interested in - // connections actually in use, not those created. Also note that - // we keep our own count; ConnectionPool::size() isn't the same! - mysqlpp::Connection* grab() override { - while (conns_in_use_ > max_pool_size_) { - sleep(1); - } - - ++conns_in_use_; - return mysqlpp::ConnectionPool::grab(); - } + mysqlpp::Connection *grab() override; // Other half of in-use conn count limit - void release(const mysqlpp::Connection* pc) override { - mysqlpp::ConnectionPool::release(pc); + void release(const mysqlpp::Connection *pc) override; - if (conns_in_use_ <= 0) { - ENGINE_LOG_WARNING << "MySQLConnetionPool::release: conns_in_use_ is less than zero. conns_in_use_ = " << conns_in_use_ << std::endl; - } - else { - --conns_in_use_; - } - } + int getConnectionsInUse(); - int getConnectionsInUse() { - return conns_in_use_; - } + void set_max_idle_time(int max_idle); - void set_max_idle_time(int max_idle) { - max_idle_time_ = max_idle; - } - - std::string getDB() { - return db_; - } + std::string getDB(); protected: // Superclass overrides - mysqlpp::Connection* create() override { - // Create connection using the parameters we were passed upon - // creation. - mysqlpp::Connection* conn = new mysqlpp::Connection(); - conn->set_option(new mysqlpp::ReconnectOption(true)); - conn->connect(db_.empty() ? 0 : db_.c_str(), - server_.empty() ? 0 : server_.c_str(), - user_.empty() ? 0 : user_.c_str(), - password_.empty() ? 0 : password_.c_str(), - port_); - return conn; - } + mysqlpp::Connection *create() override; - void destroy(mysqlpp::Connection* cp) override { - // Our superclass can't know how we created the Connection, so - // it delegates destruction to us, to be safe. 
- delete cp; - } + void destroy(mysqlpp::Connection *cp) override; - unsigned int max_idle_time() override { - return max_idle_time_; - } + unsigned int max_idle_time() override; private: // Number of connections currently in use @@ -110,4 +70,9 @@ private: int max_pool_size_; unsigned int max_idle_time_; -}; \ No newline at end of file +}; + +} // namespace meta +} // namespace engine +} // namespace milvus +} // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/MySQLMetaImpl.cpp b/cpp/src/db/MySQLMetaImpl.cpp index 5bef070337..f32c5b65e2 100644 --- a/cpp/src/db/MySQLMetaImpl.cpp +++ b/cpp/src/db/MySQLMetaImpl.cpp @@ -169,6 +169,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // ENGINE_LOG_DEBUG << "MySQLMetaImpl::Initialize: connections in use = " << mysql_connection_pool_->getConnectionsInUse(); // if (!connectionPtr->connect(dbName, serverAddress, username, password, port)) { // return Status::Error("DB connection failed: ", connectionPtr->error()); @@ -234,9 +238,6 @@ namespace meta { // } else { // return Status::DBTransactionError("Initialization Error", InitializeQuery.error()); // } - } catch (const ConnectionFailed& er) { - ENGINE_LOG_ERROR << "Failed to connect to database server" << ": " << er.what(); - return Status::DBTransactionError("Failed to connect to database server", er.what()); } catch (const BadQuery& er) { // Handle any query errors ENGINE_LOG_ERROR << "QUERY ERROR DURING INITIALIZATION" << ": " << er.what(); @@ -292,6 +293,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::DropPartitionsByDates connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -335,6 +340,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::CreateTable connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -351,7 +360,7 @@ namespace meta { ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str(); StoreQueryResult res = createTableQuery.store(); - assert(res && res.num_rows() <= 1); + if (res.num_rows() == 1) { int state = res[0]["state"]; if (TableSchema::TO_DELETE == state) { @@ -438,6 +447,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::DeleteTable connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -483,6 +496,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::DeleteTableFiles connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ 
-529,6 +546,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::DescribeTable connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -544,7 +565,6 @@ namespace meta { res = describeTableQuery.store(); } //Scoped Connection - assert(res && res.num_rows() <= 1); if (res.num_rows() == 1) { const Row& resRow = res[0]; @@ -592,6 +612,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::HasTable connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -609,7 +633,6 @@ namespace meta { res = hasTableQuery.store(); } //Scoped Connection - assert(res && res.num_rows() == 1); int check = res[0]["check"]; has_or_not = (check == 1); @@ -639,6 +662,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::AllTables connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -726,6 +753,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::CreateTableFile connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -792,6 +823,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::FilesToIndex connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -875,6 +910,9 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::FilesToSearch connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -986,6 +1024,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::FilesToMerge connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -1078,6 +1120,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::GetTableFiles connection in use = " << 
mysql_connection_pool_->getConnectionsInUse(); // } @@ -1093,8 +1139,6 @@ namespace meta { res = getTableFileQuery.store(); } //Scoped Connection - assert(res); - TableSchema table_schema; table_schema.table_id_ = table_id; auto status = DescribeTable(table_schema); @@ -1162,6 +1206,10 @@ namespace meta { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::Archive connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -1212,6 +1260,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::Size connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -1226,7 +1278,6 @@ namespace meta { res = getSizeQuery.store(); } //Scoped Connection - assert(res && res.num_rows() == 1); // if (!res) { //// std::cout << "result is NULL" << std::endl; // return Status::DBTransactionError("QUERY ERROR WHEN RETRIEVING SIZE", getSizeQuery.error()); @@ -1272,6 +1323,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::DiscardFiles connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -1288,7 +1343,6 @@ namespace meta { // std::cout << discardFilesQuery.str() << std::endl; StoreQueryResult res = discardFilesQuery.store(); - assert(res); if (res.num_rows() == 0) { return Status::OK(); } @@ -1350,6 +1404,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::UpdateTableFile connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -1365,7 +1423,6 @@ namespace meta { StoreQueryResult res = updateTableFileQuery.store(); - assert(res && res.num_rows() <= 1); if (res.num_rows() == 1) { int state = res[0]["state"]; if (state == TableSchema::TO_DELETE) { @@ -1432,6 +1489,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::UpdateTableFiles connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -1455,7 +1516,6 @@ namespace meta { StoreQueryResult res = updateTableFilesQuery.store(); - assert(res && res.num_rows() == 1); int check = res[0]["check"]; has_tables[file_schema.table_id_] = (check == 1); } @@ -1527,6 +1587,10 @@ namespace meta { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << 
"MySQLMetaImpl::CleanUpFilesWithTTL: clean table files: connection in use after creating ScopedConnection = " // << mysql_connection_pool_->getConnectionsInUse(); @@ -1542,8 +1606,6 @@ namespace meta { StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); - assert(res); - TableFileSchema table_file; std::vector idsToDelete; @@ -1611,6 +1673,10 @@ namespace meta { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::CleanUpFilesWithTTL: clean tables: connection in use after creating ScopedConnection = " // << mysql_connection_pool_->getConnectionsInUse(); @@ -1624,7 +1690,6 @@ namespace meta { ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); - assert(res); // std::cout << res.num_rows() << std::endl; if (!res.empty()) { @@ -1677,6 +1742,10 @@ namespace meta { try { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::CleanUp: connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -1690,7 +1759,7 @@ namespace meta { ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUp: " << cleanUpQuery.str(); StoreQueryResult res = cleanUpQuery.store(); - assert(res); + if (!res.empty()) { ENGINE_LOG_DEBUG << "Remove table file type as NEW"; cleanUpQuery << "DELETE FROM TableFiles WHERE file_type = " << std::to_string(TableFileSchema::NEW) << ";"; @@ -1736,6 +1805,10 @@ namespace meta { { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::Count: connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } @@ -1759,7 +1832,12 @@ namespace meta { result += size; } - assert(table_schema.dimension_ != 0); + if (table_schema.dimension_ <= 0) { + std::stringstream errorMsg; + errorMsg << "MySQLMetaImpl::Count: " << "table dimension = " << std::to_string(table_schema.dimension_) << ", table_id = " << table_id; + ENGINE_LOG_ERROR << errorMsg.str(); + return Status::Error(errorMsg.str()); + } result /= table_schema.dimension_; result /= sizeof(float); @@ -1786,6 +1864,10 @@ namespace meta { ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab); + if (connectionPtr == nullptr) { + return Status::Error("Failed to connect to database server"); + } + // if (mysql_connection_pool_->getConnectionsInUse() <= 0) { // ENGINE_LOG_WARNING << "MySQLMetaImpl::DropAll: connection in use = " << mysql_connection_pool_->getConnectionsInUse(); // } From 45eaa66b5944e55c56f5447aff06debe97d467fe Mon Sep 17 00:00:00 2001 From: zhiru Date: Thu, 4 Jul 2019 14:20:59 +0800 Subject: [PATCH 14/29] update Former-commit-id: 660919641b476988d86b5db64d190192bb3c2ffe --- cpp/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/cpp/CHANGELOG.md b/cpp/CHANGELOG.md index 630b86d735..712f77f591 100644 --- a/cpp/CHANGELOG.md +++ b/cpp/CHANGELOG.md @@ -40,6 +40,7 @@ Please mark all change in change log and use the ticket from JIRA. 
- MS-124 - HasTable interface - MS-126 - Add more error code - MS-128 - Change default db path +- MS-152 - Delete assert in MySQLMetaImpl and change MySQLConnectionPool impl ## New Feature From 10880b0288a0334e01e1f247c93fa5bfc0dc7a16 Mon Sep 17 00:00:00 2001 From: zhiru Date: Thu, 4 Jul 2019 15:58:28 +0800 Subject: [PATCH 15/29] fix c_str error when connecting to MySQL Former-commit-id: c17d2ed43291d54dd0c1b29b222dae5505c98105 --- cpp/src/db/MySQLMetaImpl.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cpp/src/db/MySQLMetaImpl.cpp b/cpp/src/db/MySQLMetaImpl.cpp index f32c5b65e2..8ba7f1d3b6 100644 --- a/cpp/src/db/MySQLMetaImpl.cpp +++ b/cpp/src/db/MySQLMetaImpl.cpp @@ -144,15 +144,15 @@ namespace meta { if (dialect.find("mysql") == std::string::npos) { return Status::Error("URI's dialect is not MySQL"); } - const char* username = pieces_match[2].str().c_str(); - const char* password = pieces_match[3].str().c_str(); - const char* serverAddress = pieces_match[4].str().c_str(); + std::string username = pieces_match[2].str(); + std::string password = pieces_match[3].str(); + std::string serverAddress = pieces_match[4].str(); unsigned int port = 0; if (!pieces_match[5].str().empty()) { port = std::stoi(pieces_match[5].str()); } - const char* dbName = pieces_match[6].str().c_str(); - //std::cout << dbName << " " << serverAddress << " " << username << " " << password << " " << port << std::endl; + std::string dbName = pieces_match[6].str(); +// std::cout << dbName << " " << serverAddress << " " << username << " " << password << " " << port << std::endl; // connectionPtr->set_option(new MultiStatementsOption(true)); // connectionPtr->set_option(new mysqlpp::ReconnectOption(true)); int threadHint = std::thread::hardware_concurrency(); @@ -1753,8 +1753,8 @@ namespace meta { Query cleanUpQuery = connectionPtr->query(); cleanUpQuery << "SELECT table_name " << "FROM information_schema.tables " << - "WHERE table_schema = " << quote << mysql_connection_pool_->getDB() << quote << " " << - "AND table_name = " << quote << "TableFiles" << quote << ";"; + "WHERE table_schema = " << quote << mysql_connection_pool_->getDB() << " " << + "AND table_name = " << quote << "TableFiles" << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUp: " << cleanUpQuery.str(); From 43d90c2045fc5012046a19b41c84599e92a4fcc1 Mon Sep 17 00:00:00 2001 From: zhiru Date: Thu, 4 Jul 2019 15:58:38 +0800 Subject: [PATCH 16/29] fix c_str error when connecting to MySQL Former-commit-id: 2575292b943cdd9d4035492b78616f7be5f060f3 --- cpp/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/cpp/CHANGELOG.md b/cpp/CHANGELOG.md index 712f77f591..038289a2e0 100644 --- a/cpp/CHANGELOG.md +++ b/cpp/CHANGELOG.md @@ -27,6 +27,7 @@ Please mark all change in change log and use the ticket from JIRA. 
- MS-110 - Avoid huge file size - MS-148 - Disable cleanup if mode is read only - MS-149 - Fixed searching only one index file issue in distributed mode +- MS-153 - fix c_str error when connecting to MySQL ## Improvement - MS-82 - Update server startup welcome message From f8cdf0786ba1abf51bda40dccc1fb8ed0c68e02d Mon Sep 17 00:00:00 2001 From: starlord Date: Thu, 4 Jul 2019 16:58:40 +0800 Subject: [PATCH 17/29] add uiittest for merge result functions Former-commit-id: 0ad3ac4b08e06a1c64249aea05f0c62efa3fe57a --- cpp/CHANGELOG.md | 1 + cpp/src/db/scheduler/context/SearchContext.h | 4 +- cpp/src/db/scheduler/task/SearchTask.cpp | 215 ++++++++++--------- cpp/src/db/scheduler/task/SearchTask.h | 14 ++ cpp/unittest/db/search_test.cpp | 162 ++++++++++++++ 5 files changed, 294 insertions(+), 102 deletions(-) create mode 100644 cpp/unittest/db/search_test.cpp diff --git a/cpp/CHANGELOG.md b/cpp/CHANGELOG.md index 630b86d735..42b11ab957 100644 --- a/cpp/CHANGELOG.md +++ b/cpp/CHANGELOG.md @@ -8,6 +8,7 @@ Please mark all change in change log and use the ticket from JIRA. ## Bug ## Improvement +- MS-156 - Add unittest for merge result functions ## New Feature diff --git a/cpp/src/db/scheduler/context/SearchContext.h b/cpp/src/db/scheduler/context/SearchContext.h index 1997b80764..e81622eb32 100644 --- a/cpp/src/db/scheduler/context/SearchContext.h +++ b/cpp/src/db/scheduler/context/SearchContext.h @@ -32,8 +32,8 @@ public: using Id2IndexMap = std::unordered_map; const Id2IndexMap& GetIndexMap() const { return map_index_files_; } - using Id2ScoreMap = std::vector>; - using ResultSet = std::vector; + using Id2DistanceMap = std::vector>; + using ResultSet = std::vector; const ResultSet& GetResult() const { return result_; } ResultSet& GetResult() { return result_; } diff --git a/cpp/src/db/scheduler/task/SearchTask.cpp b/cpp/src/db/scheduler/task/SearchTask.cpp index 2bfac90e20..708bcc8708 100644 --- a/cpp/src/db/scheduler/task/SearchTask.cpp +++ b/cpp/src/db/scheduler/task/SearchTask.cpp @@ -13,104 +13,6 @@ namespace milvus { namespace engine { namespace { -void ClusterResult(const std::vector &output_ids, - const std::vector &output_distence, - uint64_t nq, - uint64_t topk, - SearchContext::ResultSet &result_set) { - result_set.clear(); - result_set.reserve(nq); - for (auto i = 0; i < nq; i++) { - SearchContext::Id2ScoreMap id_score; - id_score.reserve(topk); - for (auto k = 0; k < topk; k++) { - uint64_t index = i * topk + k; - if(output_ids[index] < 0) { - continue; - } - id_score.push_back(std::make_pair(output_ids[index], output_distence[index])); - } - result_set.emplace_back(id_score); - } -} - -void MergeResult(SearchContext::Id2ScoreMap &score_src, - SearchContext::Id2ScoreMap &score_target, - uint64_t topk) { - //Note: the score_src and score_target are already arranged by score in ascending order - if(score_src.empty()) { - return; - } - - if(score_target.empty()) { - score_target.swap(score_src); - return; - } - - size_t src_count = score_src.size(); - size_t target_count = score_target.size(); - SearchContext::Id2ScoreMap score_merged; - score_merged.reserve(topk); - size_t src_index = 0, target_index = 0; - while(true) { - //all score_src items are merged, if score_merged.size() still less than topk - //move items from score_target to score_merged until score_merged.size() equal topk - if(src_index >= src_count) { - for(size_t i = target_index; i < target_count && score_merged.size() < topk; ++i) { - score_merged.push_back(score_target[i]); - } - break; - } - - //all score_target items 
are merged, if score_merged.size() still less than topk - //move items from score_src to score_merged until score_merged.size() equal topk - if(target_index >= target_count) { - for(size_t i = src_index; i < src_count && score_merged.size() < topk; ++i) { - score_merged.push_back(score_src[i]); - } - break; - } - - //compare score, put smallest score to score_merged one by one - auto& src_pair = score_src[src_index]; - auto& target_pair = score_target[target_index]; - if(src_pair.second > target_pair.second) { - score_merged.push_back(target_pair); - target_index++; - } else { - score_merged.push_back(src_pair); - src_index++; - } - - //score_merged.size() already equal topk - if(score_merged.size() >= topk) { - break; - } - } - - score_target.swap(score_merged); -} - -void TopkResult(SearchContext::ResultSet &result_src, - uint64_t topk, - SearchContext::ResultSet &result_target) { - if (result_target.empty()) { - result_target.swap(result_src); - return; - } - - if (result_src.size() != result_target.size()) { - SERVER_LOG_ERROR << "Invalid result set"; - return; - } - - for (size_t i = 0; i < result_src.size(); i++) { - SearchContext::Id2ScoreMap &score_src = result_src[i]; - SearchContext::Id2ScoreMap &score_target = result_target[i]; - MergeResult(score_src, score_target, topk); - } -} - void CollectDurationMetrics(int index_type, double total_time) { switch(index_type) { case meta::TableFileSchema::RAW: { @@ -165,11 +67,11 @@ std::shared_ptr SearchTask::Execute() { //step 3: cluster result SearchContext::ResultSet result_set; auto spec_k = index_engine_->Count() < context->topk() ? index_engine_->Count() : context->topk(); - ClusterResult(output_ids, output_distence, context->nq(), spec_k, result_set); + SearchTask::ClusterResult(output_ids, output_distence, context->nq(), spec_k, result_set); rc.Record("cluster result"); //step 4: pick up topk result - TopkResult(result_set, inner_k, context->GetResult()); + SearchTask::TopkResult(result_set, inner_k, context->GetResult()); rc.Record("reduce topk"); } catch (std::exception& ex) { @@ -191,6 +93,119 @@ std::shared_ptr SearchTask::Execute() { return nullptr; } +Status SearchTask::ClusterResult(const std::vector &output_ids, + const std::vector &output_distence, + uint64_t nq, + uint64_t topk, + SearchContext::ResultSet &result_set) { + if(output_ids.size() != nq*topk || output_distence.size() != nq*topk) { + std::string msg = "Invalid id array size: " + std::to_string(output_ids.size()) + + " distance array size: " + std::to_string(output_distence.size()); + SERVER_LOG_ERROR << msg; + return Status::Error(msg); + } + + result_set.clear(); + result_set.reserve(nq); + for (auto i = 0; i < nq; i++) { + SearchContext::Id2DistanceMap id_distance; + id_distance.reserve(topk); + for (auto k = 0; k < topk; k++) { + uint64_t index = i * topk + k; + if(output_ids[index] < 0) { + continue; + } + id_distance.push_back(std::make_pair(output_ids[index], output_distence[index])); + } + result_set.emplace_back(id_distance); + } + + return Status::OK(); +} + +Status SearchTask::MergeResult(SearchContext::Id2DistanceMap &distance_src, + SearchContext::Id2DistanceMap &distance_target, + uint64_t topk) { + //Note: the score_src and score_target are already arranged by score in ascending order + if(distance_src.empty()) { + SERVER_LOG_WARNING << "Empty distance source array"; + return Status::OK(); + } + + if(distance_target.empty()) { + distance_target.swap(distance_src); + return Status::OK(); + } + + size_t src_count = distance_src.size(); + size_t 
target_count = distance_target.size(); + SearchContext::Id2DistanceMap distance_merged; + distance_merged.reserve(topk); + size_t src_index = 0, target_index = 0; + while(true) { + //all score_src items are merged, if score_merged.size() still less than topk + //move items from score_target to score_merged until score_merged.size() equal topk + if(src_index >= src_count) { + for(size_t i = target_index; i < target_count && distance_merged.size() < topk; ++i) { + distance_merged.push_back(distance_target[i]); + } + break; + } + + //all score_target items are merged, if score_merged.size() still less than topk + //move items from score_src to score_merged until score_merged.size() equal topk + if(target_index >= target_count) { + for(size_t i = src_index; i < src_count && distance_merged.size() < topk; ++i) { + distance_merged.push_back(distance_src[i]); + } + break; + } + + //compare score, put smallest score to score_merged one by one + auto& src_pair = distance_src[src_index]; + auto& target_pair = distance_target[target_index]; + if(src_pair.second > target_pair.second) { + distance_merged.push_back(target_pair); + target_index++; + } else { + distance_merged.push_back(src_pair); + src_index++; + } + + //score_merged.size() already equal topk + if(distance_merged.size() >= topk) { + break; + } + } + + distance_target.swap(distance_merged); + + return Status::OK(); +} + +Status SearchTask::TopkResult(SearchContext::ResultSet &result_src, + uint64_t topk, + SearchContext::ResultSet &result_target) { + if (result_target.empty()) { + result_target.swap(result_src); + return Status::OK(); + } + + if (result_src.size() != result_target.size()) { + std::string msg = "Invalid result set size"; + SERVER_LOG_ERROR << msg; + return Status::Error(msg); + } + + for (size_t i = 0; i < result_src.size(); i++) { + SearchContext::Id2DistanceMap &score_src = result_src[i]; + SearchContext::Id2DistanceMap &score_target = result_target[i]; + SearchTask::MergeResult(score_src, score_target, topk); + } + + return Status::OK(); +} + } } } diff --git a/cpp/src/db/scheduler/task/SearchTask.h b/cpp/src/db/scheduler/task/SearchTask.h index 0b3a236ce4..e4f0d872b1 100644 --- a/cpp/src/db/scheduler/task/SearchTask.h +++ b/cpp/src/db/scheduler/task/SearchTask.h @@ -19,6 +19,20 @@ public: virtual std::shared_ptr Execute() override; + static Status ClusterResult(const std::vector &output_ids, + const std::vector &output_distence, + uint64_t nq, + uint64_t topk, + SearchContext::ResultSet &result_set); + + static Status MergeResult(SearchContext::Id2DistanceMap &distance_src, + SearchContext::Id2DistanceMap &distance_target, + uint64_t topk); + + static Status TopkResult(SearchContext::ResultSet &result_src, + uint64_t topk, + SearchContext::ResultSet &result_target); + public: size_t index_id_ = 0; int index_type_ = 0; //for metrics diff --git a/cpp/unittest/db/search_test.cpp b/cpp/unittest/db/search_test.cpp new file mode 100644 index 0000000000..db10bcbadf --- /dev/null +++ b/cpp/unittest/db/search_test.cpp @@ -0,0 +1,162 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved +// Unauthorized copying of this file, via any medium is strictly prohibited. +// Proprietary and confidential. 
+//////////////////////////////////////////////////////////////////////////////// +#include + +#include "db/scheduler/task/SearchTask.h" + +#include + +using namespace zilliz::milvus; + +namespace { + +static constexpr uint64_t NQ = 15; +static constexpr uint64_t TOP_K = 64; + +void BuildResult(uint64_t nq, + uint64_t top_k, + std::vector &output_ids, + std::vector &output_distence) { + output_ids.clear(); + output_ids.resize(nq*top_k); + output_distence.clear(); + output_distence.resize(nq*top_k); + + for(uint64_t i = 0; i < nq; i++) { + for(uint64_t j = 0; j < top_k; j++) { + output_ids[i * top_k + j] = (long)(drand48()*100000); + output_distence[i * top_k + j] = j + drand48(); + } + } +} + +void CheckResult(const engine::SearchContext::Id2DistanceMap& src_1, + const engine::SearchContext::Id2DistanceMap& src_2, + const engine::SearchContext::Id2DistanceMap& target) { + for(uint64_t i = 0; i < target.size() - 1; i++) { + ASSERT_LE(target[i].second, target[i + 1].second); + } + + using ID2DistMap = std::map; + ID2DistMap src_map_1, src_map_2; + for(const auto& pair : src_1) { + src_map_1.insert(pair); + } + for(const auto& pair : src_2) { + src_map_2.insert(pair); + } + + for(const auto& pair : target) { + ASSERT_TRUE(src_map_1.find(pair.first) != src_map_1.end() || src_map_2.find(pair.first) != src_map_2.end()); + + float dist = src_map_1.find(pair.first) != src_map_1.end() ? src_map_1[pair.first] : src_map_2[pair.first]; + ASSERT_LT(fabs(pair.second - dist), std::numeric_limits::epsilon()); + } +} + +} + +TEST(DBSearchTest, TOPK_TEST) { + std::vector target_ids; + std::vector target_distence; + engine::SearchContext::ResultSet src_result; + auto status = engine::SearchTask::ClusterResult(target_ids, target_distence, NQ, TOP_K, src_result); + ASSERT_FALSE(status.ok()); + ASSERT_TRUE(src_result.empty()); + + BuildResult(NQ, TOP_K, target_ids, target_distence); + status = engine::SearchTask::ClusterResult(target_ids, target_distence, NQ, TOP_K, src_result); + ASSERT_TRUE(status.ok()); + ASSERT_EQ(src_result.size(), NQ); + + engine::SearchContext::ResultSet target_result; + status = engine::SearchTask::TopkResult(target_result, TOP_K, target_result); + ASSERT_TRUE(status.ok()); + + status = engine::SearchTask::TopkResult(target_result, TOP_K, src_result); + ASSERT_FALSE(status.ok()); + + status = engine::SearchTask::TopkResult(src_result, TOP_K, target_result); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(src_result.empty()); + ASSERT_EQ(target_result.size(), NQ); + + std::vector src_ids; + std::vector src_distence; + uint64_t wrong_topk = TOP_K - 10; + BuildResult(NQ, wrong_topk, src_ids, src_distence); + + status = engine::SearchTask::ClusterResult(src_ids, src_distence, NQ, wrong_topk, src_result); + ASSERT_TRUE(status.ok()); + + status = engine::SearchTask::TopkResult(src_result, TOP_K, target_result); + ASSERT_TRUE(status.ok()); + for(uint64_t i = 0; i < NQ; i++) { + ASSERT_EQ(target_result[i].size(), TOP_K); + } + + wrong_topk = TOP_K + 10; + BuildResult(NQ, wrong_topk, src_ids, src_distence); + + status = engine::SearchTask::TopkResult(src_result, TOP_K, target_result); + ASSERT_TRUE(status.ok()); + for(uint64_t i = 0; i < NQ; i++) { + ASSERT_EQ(target_result[i].size(), TOP_K); + } +} + +TEST(DBSearchTest, MERGE_TEST) { + std::vector target_ids; + std::vector target_distence; + std::vector src_ids; + std::vector src_distence; + engine::SearchContext::ResultSet src_result, target_result; + + uint64_t src_count = 5, target_count = 8; + BuildResult(1, src_count, src_ids, src_distence); + 
BuildResult(1, target_count, target_ids, target_distence); + auto status = engine::SearchTask::ClusterResult(src_ids, src_distence, 1, src_count, src_result); + ASSERT_TRUE(status.ok()); + status = engine::SearchTask::ClusterResult(target_ids, target_distence, 1, target_count, target_result); + ASSERT_TRUE(status.ok()); + + { + engine::SearchContext::Id2DistanceMap src = src_result[0]; + engine::SearchContext::Id2DistanceMap target = target_result[0]; + status = engine::SearchTask::MergeResult(src, target, 10); + ASSERT_TRUE(status.ok()); + ASSERT_EQ(target.size(), 10); + CheckResult(src_result[0], target_result[0], target); + } + + { + engine::SearchContext::Id2DistanceMap src = src_result[0]; + engine::SearchContext::Id2DistanceMap target; + status = engine::SearchTask::MergeResult(src, target, 10); + ASSERT_TRUE(status.ok()); + ASSERT_EQ(target.size(), src_count); + ASSERT_TRUE(src.empty()); + CheckResult(src_result[0], target_result[0], target); + } + + { + engine::SearchContext::Id2DistanceMap src = src_result[0]; + engine::SearchContext::Id2DistanceMap target = target_result[0]; + status = engine::SearchTask::MergeResult(src, target, 30); + ASSERT_TRUE(status.ok()); + ASSERT_EQ(target.size(), src_count + target_count); + CheckResult(src_result[0], target_result[0], target); + } + + { + engine::SearchContext::Id2DistanceMap target = src_result[0]; + engine::SearchContext::Id2DistanceMap src = target_result[0]; + status = engine::SearchTask::MergeResult(src, target, 30); + ASSERT_TRUE(status.ok()); + ASSERT_EQ(target.size(), src_count + target_count); + CheckResult(src_result[0], target_result[0], target); + } +} \ No newline at end of file From f63c3899626c40ec51f35c6f52a1cea770b6a3ab Mon Sep 17 00:00:00 2001 From: zhiru Date: Thu, 4 Jul 2019 17:15:38 +0800 Subject: [PATCH 18/29] fix changelog Former-commit-id: 6e353ed65ec462256e4787e41b4716975dfd5a4c --- cpp/CHANGELOG.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cpp/CHANGELOG.md b/cpp/CHANGELOG.md index 038289a2e0..c6c89ba040 100644 --- a/cpp/CHANGELOG.md +++ b/cpp/CHANGELOG.md @@ -7,8 +7,14 @@ Please mark all change in change log and use the ticket from JIRA. ## Bug +- MS-148 - Disable cleanup if mode is read only +- MS-149 - Fixed searching only one index file issue in distributed mode +- MS-153 - fix c_str error when connecting to MySQL + ## Improvement +- MS-152 - Delete assert in MySQLMetaImpl and change MySQLConnectionPool impl + ## New Feature ## Task @@ -25,9 +31,6 @@ Please mark all change in change log and use the ticket from JIRA. - MS-90 - Fix arch match incorrect on ARM - MS-99 - Fix compilation bug - MS-110 - Avoid huge file size -- MS-148 - Disable cleanup if mode is read only -- MS-149 - Fixed searching only one index file issue in distributed mode -- MS-153 - fix c_str error when connecting to MySQL ## Improvement - MS-82 - Update server startup welcome message @@ -41,7 +44,6 @@ Please mark all change in change log and use the ticket from JIRA. 
- MS-124 - HasTable interface - MS-126 - Add more error code - MS-128 - Change default db path -- MS-152 - Delete assert in MySQLMetaImpl and change MySQLConnectionPool impl ## New Feature From b88314c6807781a14434c9c66e0a092c335d2c8f Mon Sep 17 00:00:00 2001 From: zhiru Date: Thu, 4 Jul 2019 17:17:09 +0800 Subject: [PATCH 19/29] fix changelog Former-commit-id: 56acf5034ef3cf2c07146684d72a19ada980d69f --- cpp/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/cpp/CHANGELOG.md b/cpp/CHANGELOG.md index c6c89ba040..380d381b15 100644 --- a/cpp/CHANGELOG.md +++ b/cpp/CHANGELOG.md @@ -10,6 +10,7 @@ Please mark all change in change log and use the ticket from JIRA. - MS-148 - Disable cleanup if mode is read only - MS-149 - Fixed searching only one index file issue in distributed mode - MS-153 - fix c_str error when connecting to MySQL +- MS-157 - fix changelog ## Improvement From 172cd21daf644dc523c06af95fd428719ce6a693 Mon Sep 17 00:00:00 2001 From: zhiru Date: Fri, 5 Jul 2019 15:03:40 +0800 Subject: [PATCH 20/29] add mem impl Former-commit-id: 074f1ade11572923ddee2653c26ce6a143001b3c --- cpp/src/db/Constants.h | 20 ++++ cpp/src/db/MemTable.cpp | 51 ++++++++++ cpp/src/db/MemTable.h | 40 ++++++++ cpp/src/db/MemTableFile.cpp | 66 +++++++++++++ cpp/src/db/MemTableFile.h | 44 +++++++++ cpp/src/db/VectorSource.cpp | 60 +++++++++++ cpp/src/db/VectorSource.h | 41 ++++++++ cpp/unittest/db/mem_test.cpp | 187 +++++++++++++++++++++++++++++++++++ 8 files changed, 509 insertions(+) create mode 100644 cpp/src/db/Constants.h create mode 100644 cpp/src/db/MemTable.cpp create mode 100644 cpp/src/db/MemTable.h create mode 100644 cpp/src/db/MemTableFile.cpp create mode 100644 cpp/src/db/MemTableFile.h create mode 100644 cpp/src/db/VectorSource.cpp create mode 100644 cpp/src/db/VectorSource.h create mode 100644 cpp/unittest/db/mem_test.cpp diff --git a/cpp/src/db/Constants.h b/cpp/src/db/Constants.h new file mode 100644 index 0000000000..2bb2e0a064 --- /dev/null +++ b/cpp/src/db/Constants.h @@ -0,0 +1,20 @@ +/******************************************************************************* + * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved + * Unauthorized copying of this file, via any medium is strictly prohibited. + * Proprietary and confidential. 
+ ******************************************************************************/ +#pragma once + +namespace zilliz { +namespace milvus { +namespace engine { + +const size_t K = 1024UL; +const size_t M = K*K; +const size_t MAX_TABLE_FILE_MEM = 128 * M; + +const int VECTOR_TYPE_SIZE = sizeof(float); + +} // namespace engine +} // namespace milvus +} // namespace zilliz diff --git a/cpp/src/db/MemTable.cpp b/cpp/src/db/MemTable.cpp new file mode 100644 index 0000000000..032d479999 --- /dev/null +++ b/cpp/src/db/MemTable.cpp @@ -0,0 +1,51 @@ +#include "MemTable.h" +#include "Log.h" + +namespace zilliz { +namespace milvus { +namespace engine { + +MemTable::MemTable(const std::string& table_id, + const std::shared_ptr& meta) : + table_id_(table_id), + meta_(meta) { + +} + +Status MemTable::Add(VectorSource::Ptr& source) { + while (!source->AllAdded()) { + MemTableFile::Ptr currentMemTableFile; + if (!mem_table_file_stack_.empty()) { + currentMemTableFile = mem_table_file_stack_.top(); + } + Status status; + if (mem_table_file_stack_.empty() || currentMemTableFile->isFull()) { + MemTableFile::Ptr newMemTableFile = std::make_shared(table_id_, meta_); + status = newMemTableFile->Add(source); + if (status.ok()) { + mem_table_file_stack_.push(newMemTableFile); + } + } + else { + status = currentMemTableFile->Add(source); + } + if (!status.ok()) { + std::string errMsg = "MemTable::Add failed: " + status.ToString(); + ENGINE_LOG_ERROR << errMsg; + return Status::Error(errMsg); + } + } + return Status::OK(); +} + +void MemTable::GetCurrentMemTableFile(MemTableFile::Ptr& mem_table_file) { + mem_table_file = mem_table_file_stack_.top(); +} + +size_t MemTable::GetStackSize() { + return mem_table_file_stack_.size(); +} + +} // namespace engine +} // namespace milvus +} // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/MemTable.h b/cpp/src/db/MemTable.h new file mode 100644 index 0000000000..b9fe4147d8 --- /dev/null +++ b/cpp/src/db/MemTable.h @@ -0,0 +1,40 @@ +#pragma once + +#include "Status.h" +#include "MemTableFile.h" +#include "VectorSource.h" + +#include + +namespace zilliz { +namespace milvus { +namespace engine { + +class MemTable { + +public: + + using Ptr = std::shared_ptr; + using MemTableFileStack = std::stack; + using MetaPtr = meta::Meta::Ptr; + + MemTable(const std::string& table_id, const std::shared_ptr& meta); + + Status Add(VectorSource::Ptr& source); + + void GetCurrentMemTableFile(MemTableFile::Ptr& mem_table_file); + + size_t GetStackSize(); + +private: + const std::string table_id_; + + MemTableFileStack mem_table_file_stack_; + + MetaPtr meta_; + +}; //MemTable + +} // namespace engine +} // namespace milvus +} // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/MemTableFile.cpp b/cpp/src/db/MemTableFile.cpp new file mode 100644 index 0000000000..26bc0d38e9 --- /dev/null +++ b/cpp/src/db/MemTableFile.cpp @@ -0,0 +1,66 @@ +#include "MemTableFile.h" +#include "Constants.h" +#include "Log.h" + +#include + +namespace zilliz { +namespace milvus { +namespace engine { + +MemTableFile::MemTableFile(const std::string& table_id, + const std::shared_ptr& meta) : + table_id_(table_id), + meta_(meta) { + + current_mem_ = 0; + CreateTableFile(); +} + +Status MemTableFile::CreateTableFile() { + + meta::TableFileSchema table_file_schema; + table_file_schema.table_id_ = table_id_; + auto status = meta_->CreateTableFile(table_file_schema); + if (status.ok()) { + table_file_schema_ = table_file_schema; + } + else { + std::string errMsg = 
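Constants.h above caps a table file at 128 MB of raw float data, so the number of vectors one MemTableFile can hold follows directly from the table dimension. A quick worked check, assuming the 256-dimensional float tables the unit tests below use:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
    constexpr size_t K = 1024, M = K * K;
    constexpr size_t MAX_TABLE_FILE_MEM = 128 * M;              // 128 MB per table file
    constexpr size_t dim = 256;                                 // TABLE_DIM in the tests
    constexpr size_t single_vector_mem = dim * sizeof(float);   // 1024 bytes per vector
    constexpr size_t vectors_per_file = MAX_TABLE_FILE_MEM / single_vector_mem;
    std::printf("%zu vectors fill one table file\n", vectors_per_file);  // 131072
    return 0;
}
```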
"MemTableFile::CreateTableFile failed: " + status.ToString(); + ENGINE_LOG_ERROR << errMsg; + } + return status; +} + +Status MemTableFile::Add(const VectorSource::Ptr& source) { + + size_t singleVectorMemSize = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE; + size_t memLeft = GetMemLeft(); + if (memLeft >= singleVectorMemSize) { + size_t numVectorsToAdd = std::ceil(memLeft / singleVectorMemSize); + size_t numVectorsAdded; + auto status = source->Add(table_file_schema_, numVectorsToAdd, numVectorsAdded); + if (status.ok()) { + current_mem_ += (numVectorsAdded * singleVectorMemSize); + } + return status; + } + return Status::OK(); +} + +size_t MemTableFile::GetCurrentMem() { + return current_mem_; +} + +size_t MemTableFile::GetMemLeft() { + return (MAX_TABLE_FILE_MEM - current_mem_); +} + +bool MemTableFile::isFull() { + size_t singleVectorMemSize = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE; + return (GetMemLeft() < singleVectorMemSize); +} + +} // namespace engine +} // namespace milvus +} // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/MemTableFile.h b/cpp/src/db/MemTableFile.h new file mode 100644 index 0000000000..1efe4c0bfe --- /dev/null +++ b/cpp/src/db/MemTableFile.h @@ -0,0 +1,44 @@ +#pragma once + +#include "Status.h" +#include "Meta.h" +#include "VectorSource.h" + +namespace zilliz { +namespace milvus { +namespace engine { + +class MemTableFile { + +public: + + using Ptr = std::shared_ptr; + using MetaPtr = meta::Meta::Ptr; + + MemTableFile(const std::string& table_id, const std::shared_ptr& meta); + + Status Add(const VectorSource::Ptr& source); + + size_t GetCurrentMem(); + + size_t GetMemLeft(); + + bool isFull(); + +private: + + Status CreateTableFile(); + + const std::string table_id_; + + meta::TableFileSchema table_file_schema_; + + MetaPtr meta_; + + size_t current_mem_; + +}; //MemTableFile + +} // namespace engine +} // namespace milvus +} // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/VectorSource.cpp b/cpp/src/db/VectorSource.cpp new file mode 100644 index 0000000000..dff5423c6f --- /dev/null +++ b/cpp/src/db/VectorSource.cpp @@ -0,0 +1,60 @@ +#include "VectorSource.h" +#include "ExecutionEngine.h" +#include "EngineFactory.h" +#include "Log.h" + +namespace zilliz { +namespace milvus { +namespace engine { + + +VectorSource::VectorSource(const size_t &n, + const float *vectors) : + n_(n), + vectors_(vectors), + id_generator_(new SimpleIDGenerator()) { + current_num_vectors_added = 0; +} + +Status VectorSource::Add(const meta::TableFileSchema& table_file_schema, const size_t& num_vectors_to_add, size_t& num_vectors_added) { + + if (table_file_schema.dimension_ <= 0) { + std::string errMsg = "VectorSource::Add: table_file_schema dimension = " + + std::to_string(table_file_schema.dimension_) + ", table_id = " + table_file_schema.table_id_; + ENGINE_LOG_ERROR << errMsg; + return Status::Error(errMsg); + } + ExecutionEnginePtr engine = EngineFactory::Build(table_file_schema.dimension_, + table_file_schema.location_, + (EngineType)table_file_schema.engine_type_); + + num_vectors_added = current_num_vectors_added + num_vectors_to_add <= n_ ? 
num_vectors_to_add : n_ - current_num_vectors_added; + IDNumbers vector_ids_to_add; + id_generator_->GetNextIDNumbers(num_vectors_added, vector_ids_to_add); + Status status = engine->AddWithIds(num_vectors_added, vectors_ + current_num_vectors_added, vector_ids_to_add.data()); + if (status.ok()) { + current_num_vectors_added += num_vectors_added; + vector_ids_.insert(vector_ids_.end(), vector_ids_to_add.begin(), vector_ids_to_add.end()); + } + else { + ENGINE_LOG_ERROR << "VectorSource::Add failed: " + status.ToString(); + } + + return status; +} + +size_t VectorSource::GetNumVectorsAdded() { + return current_num_vectors_added; +} + +bool VectorSource::AllAdded() { + return (current_num_vectors_added == n_); +} + +IDNumbers VectorSource::GetVectorIds() { + return vector_ids_; +} + +} // namespace engine +} // namespace milvus +} // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/VectorSource.h b/cpp/src/db/VectorSource.h new file mode 100644 index 0000000000..170f3634cf --- /dev/null +++ b/cpp/src/db/VectorSource.h @@ -0,0 +1,41 @@ +#pragma once + +#include "Status.h" +#include "Meta.h" +#include "IDGenerator.h" + +namespace zilliz { +namespace milvus { +namespace engine { + +class VectorSource { + +public: + + using Ptr = std::shared_ptr; + + VectorSource(const size_t& n, const float* vectors); + + Status Add(const meta::TableFileSchema& table_file_schema, const size_t& num_vectors_to_add, size_t& num_vectors_added); + + size_t GetNumVectorsAdded(); + + bool AllAdded(); + + IDNumbers GetVectorIds(); + +private: + + const size_t n_; + const float* vectors_; + IDNumbers vector_ids_; + + size_t current_num_vectors_added; + + IDGenerator* id_generator_; + +}; //VectorSource + +} // namespace engine +} // namespace milvus +} // namespace zilliz \ No newline at end of file diff --git a/cpp/unittest/db/mem_test.cpp b/cpp/unittest/db/mem_test.cpp new file mode 100644 index 0000000000..8418b9cd2d --- /dev/null +++ b/cpp/unittest/db/mem_test.cpp @@ -0,0 +1,187 @@ +#include "gtest/gtest.h" + +#include "db/VectorSource.h" +#include "db/MemTableFile.h" +#include "db/MemTable.h" +#include "utils.h" +#include "db/Factories.h" +#include "db/Constants.h" + +using namespace zilliz::milvus; + +namespace { + + static const std::string TABLE_NAME = "test_group"; + static constexpr int64_t TABLE_DIM = 256; + static constexpr int64_t VECTOR_COUNT = 250000; + static constexpr int64_t INSERT_LOOP = 10000; + + engine::meta::TableSchema BuildTableSchema() { + engine::meta::TableSchema table_info; + table_info.dimension_ = TABLE_DIM; + table_info.table_id_ = TABLE_NAME; + table_info.engine_type_ = (int)engine::EngineType::FAISS_IDMAP; + return table_info; + } + + void BuildVectors(int64_t n, std::vector& vectors) { + vectors.clear(); + vectors.resize(n*TABLE_DIM); + float* data = vectors.data(); + for(int i = 0; i < n; i++) { + for(int j = 0; j < TABLE_DIM; j++) data[TABLE_DIM * i + j] = drand48(); + data[TABLE_DIM * i] += i / 2000.; + } + } +} + +TEST(MEM_TEST, VECTOR_SOURCE_TEST) { + + std::shared_ptr impl_ = engine::DBMetaImplFactory::Build(); + + engine::meta::TableSchema table_schema = BuildTableSchema(); + auto status = impl_->CreateTable(table_schema); + ASSERT_TRUE(status.ok()); + + engine::meta::TableFileSchema table_file_schema; + table_file_schema.table_id_ = TABLE_NAME; + status = impl_->CreateTableFile(table_file_schema); + ASSERT_TRUE(status.ok()); + + int64_t n = 100; + std::vector vectors; + BuildVectors(n, vectors); + + engine::VectorSource source(n, vectors.data()); + + size_t 
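VectorSource::Add above never hands out more vectors than the source still holds: the requested count is clamped to n_ - current_num_vectors_added before IDs are generated. That clamp is what the VECTOR_SOURCE_TEST that follows relies on: a 100-vector source adds 50 on the first call, then only the remaining 50 even though 60 are requested. The same expression in isolation:

```cpp
#include <algorithm>
#include <cstddef>

// The clamp applied by VectorSource::Add before generating IDs (sketch).
size_t ClampToRemaining(size_t already_added, size_t requested, size_t total_n) {
    return std::min(requested, total_n - already_added);
}
// ClampToRemaining(0, 50, 100)  == 50
// ClampToRemaining(50, 60, 100) == 50
```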
num_vectors_added; + status = source.Add(table_file_schema, 50, num_vectors_added); + ASSERT_TRUE(status.ok()); + + ASSERT_EQ(num_vectors_added, 50); + + engine::IDNumbers vector_ids = source.GetVectorIds(); + ASSERT_EQ(vector_ids.size(), 50); + + status = source.Add(table_file_schema, 60, num_vectors_added); + ASSERT_TRUE(status.ok()); + + ASSERT_EQ(num_vectors_added, 50); + + vector_ids = source.GetVectorIds(); + ASSERT_EQ(vector_ids.size(), 100); + +// for (auto& id : vector_ids) { +// std::cout << id << std::endl; +// } + + status = impl_->DropAll(); + ASSERT_TRUE(status.ok()); +} + +TEST(MEM_TEST, MEM_TABLE_FILE_TEST) { + + std::shared_ptr impl_ = engine::DBMetaImplFactory::Build(); + + engine::meta::TableSchema table_schema = BuildTableSchema(); + auto status = impl_->CreateTable(table_schema); + ASSERT_TRUE(status.ok()); + + engine::MemTableFile memTableFile(TABLE_NAME, impl_); + + int64_t n_100 = 100; + std::vector vectors_100; + BuildVectors(n_100, vectors_100); + + engine::VectorSource::Ptr source = std::make_shared(n_100, vectors_100.data()); + + status = memTableFile.Add(source); + ASSERT_TRUE(status.ok()); + +// std::cout << memTableFile.GetCurrentMem() << " " << memTableFile.GetMemLeft() << std::endl; + + engine::IDNumbers vector_ids = source->GetVectorIds(); + ASSERT_EQ(vector_ids.size(), 100); + + size_t singleVectorMem = sizeof(float) * TABLE_DIM; + ASSERT_EQ(memTableFile.GetCurrentMem(), n_100 * singleVectorMem); + + int64_t n_max = engine::MAX_TABLE_FILE_MEM / singleVectorMem; + std::vector vectors_128M; + BuildVectors(n_max, vectors_128M); + + engine::VectorSource::Ptr source_128M = std::make_shared(n_max, vectors_128M.data()); + status = memTableFile.Add(source_128M); + + vector_ids = source_128M->GetVectorIds(); + ASSERT_EQ(vector_ids.size(), n_max - n_100); + + ASSERT_TRUE(memTableFile.isFull()); + + status = impl_->DropAll(); + ASSERT_TRUE(status.ok()); +} + +TEST(MEM_TEST, MEM_TABLE_TEST) { + + std::shared_ptr impl_ = engine::DBMetaImplFactory::Build(); + + engine::meta::TableSchema table_schema = BuildTableSchema(); + auto status = impl_->CreateTable(table_schema); + ASSERT_TRUE(status.ok()); + + int64_t n_100 = 100; + std::vector vectors_100; + BuildVectors(n_100, vectors_100); + + engine::VectorSource::Ptr source_100 = std::make_shared(n_100, vectors_100.data()); + + engine::MemTable memTable(TABLE_NAME, impl_); + + status = memTable.Add(source_100); + ASSERT_TRUE(status.ok()); + + engine::IDNumbers vector_ids = source_100->GetVectorIds(); + ASSERT_EQ(vector_ids.size(), 100); + + engine::MemTableFile::Ptr memTableFile; + memTable.GetCurrentMemTableFile(memTableFile); + size_t singleVectorMem = sizeof(float) * TABLE_DIM; + ASSERT_EQ(memTableFile->GetCurrentMem(), n_100 * singleVectorMem); + + int64_t n_max = engine::MAX_TABLE_FILE_MEM / singleVectorMem; + std::vector vectors_128M; + BuildVectors(n_max, vectors_128M); + + engine::VectorSource::Ptr source_128M = std::make_shared(n_max, vectors_128M.data()); + status = memTable.Add(source_128M); + ASSERT_TRUE(status.ok()); + + vector_ids = source_128M->GetVectorIds(); + ASSERT_EQ(vector_ids.size(), n_max); + + memTable.GetCurrentMemTableFile(memTableFile); + ASSERT_EQ(memTableFile->GetCurrentMem(), n_100 * singleVectorMem); + + ASSERT_EQ(memTable.GetStackSize(), 2); + + int64_t n_1G = 1024000; + std::vector vectors_1G; + BuildVectors(n_1G, vectors_1G); + + engine::VectorSource::Ptr source_1G = std::make_shared(n_1G, vectors_1G.data()); + + status = memTable.Add(source_1G); + ASSERT_TRUE(status.ok()); + + vector_ids 
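MEM_TABLE_FILE_TEST above leans on a couple of fixed numbers: with 256-dimensional float vectors one table file holds exactly 131072 of them, so after the first 100 insertions only 130972 of the next n_max vectors are accepted before the file reports IsFull(). The arithmetic, checked at compile time:

```cpp
#include <cstddef>

// Numbers behind MEM_TABLE_FILE_TEST (TABLE_DIM = 256, float vectors):
constexpr size_t single_vector_mem = 256 * sizeof(float);            // 1024 bytes
constexpr size_t n_max = (128UL * 1024 * 1024) / single_vector_mem;  // vectors per file
constexpr size_t n_100 = 100;

static_assert(n_max == 131072, "capacity of one 128 MB table file");
static_assert(n_max - n_100 == 130972, "vectors accepted from the second source");
```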
= source_1G->GetVectorIds(); + ASSERT_EQ(vector_ids.size(), n_1G); + + int expectedStackSize = 2 + std::ceil((n_1G - n_100) * singleVectorMem / engine::MAX_TABLE_FILE_MEM); + ASSERT_EQ(memTable.GetStackSize(), expectedStackSize); + + status = impl_->DropAll(); + ASSERT_TRUE(status.ok()); +} + + From 8f42ef678d577af061f522575b9aa60c844a09f6 Mon Sep 17 00:00:00 2001 From: zhiru Date: Fri, 5 Jul 2019 15:57:49 +0800 Subject: [PATCH 21/29] update Former-commit-id: b5c019432679df7fcdf3aacd0e061ee91ddf9609 --- cpp/src/db/MemTableFile.cpp | 10 ++++++++-- cpp/src/db/MemTableFile.h | 3 +++ cpp/src/db/VectorSource.cpp | 10 +++++----- cpp/src/db/VectorSource.h | 8 +++++++- cpp/unittest/db/mem_test.cpp | 8 ++++++-- 5 files changed, 29 insertions(+), 10 deletions(-) diff --git a/cpp/src/db/MemTableFile.cpp b/cpp/src/db/MemTableFile.cpp index 26bc0d38e9..58b76ab834 100644 --- a/cpp/src/db/MemTableFile.cpp +++ b/cpp/src/db/MemTableFile.cpp @@ -1,6 +1,7 @@ #include "MemTableFile.h" #include "Constants.h" #include "Log.h" +#include "EngineFactory.h" #include @@ -14,7 +15,12 @@ MemTableFile::MemTableFile(const std::string& table_id, meta_(meta) { current_mem_ = 0; - CreateTableFile(); + auto status = CreateTableFile(); + if (status.ok()) { + execution_engine_ = EngineFactory::Build(table_file_schema_.dimension_, + table_file_schema_.location_, + (EngineType)table_file_schema_.engine_type_); + } } Status MemTableFile::CreateTableFile() { @@ -39,7 +45,7 @@ Status MemTableFile::Add(const VectorSource::Ptr& source) { if (memLeft >= singleVectorMemSize) { size_t numVectorsToAdd = std::ceil(memLeft / singleVectorMemSize); size_t numVectorsAdded; - auto status = source->Add(table_file_schema_, numVectorsToAdd, numVectorsAdded); + auto status = source->Add(execution_engine_, table_file_schema_, numVectorsToAdd, numVectorsAdded); if (status.ok()) { current_mem_ += (numVectorsAdded * singleVectorMemSize); } diff --git a/cpp/src/db/MemTableFile.h b/cpp/src/db/MemTableFile.h index 1efe4c0bfe..04f30178ea 100644 --- a/cpp/src/db/MemTableFile.h +++ b/cpp/src/db/MemTableFile.h @@ -3,6 +3,7 @@ #include "Status.h" #include "Meta.h" #include "VectorSource.h" +#include "ExecutionEngine.h" namespace zilliz { namespace milvus { @@ -37,6 +38,8 @@ private: size_t current_mem_; + ExecutionEnginePtr execution_engine_; + }; //MemTableFile } // namespace engine diff --git a/cpp/src/db/VectorSource.cpp b/cpp/src/db/VectorSource.cpp index dff5423c6f..f7cef994fa 100644 --- a/cpp/src/db/VectorSource.cpp +++ b/cpp/src/db/VectorSource.cpp @@ -16,7 +16,10 @@ VectorSource::VectorSource(const size_t &n, current_num_vectors_added = 0; } -Status VectorSource::Add(const meta::TableFileSchema& table_file_schema, const size_t& num_vectors_to_add, size_t& num_vectors_added) { +Status VectorSource::Add(const ExecutionEnginePtr& execution_engine, + const meta::TableFileSchema& table_file_schema, + const size_t& num_vectors_to_add, + size_t& num_vectors_added) { if (table_file_schema.dimension_ <= 0) { std::string errMsg = "VectorSource::Add: table_file_schema dimension = " + @@ -24,14 +27,11 @@ Status VectorSource::Add(const meta::TableFileSchema& table_file_schema, const s ENGINE_LOG_ERROR << errMsg; return Status::Error(errMsg); } - ExecutionEnginePtr engine = EngineFactory::Build(table_file_schema.dimension_, - table_file_schema.location_, - (EngineType)table_file_schema.engine_type_); num_vectors_added = current_num_vectors_added + num_vectors_to_add <= n_ ? 
num_vectors_to_add : n_ - current_num_vectors_added; IDNumbers vector_ids_to_add; id_generator_->GetNextIDNumbers(num_vectors_added, vector_ids_to_add); - Status status = engine->AddWithIds(num_vectors_added, vectors_ + current_num_vectors_added, vector_ids_to_add.data()); + Status status = execution_engine->AddWithIds(num_vectors_added, vectors_ + current_num_vectors_added, vector_ids_to_add.data()); if (status.ok()) { current_num_vectors_added += num_vectors_added; vector_ids_.insert(vector_ids_.end(), vector_ids_to_add.begin(), vector_ids_to_add.end()); diff --git a/cpp/src/db/VectorSource.h b/cpp/src/db/VectorSource.h index 170f3634cf..597eee4ad8 100644 --- a/cpp/src/db/VectorSource.h +++ b/cpp/src/db/VectorSource.h @@ -3,6 +3,7 @@ #include "Status.h" #include "Meta.h" #include "IDGenerator.h" +#include "ExecutionEngine.h" namespace zilliz { namespace milvus { @@ -16,7 +17,10 @@ public: VectorSource(const size_t& n, const float* vectors); - Status Add(const meta::TableFileSchema& table_file_schema, const size_t& num_vectors_to_add, size_t& num_vectors_added); + Status Add(const ExecutionEnginePtr& execution_engine, + const meta::TableFileSchema& table_file_schema, + const size_t& num_vectors_to_add, + size_t& num_vectors_added); size_t GetNumVectorsAdded(); @@ -24,6 +28,8 @@ public: IDNumbers GetVectorIds(); +// Status Serialize(); + private: const size_t n_; diff --git a/cpp/unittest/db/mem_test.cpp b/cpp/unittest/db/mem_test.cpp index 8418b9cd2d..111914f8a9 100644 --- a/cpp/unittest/db/mem_test.cpp +++ b/cpp/unittest/db/mem_test.cpp @@ -6,6 +6,7 @@ #include "utils.h" #include "db/Factories.h" #include "db/Constants.h" +#include "db/EngineFactory.h" using namespace zilliz::milvus; @@ -55,7 +56,10 @@ TEST(MEM_TEST, VECTOR_SOURCE_TEST) { engine::VectorSource source(n, vectors.data()); size_t num_vectors_added; - status = source.Add(table_file_schema, 50, num_vectors_added); + engine::ExecutionEnginePtr execution_engine_ = engine::EngineFactory::Build(table_file_schema.dimension_, + table_file_schema.location_, + (engine::EngineType)table_file_schema.engine_type_); + status = source.Add(execution_engine_, table_file_schema, 50, num_vectors_added); ASSERT_TRUE(status.ok()); ASSERT_EQ(num_vectors_added, 50); @@ -63,7 +67,7 @@ TEST(MEM_TEST, VECTOR_SOURCE_TEST) { engine::IDNumbers vector_ids = source.GetVectorIds(); ASSERT_EQ(vector_ids.size(), 50); - status = source.Add(table_file_schema, 60, num_vectors_added); + status = source.Add(execution_engine_, table_file_schema, 60, num_vectors_added); ASSERT_TRUE(status.ok()); ASSERT_EQ(num_vectors_added, 50); From 9f38b96eddf222c57bb4b1eb6b23edf7d6b16735 Mon Sep 17 00:00:00 2001 From: zhiru Date: Fri, 5 Jul 2019 16:46:15 +0800 Subject: [PATCH 22/29] Implemented add and serialize Former-commit-id: 25fbbc2185efc4b45ea8f4693fea0ba0001d267e --- cpp/src/db/MemTable.cpp | 32 +++++++++++++++++++-------- cpp/src/db/MemTable.h | 10 ++++++--- cpp/src/db/MemTableFile.cpp | 42 +++++++++++++++++++++++++++++++++--- cpp/src/db/MemTableFile.h | 8 +++++-- cpp/src/db/VectorSource.cpp | 12 +++++------ cpp/src/db/VectorSource.h | 2 -- cpp/unittest/db/mem_test.cpp | 11 +++++++--- 7 files changed, 89 insertions(+), 28 deletions(-) diff --git a/cpp/src/db/MemTable.cpp b/cpp/src/db/MemTable.cpp index 032d479999..86554695c8 100644 --- a/cpp/src/db/MemTable.cpp +++ b/cpp/src/db/MemTable.cpp @@ -6,24 +6,26 @@ namespace milvus { namespace engine { MemTable::MemTable(const std::string& table_id, - const std::shared_ptr& meta) : + const std::shared_ptr& meta, + const 
Options& options) : table_id_(table_id), - meta_(meta) { + meta_(meta), + options_(options) { } Status MemTable::Add(VectorSource::Ptr& source) { while (!source->AllAdded()) { MemTableFile::Ptr currentMemTableFile; - if (!mem_table_file_stack_.empty()) { - currentMemTableFile = mem_table_file_stack_.top(); + if (!mem_table_file_list_.empty()) { + currentMemTableFile = mem_table_file_list_.back(); } Status status; - if (mem_table_file_stack_.empty() || currentMemTableFile->isFull()) { - MemTableFile::Ptr newMemTableFile = std::make_shared(table_id_, meta_); + if (mem_table_file_list_.empty() || currentMemTableFile->IsFull()) { + MemTableFile::Ptr newMemTableFile = std::make_shared(table_id_, meta_, options_); status = newMemTableFile->Add(source); if (status.ok()) { - mem_table_file_stack_.push(newMemTableFile); + mem_table_file_list_.emplace_back(newMemTableFile); } } else { @@ -39,11 +41,23 @@ Status MemTable::Add(VectorSource::Ptr& source) { } void MemTable::GetCurrentMemTableFile(MemTableFile::Ptr& mem_table_file) { - mem_table_file = mem_table_file_stack_.top(); + mem_table_file = mem_table_file_list_.back(); } size_t MemTable::GetStackSize() { - return mem_table_file_stack_.size(); + return mem_table_file_list_.size(); +} + +Status MemTable::Serialize() { + for (auto& memTableFile : mem_table_file_list_) { + auto status = memTableFile->Serialize(); + if (!status.ok()) { + std::string errMsg = "MemTable::Serialize failed: " + status.ToString(); + ENGINE_LOG_ERROR << errMsg; + return Status::Error(errMsg); + } + } + return Status::OK(); } } // namespace engine diff --git a/cpp/src/db/MemTable.h b/cpp/src/db/MemTable.h index b9fe4147d8..d5c7cc9e85 100644 --- a/cpp/src/db/MemTable.h +++ b/cpp/src/db/MemTable.h @@ -15,10 +15,10 @@ class MemTable { public: using Ptr = std::shared_ptr; - using MemTableFileStack = std::stack; + using MemTableFileList = std::vector; using MetaPtr = meta::Meta::Ptr; - MemTable(const std::string& table_id, const std::shared_ptr& meta); + MemTable(const std::string& table_id, const std::shared_ptr& meta, const Options& options); Status Add(VectorSource::Ptr& source); @@ -26,13 +26,17 @@ public: size_t GetStackSize(); + Status Serialize(); + private: const std::string table_id_; - MemTableFileStack mem_table_file_stack_; + MemTableFileList mem_table_file_list_; MetaPtr meta_; + Options options_; + }; //MemTable } // namespace engine diff --git a/cpp/src/db/MemTableFile.cpp b/cpp/src/db/MemTableFile.cpp index 58b76ab834..0ff91de00b 100644 --- a/cpp/src/db/MemTableFile.cpp +++ b/cpp/src/db/MemTableFile.cpp @@ -2,6 +2,7 @@ #include "Constants.h" #include "Log.h" #include "EngineFactory.h" +#include "metrics/Metrics.h" #include @@ -10,9 +11,11 @@ namespace milvus { namespace engine { MemTableFile::MemTableFile(const std::string& table_id, - const std::shared_ptr& meta) : + const std::shared_ptr& meta, + const Options& options) : table_id_(table_id), - meta_(meta) { + meta_(meta), + options_(options) { current_mem_ = 0; auto status = CreateTableFile(); @@ -40,6 +43,13 @@ Status MemTableFile::CreateTableFile() { Status MemTableFile::Add(const VectorSource::Ptr& source) { + if (table_file_schema_.dimension_ <= 0) { + std::string errMsg = "MemTableFile::Add: table_file_schema dimension = " + + std::to_string(table_file_schema_.dimension_) + ", table_id = " + table_file_schema_.table_id_; + ENGINE_LOG_ERROR << errMsg; + return Status::Error(errMsg); + } + size_t singleVectorMemSize = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE; size_t memLeft = GetMemLeft(); if 
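With this change MemTable keeps an append-only list of MemTableFiles: Add always fills the last file and only opens a new one once IsFull() reports the previous one cannot take another vector, and Serialize later walks the whole list. A hedged usage sketch of the flow the tests below drive (includes and setup assumed as in those tests, error handling omitted):

```cpp
#include <cstddef>
#include <memory>

#include "db/MemTable.h"
#include "db/VectorSource.h"

using namespace zilliz::milvus;

// Mirrors the calls MEM_TABLE_TEST below makes; `meta`, `options`, `data` and `n`
// are assumed to be set up as in that test.
void FlushVectors(const engine::MemTable::MetaPtr& meta, const engine::Options& options,
                  const float* data, size_t n) {
    auto source = std::make_shared<engine::VectorSource>(n, data);
    engine::MemTable mem_table("test_group", meta, options);
    mem_table.Add(source);    // fills the newest MemTableFile, opening new ones as IsFull() fires
    mem_table.Serialize();    // flushes every accumulated MemTableFile through the meta layer
}
```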
(memLeft >= singleVectorMemSize) { @@ -62,11 +72,37 @@ size_t MemTableFile::GetMemLeft() { return (MAX_TABLE_FILE_MEM - current_mem_); } -bool MemTableFile::isFull() { +bool MemTableFile::IsFull() { size_t singleVectorMemSize = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE; return (GetMemLeft() < singleVectorMemSize); } +Status MemTableFile::Serialize() { + + auto start_time = METRICS_NOW_TIME; + + auto size = GetCurrentMem(); + + execution_engine_->Serialize(); + auto end_time = METRICS_NOW_TIME; + auto total_time = METRICS_MICROSECONDS(start_time, end_time); + table_file_schema_.size_ = size; + + server::Metrics::GetInstance().DiskStoreIOSpeedGaugeSet((double)size/total_time); + + table_file_schema_.file_type_ = (size >= options_.index_trigger_size) ? + meta::TableFileSchema::TO_INDEX : meta::TableFileSchema::RAW; + + auto status = meta_->UpdateTableFile(table_file_schema_); + + LOG(DEBUG) << "New " << ((table_file_schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index") + << " file " << table_file_schema_.file_id_ << " of size " << (double)size / (double)M << " M"; + + execution_engine_->Cache(); + + return status; +} + } // namespace engine } // namespace milvus } // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/MemTableFile.h b/cpp/src/db/MemTableFile.h index 04f30178ea..1be0ae78ba 100644 --- a/cpp/src/db/MemTableFile.h +++ b/cpp/src/db/MemTableFile.h @@ -16,7 +16,7 @@ public: using Ptr = std::shared_ptr; using MetaPtr = meta::Meta::Ptr; - MemTableFile(const std::string& table_id, const std::shared_ptr& meta); + MemTableFile(const std::string& table_id, const std::shared_ptr& meta, const Options& options); Status Add(const VectorSource::Ptr& source); @@ -24,7 +24,9 @@ public: size_t GetMemLeft(); - bool isFull(); + bool IsFull(); + + Status Serialize(); private: @@ -36,6 +38,8 @@ private: MetaPtr meta_; + Options options_; + size_t current_mem_; ExecutionEnginePtr execution_engine_; diff --git a/cpp/src/db/VectorSource.cpp b/cpp/src/db/VectorSource.cpp index f7cef994fa..b113b9ad5e 100644 --- a/cpp/src/db/VectorSource.cpp +++ b/cpp/src/db/VectorSource.cpp @@ -2,6 +2,7 @@ #include "ExecutionEngine.h" #include "EngineFactory.h" #include "Log.h" +#include "metrics/Metrics.h" namespace zilliz { namespace milvus { @@ -21,12 +22,7 @@ Status VectorSource::Add(const ExecutionEnginePtr& execution_engine, const size_t& num_vectors_to_add, size_t& num_vectors_added) { - if (table_file_schema.dimension_ <= 0) { - std::string errMsg = "VectorSource::Add: table_file_schema dimension = " + - std::to_string(table_file_schema.dimension_) + ", table_id = " + table_file_schema.table_id_; - ENGINE_LOG_ERROR << errMsg; - return Status::Error(errMsg); - } + auto start_time = METRICS_NOW_TIME; num_vectors_added = current_num_vectors_added + num_vectors_to_add <= n_ ? 
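Serialize above times the flush, records size / total_time as the disk-store IO speed gauge, and then routes the file: anything that already reached options_.index_trigger_size becomes TO_INDEX, smaller files stay RAW. The routing decision in isolation:

```cpp
#include <cstddef>

enum class FileType { RAW, TO_INDEX };

// The post-flush decision MemTableFile::Serialize makes (sketch): files large
// enough go straight to the build-index queue, the rest remain raw files.
FileType ChooseFileType(size_t file_size, size_t index_trigger_size) {
    return file_size >= index_trigger_size ? FileType::TO_INDEX : FileType::RAW;
}
```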
num_vectors_to_add : n_ - current_num_vectors_added; IDNumbers vector_ids_to_add; @@ -40,6 +36,10 @@ Status VectorSource::Add(const ExecutionEnginePtr& execution_engine, ENGINE_LOG_ERROR << "VectorSource::Add failed: " + status.ToString(); } + auto end_time = METRICS_NOW_TIME; + auto total_time = METRICS_MICROSECONDS(start_time, end_time); + server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast(n_), static_cast(table_file_schema.dimension_), total_time); + return status; } diff --git a/cpp/src/db/VectorSource.h b/cpp/src/db/VectorSource.h index 597eee4ad8..dec31f39e1 100644 --- a/cpp/src/db/VectorSource.h +++ b/cpp/src/db/VectorSource.h @@ -28,8 +28,6 @@ public: IDNumbers GetVectorIds(); -// Status Serialize(); - private: const size_t n_; diff --git a/cpp/unittest/db/mem_test.cpp b/cpp/unittest/db/mem_test.cpp index 111914f8a9..f68d1eb8e3 100644 --- a/cpp/unittest/db/mem_test.cpp +++ b/cpp/unittest/db/mem_test.cpp @@ -86,12 +86,13 @@ TEST(MEM_TEST, VECTOR_SOURCE_TEST) { TEST(MEM_TEST, MEM_TABLE_FILE_TEST) { std::shared_ptr impl_ = engine::DBMetaImplFactory::Build(); + auto options = engine::OptionsFactory::Build(); engine::meta::TableSchema table_schema = BuildTableSchema(); auto status = impl_->CreateTable(table_schema); ASSERT_TRUE(status.ok()); - engine::MemTableFile memTableFile(TABLE_NAME, impl_); + engine::MemTableFile memTableFile(TABLE_NAME, impl_, options); int64_t n_100 = 100; std::vector vectors_100; @@ -120,7 +121,7 @@ TEST(MEM_TEST, MEM_TABLE_FILE_TEST) { vector_ids = source_128M->GetVectorIds(); ASSERT_EQ(vector_ids.size(), n_max - n_100); - ASSERT_TRUE(memTableFile.isFull()); + ASSERT_TRUE(memTableFile.IsFull()); status = impl_->DropAll(); ASSERT_TRUE(status.ok()); @@ -129,6 +130,7 @@ TEST(MEM_TEST, MEM_TABLE_FILE_TEST) { TEST(MEM_TEST, MEM_TABLE_TEST) { std::shared_ptr impl_ = engine::DBMetaImplFactory::Build(); + auto options = engine::OptionsFactory::Build(); engine::meta::TableSchema table_schema = BuildTableSchema(); auto status = impl_->CreateTable(table_schema); @@ -140,7 +142,7 @@ TEST(MEM_TEST, MEM_TABLE_TEST) { engine::VectorSource::Ptr source_100 = std::make_shared(n_100, vectors_100.data()); - engine::MemTable memTable(TABLE_NAME, impl_); + engine::MemTable memTable(TABLE_NAME, impl_, options); status = memTable.Add(source_100); ASSERT_TRUE(status.ok()); @@ -184,6 +186,9 @@ TEST(MEM_TEST, MEM_TABLE_TEST) { int expectedStackSize = 2 + std::ceil((n_1G - n_100) * singleVectorMem / engine::MAX_TABLE_FILE_MEM); ASSERT_EQ(memTable.GetStackSize(), expectedStackSize); + status = memTable.Serialize(); + ASSERT_TRUE(status.ok()); + status = impl_->DropAll(); ASSERT_TRUE(status.ok()); } From 9ec7b773c39552f1987cbc71e046c34e2885448e Mon Sep 17 00:00:00 2001 From: quicksilver Date: Fri, 5 Jul 2019 18:07:40 +0800 Subject: [PATCH 23/29] MS-161 - Add CI / CD Module to Milvus Project Former-commit-id: 16869771664aeea633d14f2db05c7a6fb8441e64 --- CHANGELOGS.md | 1 + ci/function/file_transfer.groovy | 10 + ci/jenkinsfile/cleanup_dev.groovy | 12 + ci/jenkinsfile/deploy2dev.groovy | 11 + ci/jenkinsfile/dev_test.groovy | 17 ++ ci/jenkinsfile/milvus_build.groovy | 17 ++ ci/jenkinsfile/milvus_build_no_ut.groovy | 17 ++ ci/jenkinsfile/packaged_milvus.groovy | 44 ++++ ci/jenkinsfile/packaged_milvus_no_ut.groovy | 26 ++ ci/jenkinsfile/publish_docker.groovy | 31 +++ ci/jenkinsfile/upload_dev_test_out.groovy | 26 ++ ci/main_jenkinsfile | 256 ++++++++++++++++++++ ci/main_jenkinsfile_no_ut | 256 ++++++++++++++++++++ ci/pod_containers/milvus-engine-build.yaml | 13 + 
ci/pod_containers/milvus-testframework.yaml | 13 + ci/pod_containers/publish-docker.yaml | 22 ++ 16 files changed, 772 insertions(+) create mode 100644 ci/function/file_transfer.groovy create mode 100644 ci/jenkinsfile/cleanup_dev.groovy create mode 100644 ci/jenkinsfile/deploy2dev.groovy create mode 100644 ci/jenkinsfile/dev_test.groovy create mode 100644 ci/jenkinsfile/milvus_build.groovy create mode 100644 ci/jenkinsfile/milvus_build_no_ut.groovy create mode 100644 ci/jenkinsfile/packaged_milvus.groovy create mode 100644 ci/jenkinsfile/packaged_milvus_no_ut.groovy create mode 100644 ci/jenkinsfile/publish_docker.groovy create mode 100644 ci/jenkinsfile/upload_dev_test_out.groovy create mode 100644 ci/main_jenkinsfile create mode 100644 ci/main_jenkinsfile_no_ut create mode 100644 ci/pod_containers/milvus-engine-build.yaml create mode 100644 ci/pod_containers/milvus-testframework.yaml create mode 100644 ci/pod_containers/publish-docker.yaml diff --git a/CHANGELOGS.md b/CHANGELOGS.md index a5d7bfec58..def4965a41 100644 --- a/CHANGELOGS.md +++ b/CHANGELOGS.md @@ -15,3 +15,4 @@ Please mark all change in change log and use the ticket from JIRA. ### Task - MS-1 - Add CHANGELOG.md +- MS-161 - Add CI / CD Module to Milvus Project diff --git a/ci/function/file_transfer.groovy b/ci/function/file_transfer.groovy new file mode 100644 index 0000000000..bebae14832 --- /dev/null +++ b/ci/function/file_transfer.groovy @@ -0,0 +1,10 @@ +def FileTransfer (sourceFiles, remoteDirectory, remoteIP, protocol = "ftp", makeEmptyDirs = true) { + if (protocol == "ftp") { + ftpPublisher masterNodeName: '', paramPublish: [parameterName: ''], alwaysPublishFromMaster: false, continueOnError: false, failOnError: true, publishers: [ + [configName: "${remoteIP}", transfers: [ + [asciiMode: false, cleanRemote: false, excludes: '', flatten: false, makeEmptyDirs: "${makeEmptyDirs}", noDefaultExcludes: false, patternSeparator: '[, ]+', remoteDirectory: "${remoteDirectory}", remoteDirectorySDF: false, removePrefix: '', sourceFiles: "${sourceFiles}"]], usePromotionTimestamp: true, useWorkspaceInPromotion: false, verbose: true + ] + ] + } +} +return this diff --git a/ci/jenkinsfile/cleanup_dev.groovy b/ci/jenkinsfile/cleanup_dev.groovy new file mode 100644 index 0000000000..32ee43d3b1 --- /dev/null +++ b/ci/jenkinsfile/cleanup_dev.groovy @@ -0,0 +1,12 @@ +try { + sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}" + + if (currentBuild.result == 'ABORTED') { + throw new hudson.AbortException("Dev Test Aborted !") + } else if (currentBuild.result == 'FAILURE') { + error("Dev Test Failure !") + } +} catch (exc) { + updateGitlabCommitStatus name: 'Cleanup Dev', state: 'failed' + throw exc +} diff --git a/ci/jenkinsfile/deploy2dev.groovy b/ci/jenkinsfile/deploy2dev.groovy new file mode 100644 index 0000000000..6e4a23cfe7 --- /dev/null +++ b/ci/jenkinsfile/deploy2dev.groovy @@ -0,0 +1,11 @@ +try { + sh 'helm init --client-only --skip-refresh' + sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus' + sh 'helm repo update' + sh "helm install --set engine.image.repository=registry.zilliz.com/${PROJECT_NAME}/engine --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} --version 0.3.0 milvus/milvus-gpu" +} catch (exc) { + updateGitlabCommitStatus name: 'Deloy to Dev', state: 'failed' + echo 'Helm running failed!' 
+ sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}" + throw exc +} diff --git a/ci/jenkinsfile/dev_test.groovy b/ci/jenkinsfile/dev_test.groovy new file mode 100644 index 0000000000..f5808cef40 --- /dev/null +++ b/ci/jenkinsfile/dev_test.groovy @@ -0,0 +1,17 @@ +container('milvus-testframework') { + timeout(time: 10, unit: 'MINUTES') { + gitlabCommitStatus(name: 'Dev Test') { + try { + dir ("${PROJECT_NAME}_test") { + checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git"]]]) + sh 'python3 -m pip install -r requirements.txt' + sh "pytest . --alluredir=test_out --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.kube-opt.svc.cluster.local" + } + } catch (exc) { + updateGitlabCommitStatus name: 'Dev Test', state: 'failed' + currentBuild.result = 'FAILURE' + echo 'Milvus Test Failed !' + } + } + } +} diff --git a/ci/jenkinsfile/milvus_build.groovy b/ci/jenkinsfile/milvus_build.groovy new file mode 100644 index 0000000000..ed07d2b992 --- /dev/null +++ b/ci/jenkinsfile/milvus_build.groovy @@ -0,0 +1,17 @@ +container('milvus-build-env') { + timeout(time: 20, unit: 'MINUTES') { + gitlabCommitStatus(name: 'Build Engine') { + dir ("milvus_engine") { + try { + checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption',disableSubmodules: false,parentCredentials: true,recursiveSubmodules: true,reference: '',trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git"]]]) + dir ("cpp") { + sh "./build.sh -t ${params.BUILD_TYPE} -u -c" + } + } catch (exc) { + updateGitlabCommitStatus name: 'Build Engine', state: 'failed' + throw exc + } + } + } + } +} diff --git a/ci/jenkinsfile/milvus_build_no_ut.groovy b/ci/jenkinsfile/milvus_build_no_ut.groovy new file mode 100644 index 0000000000..02b971de2f --- /dev/null +++ b/ci/jenkinsfile/milvus_build_no_ut.groovy @@ -0,0 +1,17 @@ +container('milvus-build-env') { + timeout(time: 20, unit: 'MINUTES') { + gitlabCommitStatus(name: 'Build Engine') { + dir ("milvus_engine") { + try { + checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption',disableSubmodules: false,parentCredentials: true,recursiveSubmodules: true,reference: '',trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git"]]]) + dir ("cpp") { + sh "./build.sh -t ${params.BUILD_TYPE}" + } + } catch (exc) { + updateGitlabCommitStatus name: 'Build Engine', state: 'failed' + throw exc + } + } + } + } +} diff --git a/ci/jenkinsfile/packaged_milvus.groovy b/ci/jenkinsfile/packaged_milvus.groovy new file mode 100644 index 0000000000..407b100589 --- /dev/null +++ b/ci/jenkinsfile/packaged_milvus.groovy @@ -0,0 +1,44 @@ +container('milvus-build-env') { + timeout(time: 5, unit: 'MINUTES') { + dir ("milvus_engine") { + dir ("cpp") { + gitlabCommitStatus(name: 'Packaged Engine') { + if (fileExists('milvus')) { + try { + sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus" + def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy" + fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", 
"${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage') + if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) { + echo "Download Milvus Engine Binary Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz\"" + } + } catch (exc) { + updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed' + throw exc + } + } else { + updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed' + error("Milvus binary directory don't exists!") + } + } + + gitlabCommitStatus(name: 'Packaged Engine lcov') { + if (fileExists('lcov_out')) { + try { + def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy" + fileTransfer.FileTransfer("lcov_out/", "${PROJECT_NAME}/lcov/${JOB_NAME}-${BUILD_ID}", 'nas storage') + if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) { + echo "Milvus lcov out Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/lcov/${JOB_NAME}-${BUILD_ID}/lcov_out/\"" + } + } catch (exc) { + updateGitlabCommitStatus name: 'Packaged Engine lcov', state: 'failed' + throw exc + } + } else { + updateGitlabCommitStatus name: 'Packaged Engine lcov', state: 'failed' + error("Milvus lcov out directory don't exists!") + } + } + } + } + } +} diff --git a/ci/jenkinsfile/packaged_milvus_no_ut.groovy b/ci/jenkinsfile/packaged_milvus_no_ut.groovy new file mode 100644 index 0000000000..b6c31540a1 --- /dev/null +++ b/ci/jenkinsfile/packaged_milvus_no_ut.groovy @@ -0,0 +1,26 @@ +container('milvus-build-env') { + timeout(time: 5, unit: 'MINUTES') { + dir ("milvus_engine") { + dir ("cpp") { + gitlabCommitStatus(name: 'Packaged Engine') { + if (fileExists('milvus')) { + try { + sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus" + def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy" + fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage') + if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) { + echo "Download Milvus Engine Binary Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz\"" + } + } catch (exc) { + updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed' + throw exc + } + } else { + updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed' + error("Milvus binary directory don't exists!") + } + } + } + } + } +} diff --git a/ci/jenkinsfile/publish_docker.groovy b/ci/jenkinsfile/publish_docker.groovy new file mode 100644 index 0000000000..04f1a8567d --- /dev/null +++ b/ci/jenkinsfile/publish_docker.groovy @@ -0,0 +1,31 @@ +container('publish-docker') { + timeout(time: 15, unit: 'MINUTES') { + gitlabCommitStatus(name: 'Publish Engine Docker') { + try { + dir ("${PROJECT_NAME}_build") { + checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:build/milvus_build.git"]]]) + dir ("docker/deploy/ubuntu16.04/free_version") { + sh "curl -O -u anonymous: ftp://192.168.1.126/data/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz" + sh "tar zxvf ${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz" + try { + docker.withRegistry('https://registry.zilliz.com', 'a54e38ef-c424-4ea9-9224-b25fc20e3924') { + def customImage = docker.build("${PROJECT_NAME}/engine:${DOCKER_VERSION}") + 
customImage.push() + } + echo "Docker Pull Command: docker pull registry.zilliz.com/${PROJECT_NAME}/engine:${DOCKER_VERSION}" + } catch (exc) { + updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'canceled' + throw exc + } finally { + sh "docker rmi ${PROJECT_NAME}/engine:${DOCKER_VERSION}" + } + } + } + } catch (exc) { + updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'failed' + echo 'Publish docker failed!' + throw exc + } + } + } +} diff --git a/ci/jenkinsfile/upload_dev_test_out.groovy b/ci/jenkinsfile/upload_dev_test_out.groovy new file mode 100644 index 0000000000..c401b16608 --- /dev/null +++ b/ci/jenkinsfile/upload_dev_test_out.groovy @@ -0,0 +1,26 @@ +container('milvus-testframework') { + timeout(time: 5, unit: 'MINUTES') { + dir ("${PROJECT_NAME}_test") { + gitlabCommitStatus(name: 'Upload Dev Test Out') { + if (fileExists('test_out')) { + try { + def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy" + fileTransfer.FileTransfer("test_out/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage') + if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) { + echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\"" + } + } catch (hudson.AbortException ae) { + updateGitlabCommitStatus name: 'Upload Dev Test Out', state: 'canceled' + currentBuild.result = 'ABORTED' + } catch (exc) { + updateGitlabCommitStatus name: 'Upload Dev Test Out', state: 'failed' + currentBuild.result = 'FAILURE' + } + } else { + updateGitlabCommitStatus name: 'Upload Dev Test Out', state: 'failed' + echo "Milvus Dev Test Out directory don't exists!" + } + } + } + } +} diff --git a/ci/main_jenkinsfile b/ci/main_jenkinsfile new file mode 100644 index 0000000000..c144c46685 --- /dev/null +++ b/ci/main_jenkinsfile @@ -0,0 +1,256 @@ +pipeline { + agent none + + options { + timestamps() + } + + environment { + PROJECT_NAME = "milvus" + LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase() + SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}" + GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}" + SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}" + DOCKER_VERSION_STR = "${env.gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"}" + } + + stages { + stage("Ubuntu 16.04") { + environment { + PACKAGE_VERSION = VersionNumber([ + versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}' + ]); + + DOCKER_VERSION = VersionNumber([ + versionNumberString : '${DOCKER_VERSION_STR}' + ]); + } + + stages { + stage("Run Build") { + agent { + kubernetes { + cloud 'build-kubernetes' + label 'build' + defaultContainer 'jnlp' + containerTemplate { + name 'milvus-build-env' + image 'registry.zilliz.com/milvus/milvus-build-env:v0.10' + ttyEnabled true + command 'cat' + } + } + } + stages { + stage('Build') { + steps { + gitlabCommitStatus(name: 'Build') { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build.groovy" + load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus.groovy" + } + } + } + } + } + post { + aborted { + script { + updateGitlabCommitStatus name: 'Build', state: 'canceled' + echo "Milvus Build aborted !" + } + } + + failure { + script { + updateGitlabCommitStatus name: 'Build', state: 'failed' + echo "Milvus Build failure !" 
+ } + } + } + } + + stage("Publish docker and helm") { + agent { + kubernetes { + label 'publish' + defaultContainer 'jnlp' + yaml """ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: publish + componet: docker +spec: + containers: + - name: publish-docker + image: registry.zilliz.com/library/zilliz_docker:v1.0.0 + securityContext: + privileged: true + command: + - cat + tty: true + volumeMounts: + - name: docker-sock + mountPath: /var/run/docker.sock + volumes: + - name: docker-sock + hostPath: + path: /var/run/docker.sock +""" + } + } + stages { + stage('Publish Docker') { + steps { + gitlabCommitStatus(name: 'Publish Docker') { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy" + } + } + } + } + } + post { + aborted { + script { + updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled' + echo "Milvus Publish Docker aborted !" + } + } + + failure { + script { + updateGitlabCommitStatus name: 'Publish Docker', state: 'failed' + echo "Milvus Publish Docker failure !" + } + } + } + } + + stage("Deploy to Development") { + stages { + stage("Deploy to Dev") { + agent { + kubernetes { + label 'jenkins-slave' + defaultContainer 'jnlp' + } + } + stages { + stage('Deploy') { + steps { + gitlabCommitStatus(name: 'Deloy to Dev') { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy" + } + } + } + } + } + post { + aborted { + script { + updateGitlabCommitStatus name: 'Deloy to Dev', state: 'canceled' + echo "Milvus Deloy to Dev aborted !" + } + } + + failure { + script { + updateGitlabCommitStatus name: 'Deloy to Dev', state: 'failed' + echo "Milvus Deloy to Dev failure !" + } + } + } + } + + stage("Dev Test") { + agent { + kubernetes { + label 'test' + defaultContainer 'jnlp' + containerTemplate { + name 'milvus-testframework' + image 'registry.zilliz.com/milvus/milvus-test:v0.1' + ttyEnabled true + command 'cat' + } + } + } + stages { + stage('Test') { + steps { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy" + load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy" + } + } + } + } + } + + stage ("Cleanup Dev") { + agent { + kubernetes { + label 'jenkins-slave' + defaultContainer 'jnlp' + } + } + stages { + stage('Cleanup') { + steps { + gitlabCommitStatus(name: 'Cleanup Dev') { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy" + } + } + } + } + } + post { + aborted { + script { + updateGitlabCommitStatus name: 'Cleanup Dev', state: 'canceled' + echo "Milvus Cleanup Dev aborted !" + } + } + + failure { + script { + updateGitlabCommitStatus name: 'Cleanup Dev', state: 'failed' + echo "Milvus Cleanup Dev failure !" + } + } + } + } + } + } + } + } + } + + post { + success { + script { + updateGitlabCommitStatus name: 'CI/CD', state: 'success' + echo "Milvus CI/CD success !" + } + } + + aborted { + script { + updateGitlabCommitStatus name: 'CI/CD', state: 'canceled' + echo "Milvus CI/CD aborted !" + } + } + + failure { + script { + updateGitlabCommitStatus name: 'CI/CD', state: 'failed' + echo "Milvus CI/CD failure !" + } + } + } +} diff --git a/ci/main_jenkinsfile_no_ut b/ci/main_jenkinsfile_no_ut new file mode 100644 index 0000000000..277ec155a5 --- /dev/null +++ b/ci/main_jenkinsfile_no_ut @@ -0,0 +1,256 @@ +pipeline { + agent none + + options { + timestamps() + } + + environment { + PROJECT_NAME = "milvus" + LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase() + SEMVER = "${env.gitlabSourceBranch == null ? 
params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}" + GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}" + SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}" + DOCKER_VERSION_STR = "${env.gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"}" + } + + stages { + stage("Ubuntu 16.04") { + environment { + PACKAGE_VERSION = VersionNumber([ + versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}' + ]); + + DOCKER_VERSION = VersionNumber([ + versionNumberString : '${DOCKER_VERSION_STR}' + ]); + } + + stages { + stage("Run Build") { + agent { + kubernetes { + cloud 'build-kubernetes' + label 'build' + defaultContainer 'jnlp' + containerTemplate { + name 'milvus-build-env' + image 'registry.zilliz.com/milvus/milvus-build-env:v0.10' + ttyEnabled true + command 'cat' + } + } + } + stages { + stage('Build') { + steps { + gitlabCommitStatus(name: 'Build') { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build_no_ut.groovy" + load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus_no_ut.groovy" + } + } + } + } + } + post { + aborted { + script { + updateGitlabCommitStatus name: 'Build', state: 'canceled' + echo "Milvus Build aborted !" + } + } + + failure { + script { + updateGitlabCommitStatus name: 'Build', state: 'failed' + echo "Milvus Build failure !" + } + } + } + } + + stage("Publish docker and helm") { + agent { + kubernetes { + label 'publish' + defaultContainer 'jnlp' + yaml """ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: publish + componet: docker +spec: + containers: + - name: publish-docker + image: registry.zilliz.com/library/zilliz_docker:v1.0.0 + securityContext: + privileged: true + command: + - cat + tty: true + volumeMounts: + - name: docker-sock + mountPath: /var/run/docker.sock + volumes: + - name: docker-sock + hostPath: + path: /var/run/docker.sock +""" + } + } + stages { + stage('Publish Docker') { + steps { + gitlabCommitStatus(name: 'Publish Docker') { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy" + } + } + } + } + } + post { + aborted { + script { + updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled' + echo "Milvus Publish Docker aborted !" + } + } + + failure { + script { + updateGitlabCommitStatus name: 'Publish Docker', state: 'failed' + echo "Milvus Publish Docker failure !" + } + } + } + } + + stage("Deploy to Development") { + stages { + stage("Deploy to Dev") { + agent { + kubernetes { + label 'jenkins-slave' + defaultContainer 'jnlp' + } + } + stages { + stage('Deploy') { + steps { + gitlabCommitStatus(name: 'Deloy to Dev') { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy" + } + } + } + } + } + post { + aborted { + script { + updateGitlabCommitStatus name: 'Deloy to Dev', state: 'canceled' + echo "Milvus Deloy to Dev aborted !" + } + } + + failure { + script { + updateGitlabCommitStatus name: 'Deloy to Dev', state: 'failed' + echo "Milvus Deloy to Dev failure !" 
+ } + } + } + } + + stage("Dev Test") { + agent { + kubernetes { + label 'test' + defaultContainer 'jnlp' + containerTemplate { + name 'milvus-testframework' + image 'registry.zilliz.com/milvus/milvus-test:v0.1' + ttyEnabled true + command 'cat' + } + } + } + stages { + stage('Test') { + steps { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy" + load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy" + } + } + } + } + } + + stage ("Cleanup Dev") { + agent { + kubernetes { + label 'jenkins-slave' + defaultContainer 'jnlp' + } + } + stages { + stage('Cleanup') { + steps { + gitlabCommitStatus(name: 'Cleanup Dev') { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy" + } + } + } + } + } + post { + aborted { + script { + updateGitlabCommitStatus name: 'Cleanup Dev', state: 'canceled' + echo "Milvus Cleanup Dev aborted !" + } + } + + failure { + script { + updateGitlabCommitStatus name: 'Cleanup Dev', state: 'failed' + echo "Milvus Cleanup Dev failure !" + } + } + } + } + } + } + } + } + } + + post { + success { + script { + updateGitlabCommitStatus name: 'CI/CD', state: 'success' + echo "Milvus CI/CD success !" + } + } + + aborted { + script { + updateGitlabCommitStatus name: 'CI/CD', state: 'canceled' + echo "Milvus CI/CD aborted !" + } + } + + failure { + script { + updateGitlabCommitStatus name: 'CI/CD', state: 'failed' + echo "Milvus CI/CD failure !" + } + } + } +} diff --git a/ci/pod_containers/milvus-engine-build.yaml b/ci/pod_containers/milvus-engine-build.yaml new file mode 100644 index 0000000000..cd5352ffef --- /dev/null +++ b/ci/pod_containers/milvus-engine-build.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: milvus + componet: build-env +spec: + containers: + - name: milvus-build-env + image: registry.zilliz.com/milvus/milvus-build-env:v0.9 + command: + - cat + tty: true diff --git a/ci/pod_containers/milvus-testframework.yaml b/ci/pod_containers/milvus-testframework.yaml new file mode 100644 index 0000000000..7a98fbca8e --- /dev/null +++ b/ci/pod_containers/milvus-testframework.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: milvus + componet: testframework +spec: + containers: + - name: milvus-testframework + image: registry.zilliz.com/milvus/milvus-test:v0.1 + command: + - cat + tty: true diff --git a/ci/pod_containers/publish-docker.yaml b/ci/pod_containers/publish-docker.yaml new file mode 100644 index 0000000000..268afb1331 --- /dev/null +++ b/ci/pod_containers/publish-docker.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: publish + componet: docker +spec: + containers: + - name: publish-docker + image: registry.zilliz.com/library/zilliz_docker:v1.0.0 + securityContext: + privileged: true + command: + - cat + tty: true + volumeMounts: + - name: docker-sock + mountPath: /var/run/docker.sock + volumes: + - name: docker-sock + hostPath: + path: /var/run/docker.sock From 6a6722a71c3544856a933d432ac9197a90c7e60f Mon Sep 17 00:00:00 2001 From: zhiru Date: Sun, 7 Jul 2019 13:50:39 +0800 Subject: [PATCH 24/29] add mem manager Former-commit-id: c9d77a1d0e9df6679c90fddefee22123cfb0acac --- cpp/src/db/DBImpl.cpp | 3 +- cpp/src/db/DBImpl.h | 4 +- cpp/src/db/Factories.cpp | 11 +++ cpp/src/db/Factories.h | 5 ++ cpp/src/db/MemManager.h | 14 ++-- cpp/src/db/MemManagerAbstract.h | 25 ++++++ cpp/src/db/MemTable.cpp | 10 ++- cpp/src/db/MemTable.h | 6 +- cpp/src/db/NewMemManager.cpp | 92 +++++++++++++++++++++ cpp/src/db/NewMemManager.h | 54 +++++++++++++ 
cpp/src/db/VectorSource.cpp | 15 +++- cpp/unittest/db/mem_test.cpp | 137 +++++++++++++++++++++++++++++++- 12 files changed, 356 insertions(+), 20 deletions(-) create mode 100644 cpp/src/db/MemManagerAbstract.h create mode 100644 cpp/src/db/NewMemManager.cpp create mode 100644 cpp/src/db/NewMemManager.h diff --git a/cpp/src/db/DBImpl.cpp b/cpp/src/db/DBImpl.cpp index 0a1e8651e1..09a7c72201 100644 --- a/cpp/src/db/DBImpl.cpp +++ b/cpp/src/db/DBImpl.cpp @@ -87,8 +87,7 @@ DBImpl::DBImpl(const Options& options) compact_thread_pool_(1, 1), index_thread_pool_(1, 1) { meta_ptr_ = DBMetaImplFactory::Build(options.meta, options.mode); - mem_mgr_ = std::make_shared(meta_ptr_, options_); - // mem_mgr_ = (MemManagerPtr)(new MemManager(meta_ptr_, options_)); + mem_mgr_ = MemManagerFactory::Build(meta_ptr_, options_); if (options.mode != Options::MODE::READ_ONLY) { StartTimerTasks(); } diff --git a/cpp/src/db/DBImpl.h b/cpp/src/db/DBImpl.h index 9dcd174f8b..5601f1a33b 100644 --- a/cpp/src/db/DBImpl.h +++ b/cpp/src/db/DBImpl.h @@ -9,6 +9,7 @@ #include "MemManager.h" #include "Types.h" #include "utils/ThreadPool.h" +#include "MemManagerAbstract.h" #include #include @@ -33,7 +34,6 @@ class Meta; class DBImpl : public DB { public: using MetaPtr = meta::Meta::Ptr; - using MemManagerPtr = typename MemManager::Ptr; explicit DBImpl(const Options &options); @@ -123,7 +123,7 @@ class DBImpl : public DB { std::thread bg_timer_thread_; MetaPtr meta_ptr_; - MemManagerPtr mem_mgr_; + MemManagerAbstractPtr mem_mgr_; server::ThreadPool compact_thread_pool_; std::list> compact_thread_results_; diff --git a/cpp/src/db/Factories.cpp b/cpp/src/db/Factories.cpp index 4b24bd3a1c..d51727cbff 100644 --- a/cpp/src/db/Factories.cpp +++ b/cpp/src/db/Factories.cpp @@ -6,6 +6,8 @@ #include #include "Factories.h" #include "DBImpl.h" +#include "MemManager.h" +#include "NewMemManager.h" #include #include @@ -98,6 +100,15 @@ DB* DBFactory::Build(const Options& options) { return new DBImpl(options); } +MemManagerAbstractPtr MemManagerFactory::Build(const std::shared_ptr& meta, + const Options& options) { + bool useNew = true; + if (useNew) { + return std::make_shared(meta, options); + } + return std::make_shared(meta, options); +} + } // namespace engine } // namespace milvus } // namespace zilliz diff --git a/cpp/src/db/Factories.h b/cpp/src/db/Factories.h index 889922b17a..567bc0a8bc 100644 --- a/cpp/src/db/Factories.h +++ b/cpp/src/db/Factories.h @@ -10,6 +10,7 @@ #include "MySQLMetaImpl.h" #include "Options.h" #include "ExecutionEngine.h" +#include "MemManagerAbstract.h" #include #include @@ -36,6 +37,10 @@ struct DBFactory { static DB* Build(const Options&); }; +struct MemManagerFactory { + static MemManagerAbstractPtr Build(const std::shared_ptr& meta, const Options& options); +}; + } // namespace engine } // namespace milvus } // namespace zilliz diff --git a/cpp/src/db/MemManager.h b/cpp/src/db/MemManager.h index 0ce88d504d..95303889db 100644 --- a/cpp/src/db/MemManager.h +++ b/cpp/src/db/MemManager.h @@ -9,13 +9,13 @@ #include "IDGenerator.h" #include "Status.h" #include "Meta.h" +#include "MemManagerAbstract.h" #include #include #include #include #include -#include namespace zilliz { namespace milvus { @@ -62,7 +62,7 @@ private: -class MemManager { +class MemManager : public MemManagerAbstract { public: using MetaPtr = meta::Meta::Ptr; using MemVectorsPtr = typename MemVectors::Ptr; @@ -71,16 +71,16 @@ public: MemManager(const std::shared_ptr& meta, const Options& options) : meta_(meta), options_(options) {} - MemVectorsPtr 
GetMemByTable(const std::string& table_id); - Status InsertVectors(const std::string& table_id, - size_t n, const float* vectors, IDNumbers& vector_ids); + size_t n, const float* vectors, IDNumbers& vector_ids) override; - Status Serialize(std::set& table_ids); + Status Serialize(std::set& table_ids) override; - Status EraseMemVector(const std::string& table_id); + Status EraseMemVector(const std::string& table_id) override; private: + MemVectorsPtr GetMemByTable(const std::string& table_id); + Status InsertVectorsNoLock(const std::string& table_id, size_t n, const float* vectors, IDNumbers& vector_ids); Status ToImmutable(); diff --git a/cpp/src/db/MemManagerAbstract.h b/cpp/src/db/MemManagerAbstract.h new file mode 100644 index 0000000000..74222df1e8 --- /dev/null +++ b/cpp/src/db/MemManagerAbstract.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +namespace zilliz { +namespace milvus { +namespace engine { + +class MemManagerAbstract { +public: + + virtual Status InsertVectors(const std::string& table_id, + size_t n, const float* vectors, IDNumbers& vector_ids) = 0; + + virtual Status Serialize(std::set& table_ids) = 0; + + virtual Status EraseMemVector(const std::string& table_id) = 0; + +}; // MemManagerAbstract + +using MemManagerAbstractPtr = std::shared_ptr; + +} // namespace engine +} // namespace milvus +} // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/MemTable.cpp b/cpp/src/db/MemTable.cpp index 86554695c8..b282ad375a 100644 --- a/cpp/src/db/MemTable.cpp +++ b/cpp/src/db/MemTable.cpp @@ -44,7 +44,7 @@ void MemTable::GetCurrentMemTableFile(MemTableFile::Ptr& mem_table_file) { mem_table_file = mem_table_file_list_.back(); } -size_t MemTable::GetStackSize() { +size_t MemTable::GetTableFileCount() { return mem_table_file_list_.size(); } @@ -60,6 +60,14 @@ Status MemTable::Serialize() { return Status::OK(); } +bool MemTable::Empty() { + return mem_table_file_list_.empty(); +} + +std::string MemTable::GetTableId() { + return table_id_; +} + } // namespace engine } // namespace milvus } // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/MemTable.h b/cpp/src/db/MemTable.h index d5c7cc9e85..e09d6ddac1 100644 --- a/cpp/src/db/MemTable.h +++ b/cpp/src/db/MemTable.h @@ -24,10 +24,14 @@ public: void GetCurrentMemTableFile(MemTableFile::Ptr& mem_table_file); - size_t GetStackSize(); + size_t GetTableFileCount(); Status Serialize(); + bool Empty(); + + std::string GetTableId(); + private: const std::string table_id_; diff --git a/cpp/src/db/NewMemManager.cpp b/cpp/src/db/NewMemManager.cpp new file mode 100644 index 0000000000..19aba68eb7 --- /dev/null +++ b/cpp/src/db/NewMemManager.cpp @@ -0,0 +1,92 @@ +#include "NewMemManager.h" +#include "VectorSource.h" + +namespace zilliz { +namespace milvus { +namespace engine { + +NewMemManager::MemTablePtr NewMemManager::GetMemByTable(const std::string& table_id) { + auto memIt = mem_id_map_.find(table_id); + if (memIt != mem_id_map_.end()) { + return memIt->second; + } + + mem_id_map_[table_id] = std::make_shared(table_id, meta_, options_); + return mem_id_map_[table_id]; +} + +Status NewMemManager::InsertVectors(const std::string& table_id_, + size_t n_, + const float* vectors_, + IDNumbers& vector_ids_) { + + + std::unique_lock lock(mutex_); + + return InsertVectorsNoLock(table_id_, n_, vectors_, vector_ids_); +} + +Status NewMemManager::InsertVectorsNoLock(const std::string& table_id, + size_t n, + const float* vectors, + IDNumbers& vector_ids) { + MemTablePtr mem = GetMemByTable(table_id); + 
VectorSource::Ptr source = std::make_shared(n, vectors); + + auto status = mem->Add(source); + if (status.ok()) { + vector_ids = source->GetVectorIds(); + } + return status; +} + +Status NewMemManager::ToImmutable() { + std::unique_lock lock(mutex_); + MemIdMap temp_map; + for (auto& kv: mem_id_map_) { + if(kv.second->Empty()) { + temp_map.insert(kv); + continue;//empty table, no need to serialize + } + immu_mem_list_.push_back(kv.second); + } + + mem_id_map_.swap(temp_map); + return Status::OK(); +} + +Status NewMemManager::Serialize(std::set& table_ids) { + ToImmutable(); + std::unique_lock lock(serialization_mtx_); + table_ids.clear(); + for (auto& mem : immu_mem_list_) { + mem->Serialize(); + table_ids.insert(mem->GetTableId()); + } + immu_mem_list_.clear(); + return Status::OK(); +} + +Status NewMemManager::EraseMemVector(const std::string& table_id) { + {//erase MemVector from rapid-insert cache + std::unique_lock lock(mutex_); + mem_id_map_.erase(table_id); + } + + {//erase MemVector from serialize cache + std::unique_lock lock(serialization_mtx_); + MemList temp_list; + for (auto& mem : immu_mem_list_) { + if(mem->GetTableId() != table_id) { + temp_list.push_back(mem); + } + } + immu_mem_list_.swap(temp_list); + } + + return Status::OK(); +} + +} // namespace engine +} // namespace milvus +} // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/NewMemManager.h b/cpp/src/db/NewMemManager.h new file mode 100644 index 0000000000..a5f5a9ca13 --- /dev/null +++ b/cpp/src/db/NewMemManager.h @@ -0,0 +1,54 @@ +#pragma once + +#include "Meta.h" +#include "MemTable.h" +#include "Status.h" +#include "MemManagerAbstract.h" + +#include +#include +#include +#include +#include + +namespace zilliz { +namespace milvus { +namespace engine { + +class NewMemManager : public MemManagerAbstract { +public: + using MetaPtr = meta::Meta::Ptr; + using Ptr = std::shared_ptr; + using MemTablePtr = typename MemTable::Ptr; + + NewMemManager(const std::shared_ptr& meta, const Options& options) + : meta_(meta), options_(options) {} + + Status InsertVectors(const std::string& table_id, + size_t n, const float* vectors, IDNumbers& vector_ids) override; + + Status Serialize(std::set& table_ids) override; + + Status EraseMemVector(const std::string& table_id) override; + +private: + MemTablePtr GetMemByTable(const std::string& table_id); + + Status InsertVectorsNoLock(const std::string& table_id, + size_t n, const float* vectors, IDNumbers& vector_ids); + Status ToImmutable(); + + using MemIdMap = std::map; + using MemList = std::vector; + MemIdMap mem_id_map_; + MemList immu_mem_list_; + MetaPtr meta_; + Options options_; + std::mutex mutex_; + std::mutex serialization_mtx_; +}; // NewMemManager + + +} // namespace engine +} // namespace milvus +} // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/VectorSource.cpp b/cpp/src/db/VectorSource.cpp index b113b9ad5e..d032be51f6 100644 --- a/cpp/src/db/VectorSource.cpp +++ b/cpp/src/db/VectorSource.cpp @@ -24,13 +24,18 @@ Status VectorSource::Add(const ExecutionEnginePtr& execution_engine, auto start_time = METRICS_NOW_TIME; - num_vectors_added = current_num_vectors_added + num_vectors_to_add <= n_ ? num_vectors_to_add : n_ - current_num_vectors_added; + num_vectors_added = current_num_vectors_added + num_vectors_to_add <= n_ ? 
+ num_vectors_to_add : n_ - current_num_vectors_added; IDNumbers vector_ids_to_add; id_generator_->GetNextIDNumbers(num_vectors_added, vector_ids_to_add); - Status status = execution_engine->AddWithIds(num_vectors_added, vectors_ + current_num_vectors_added, vector_ids_to_add.data()); + Status status = execution_engine->AddWithIds(num_vectors_added, + vectors_ + current_num_vectors_added * table_file_schema.dimension_, + vector_ids_to_add.data()); if (status.ok()) { current_num_vectors_added += num_vectors_added; - vector_ids_.insert(vector_ids_.end(), vector_ids_to_add.begin(), vector_ids_to_add.end()); + vector_ids_.insert(vector_ids_.end(), + std::make_move_iterator(vector_ids_to_add.begin()), + std::make_move_iterator(vector_ids_to_add.end())); } else { ENGINE_LOG_ERROR << "VectorSource::Add failed: " + status.ToString(); @@ -38,7 +43,9 @@ Status VectorSource::Add(const ExecutionEnginePtr& execution_engine, auto end_time = METRICS_NOW_TIME; auto total_time = METRICS_MICROSECONDS(start_time, end_time); - server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast(n_), static_cast(table_file_schema.dimension_), total_time); + server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast(n_), + static_cast(table_file_schema.dimension_), + total_time); return status; } diff --git a/cpp/unittest/db/mem_test.cpp b/cpp/unittest/db/mem_test.cpp index f68d1eb8e3..915610adcc 100644 --- a/cpp/unittest/db/mem_test.cpp +++ b/cpp/unittest/db/mem_test.cpp @@ -7,6 +7,11 @@ #include "db/Factories.h" #include "db/Constants.h" #include "db/EngineFactory.h" +#include "metrics/Metrics.h" + +#include +#include +#include using namespace zilliz::milvus; @@ -29,6 +34,9 @@ namespace { vectors.clear(); vectors.resize(n*TABLE_DIM); float* data = vectors.data(); +// std::random_device rd; +// std::mt19937 gen(rd()); +// std::uniform_real_distribution<> dis(0.0, 1.0); for(int i = 0; i < n; i++) { for(int j = 0; j < TABLE_DIM; j++) data[TABLE_DIM * i + j] = drand48(); data[TABLE_DIM * i] += i / 2000.; @@ -169,7 +177,7 @@ TEST(MEM_TEST, MEM_TABLE_TEST) { memTable.GetCurrentMemTableFile(memTableFile); ASSERT_EQ(memTableFile->GetCurrentMem(), n_100 * singleVectorMem); - ASSERT_EQ(memTable.GetStackSize(), 2); + ASSERT_EQ(memTable.GetTableFileCount(), 2); int64_t n_1G = 1024000; std::vector vectors_1G; @@ -183,8 +191,8 @@ TEST(MEM_TEST, MEM_TABLE_TEST) { vector_ids = source_1G->GetVectorIds(); ASSERT_EQ(vector_ids.size(), n_1G); - int expectedStackSize = 2 + std::ceil((n_1G - n_100) * singleVectorMem / engine::MAX_TABLE_FILE_MEM); - ASSERT_EQ(memTable.GetStackSize(), expectedStackSize); + int expectedTableFileCount = 2 + std::ceil((n_1G - n_100) * singleVectorMem / engine::MAX_TABLE_FILE_MEM); + ASSERT_EQ(memTable.GetTableFileCount(), expectedTableFileCount); status = memTable.Serialize(); ASSERT_TRUE(status.ok()); @@ -193,4 +201,127 @@ TEST(MEM_TEST, MEM_TABLE_TEST) { ASSERT_TRUE(status.ok()); } +TEST(MEM_TEST, MEM_MANAGER_TEST) { + + auto options = engine::OptionsFactory::Build(); + options.meta.path = "/tmp/milvus_test"; + options.meta.backend_uri = "sqlite://:@:/"; + auto db_ = engine::DBFactory::Build(options); + + engine::meta::TableSchema table_info = BuildTableSchema(); + engine::Status stat = db_->CreateTable(table_info); + + engine::meta::TableSchema table_info_get; + table_info_get.table_id_ = TABLE_NAME; + stat = db_->DescribeTable(table_info_get); + ASSERT_STATS(stat); + ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); + + std::map> search_vectors; +// std::map> vectors_ids_map; + { + 
engine::IDNumbers vector_ids; + int64_t nb = 1024000; + std::vector xb; + BuildVectors(nb, xb); + engine::Status status = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + ASSERT_TRUE(status.ok()); + +// std::ofstream myfile("mem_test.txt"); +// for (int64_t i = 0; i < nb; ++i) { +// int64_t vector_id = vector_ids[i]; +// std::vector vectors; +// for (int64_t j = 0; j < TABLE_DIM; j++) { +// vectors.emplace_back(xb[i*TABLE_DIM + j]); +//// std::cout << xb[i*TABLE_DIM + j] << std::endl; +// } +// vectors_ids_map[vector_id] = vectors; +// } + + std::this_thread::sleep_for(std::chrono::seconds(3)); + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution dis(0, nb - 1); + + int64_t numQuery = 1000; + for (int64_t i = 0; i < numQuery; ++i) { + int64_t index = dis(gen); + std::vector search; + for (int64_t j = 0; j < TABLE_DIM; j++) { + search.push_back(xb[index * TABLE_DIM + j]); + } + search_vectors.insert(std::make_pair(vector_ids[index], search)); +// std::cout << "index: " << index << " vector_ids[index]: " << vector_ids[index] << std::endl; + } + +// for (int64_t i = 0; i < nb; i += 100000) { +// std::vector search; +// for (int64_t j = 0; j < TABLE_DIM; j++) { +// search.push_back(xb[i * TABLE_DIM + j]); +// } +// search_vectors.insert(std::make_pair(vector_ids[i], search)); +// } + + } + + int k = 10; + for(auto& pair : search_vectors) { + auto& search = pair.second; + engine::QueryResults results; + stat = db_->Query(TABLE_NAME, k, 1, search.data(), results); + for(int t = 0; t < k; t++) { +// std::cout << "ID=" << results[0][t].first << " DISTANCE=" << results[0][t].second << std::endl; + +// std::cout << vectors_ids_map[results[0][t].first].size() << std::endl; +// for (auto& data : vectors_ids_map[results[0][t].first]) { +// std::cout << data << " "; +// } +// std::cout << std::endl; + } + // std::cout << "results[0][0].first: " << results[0][0].first << " pair.first: " << pair.first << " results[0][0].second: " << results[0][0].second << std::endl; + ASSERT_EQ(results[0][0].first, pair.first); + ASSERT_LT(results[0][0].second, 0.00001); + } + + stat = db_->DropAll(); + ASSERT_TRUE(stat.ok()); + +} + +TEST(MEM_TEST, INSERT_TEST) { + + auto options = engine::OptionsFactory::Build(); + options.meta.path = "/tmp/milvus_test"; + options.meta.backend_uri = "sqlite://:@:/"; + auto db_ = engine::DBFactory::Build(options); + + engine::meta::TableSchema table_info = BuildTableSchema(); + engine::Status stat = db_->CreateTable(table_info); + + engine::meta::TableSchema table_info_get; + table_info_get.table_id_ = TABLE_NAME; + stat = db_->DescribeTable(table_info_get); + ASSERT_STATS(stat); + ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); + + auto start_time = METRICS_NOW_TIME; + + int insert_loop = 1000; + for (int i = 0; i < insert_loop; ++i) { + int64_t nb = 204800; + std::vector xb; + BuildVectors(nb, xb); + engine::IDNumbers vector_ids; + engine::Status status = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + ASSERT_TRUE(status.ok()); + } + auto end_time = METRICS_NOW_TIME; + auto total_time = METRICS_MICROSECONDS(start_time, end_time); + std::cout << "total_time(ms) : " << total_time << std::endl; + + stat = db_->DropAll(); + ASSERT_TRUE(stat.ok()); + +} From fedc8adc511d64254de4f4f4f84c3f3892fd58f4 Mon Sep 17 00:00:00 2001 From: jinhai Date: Sun, 7 Jul 2019 19:16:39 +0800 Subject: [PATCH 25/29] MS-176 Add create table parameter check Former-commit-id: 7b42e581b012853673ec4df3423fbea62f61777a --- cpp/src/server/RequestTask.cpp | 60 
+++++++++++------ cpp/src/utils/ValidationUtil.cpp | 74 +++++++++++++++++++++ cpp/src/utils/ValidationUtil.h | 20 ++++++ cpp/unittest/CMakeLists.txt | 4 +- cpp/unittest/db/db_tests.cpp | 13 ++-- cpp/unittest/db/mysql_db_test.cpp | 12 ++-- cpp/unittest/db/search_test.cpp | 5 +- cpp/unittest/faiss_wrapper/wrapper_test.cpp | 5 +- cpp/unittest/utils/CMakeLists.txt | 30 +++++++++ cpp/unittest/utils/ValidationUtilTest.cpp | 61 +++++++++++++++++ 10 files changed, 247 insertions(+), 37 deletions(-) create mode 100644 cpp/src/utils/ValidationUtil.cpp create mode 100644 cpp/src/utils/ValidationUtil.h create mode 100644 cpp/unittest/utils/CMakeLists.txt create mode 100644 cpp/unittest/utils/ValidationUtilTest.cpp diff --git a/cpp/src/server/RequestTask.cpp b/cpp/src/server/RequestTask.cpp index 1b91883af5..f8e617b9d4 100644 --- a/cpp/src/server/RequestTask.cpp +++ b/cpp/src/server/RequestTask.cpp @@ -8,6 +8,7 @@ #include "utils/CommonUtil.h" #include "utils/Log.h" #include "utils/TimeRecorder.h" +#include "utils/ValidationUtil.h" #include "DBWrapper.h" #include "version.h" @@ -133,19 +134,23 @@ BaseTaskPtr CreateTableTask::Create(const thrift::TableSchema& schema) { ServerError CreateTableTask::OnExecute() { TimeRecorder rc("CreateTableTask"); - + try { //step 1: check arguments - if(schema_.table_name.empty()) { - return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name"); - } - if(schema_.dimension <= 0) { - return SetError(SERVER_INVALID_TABLE_DIMENSION, "Invalid table dimension: " + std::to_string(schema_.dimension)); + ServerError res = SERVER_SUCCESS; + res = ValidateTableName(schema_.table_name); + if(res != SERVER_SUCCESS) { + return res; } - engine::EngineType engine_type = EngineType(schema_.index_type); - if(engine_type == engine::EngineType::INVALID) { - return SetError(SERVER_INVALID_INDEX_TYPE, "Invalid index type: " + std::to_string(schema_.index_type)); + res = ValidateTableDimension(schema_.dimension); + if(res != SERVER_SUCCESS) { + return res; + } + + res = ValidateTableIndexType(schema_.index_type); + if(res != SERVER_SUCCESS) { + return res; } //step 2: construct table schema @@ -187,8 +192,10 @@ ServerError DescribeTableTask::OnExecute() { try { //step 1: check arguments - if(table_name_.empty()) { - return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name"); + ServerError res = SERVER_SUCCESS; + res = ValidateTableName(table_name_); + if(res != SERVER_SUCCESS) { + return res; } //step 2: get table info @@ -230,10 +237,11 @@ ServerError HasTableTask::OnExecute() { TimeRecorder rc("HasTableTask"); //step 1: check arguments - if(table_name_.empty()) { - return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name"); + ServerError res = SERVER_SUCCESS; + res = ValidateTableName(table_name_); + if(res != SERVER_SUCCESS) { + return res; } - //step 2: check table existence engine::Status stat = DBWrapper::DB()->HasTable(table_name_, has_table_); if(!stat.ok()) { @@ -264,8 +272,10 @@ ServerError DeleteTableTask::OnExecute() { TimeRecorder rc("DeleteTableTask"); //step 1: check arguments - if (table_name_.empty()) { - return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name"); + ServerError res = SERVER_SUCCESS; + res = ValidateTableName(table_name_); + if(res != SERVER_SUCCESS) { + return res; } //step 2: check table existence @@ -346,8 +356,10 @@ ServerError AddVectorTask::OnExecute() { TimeRecorder rc("AddVectorTask"); //step 1: check arguments - if (table_name_.empty()) { - return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name"); + ServerError res = 
SERVER_SUCCESS; + res = ValidateTableName(table_name_); + if(res != SERVER_SUCCESS) { + return res; } if(record_array_.empty()) { @@ -435,8 +447,10 @@ ServerError SearchVectorTask::OnExecute() { TimeRecorder rc("SearchVectorTask"); //step 1: check arguments - if (table_name_.empty()) { - return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name"); + ServerError res = SERVER_SUCCESS; + res = ValidateTableName(table_name_); + if(res != SERVER_SUCCESS) { + return res; } if(top_k_ <= 0) { @@ -548,8 +562,10 @@ ServerError GetTableRowCountTask::OnExecute() { TimeRecorder rc("GetTableRowCountTask"); //step 1: check arguments - if (table_name_.empty()) { - return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name"); + ServerError res = SERVER_SUCCESS; + res = ValidateTableName(table_name_); + if(res != SERVER_SUCCESS) { + return res; } //step 2: get row count diff --git a/cpp/src/utils/ValidationUtil.cpp b/cpp/src/utils/ValidationUtil.cpp new file mode 100644 index 0000000000..b4bbd3346a --- /dev/null +++ b/cpp/src/utils/ValidationUtil.cpp @@ -0,0 +1,74 @@ +#include +#include "ValidationUtil.h" +#include "Log.h" + + +namespace zilliz { +namespace milvus { +namespace server { + +constexpr size_t table_name_size_limit = 16384; +constexpr int64_t table_dimension_limit = 16384; + +ServerError +ValidateTableName(const std::string &table_name) { + + // Table name shouldn't be empty. + if (table_name.empty()) { + SERVER_LOG_ERROR << "Empty table name"; + return SERVER_INVALID_TABLE_NAME; + } + + // Table name size shouldn't exceed 16384. + if (table_name.size() > table_name_size_limit) { + SERVER_LOG_ERROR << "Table name size exceed the limitation"; + return SERVER_INVALID_TABLE_NAME; + } + + // Table name first character should be underscore or character. + char first_char = table_name[0]; + if (first_char != '_' && std::isalpha(first_char) == 0) { + SERVER_LOG_ERROR << "Table name first character isn't underscore or character: " << first_char; + return SERVER_INVALID_TABLE_NAME; + } + + int64_t table_name_size = table_name.size(); + for (int64_t i = 1; i < table_name_size; ++i) { + char name_char = table_name[i]; + if (name_char != '_' && std::isalnum(name_char) == 0) { + SERVER_LOG_ERROR << "Table name character isn't underscore or alphanumber: " << name_char; + return SERVER_INVALID_TABLE_NAME; + } + } + + return SERVER_SUCCESS; +} + +ServerError +ValidateTableDimension(int64_t dimension) { + if (dimension <= 0 || dimension > table_dimension_limit) { + SERVER_LOG_ERROR << "Table dimension excceed the limitation: " << table_dimension_limit; + return SERVER_INVALID_VECTOR_DIMENSION; + } else { + return SERVER_SUCCESS; + } +} + +ServerError +ValidateTableIndexType(int32_t index_type) { + auto engine_type = engine::EngineType(index_type); + switch (engine_type) { + case engine::EngineType::FAISS_IDMAP: + case engine::EngineType::FAISS_IVFFLAT: { + SERVER_LOG_DEBUG << "Index type: " << index_type; + return SERVER_SUCCESS; + } + default: { + return SERVER_INVALID_INDEX_TYPE; + } + } +} + +} +} +} \ No newline at end of file diff --git a/cpp/src/utils/ValidationUtil.h b/cpp/src/utils/ValidationUtil.h new file mode 100644 index 0000000000..608ac22682 --- /dev/null +++ b/cpp/src/utils/ValidationUtil.h @@ -0,0 +1,20 @@ +#pragma once + +#include "Error.h" + +namespace zilliz { +namespace milvus { +namespace server { + +ServerError +ValidateTableName(const std::string& table_name); + +ServerError +ValidateTableDimension(int64_t dimension); + +ServerError +ValidateTableIndexType(int32_t index_type); + +} +} 
+} \ No newline at end of file diff --git a/cpp/unittest/CMakeLists.txt b/cpp/unittest/CMakeLists.txt index 38046617ae..62e32f6d1d 100644 --- a/cpp/unittest/CMakeLists.txt +++ b/cpp/unittest/CMakeLists.txt @@ -12,7 +12,6 @@ aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files) set(unittest_srcs ${CMAKE_CURRENT_SOURCE_DIR}/main.cpp) - #${EASYLOGGINGPP_INCLUDE_DIR}/easylogging++.cc) set(require_files ${MILVUS_ENGINE_SRC}/server/ServerConfig.cpp @@ -44,4 +43,5 @@ add_subdirectory(db) add_subdirectory(faiss_wrapper) #add_subdirectory(license) add_subdirectory(metrics) -add_subdirectory(storage) \ No newline at end of file +add_subdirectory(storage) +add_subdirectory(utils) \ No newline at end of file diff --git a/cpp/unittest/db/db_tests.cpp b/cpp/unittest/db/db_tests.cpp index bd17081af8..625211cae7 100644 --- a/cpp/unittest/db/db_tests.cpp +++ b/cpp/unittest/db/db_tests.cpp @@ -3,17 +3,20 @@ // Unauthorized copying of this file, via any medium is strictly prohibited. // Proprietary and confidential. //////////////////////////////////////////////////////////////////////////////// -#include -#include -#include -#include - #include "utils.h" #include "db/DB.h" #include "db/DBImpl.h" #include "db/MetaConsts.h" #include "db/Factories.h" +#include +#include + +#include + +#include +#include + using namespace zilliz::milvus; namespace { diff --git a/cpp/unittest/db/mysql_db_test.cpp b/cpp/unittest/db/mysql_db_test.cpp index 7fdb30a204..0e24cacdfd 100644 --- a/cpp/unittest/db/mysql_db_test.cpp +++ b/cpp/unittest/db/mysql_db_test.cpp @@ -3,17 +3,19 @@ // Unauthorized copying of this file, via any medium is strictly prohibited. // Proprietary and confidential. //////////////////////////////////////////////////////////////////////////////// -#include -#include -#include -#include - #include "utils.h" #include "db/DB.h" #include "db/DBImpl.h" #include "db/MetaConsts.h" #include "db/Factories.h" +#include +#include +#include + +#include +#include + using namespace zilliz::milvus; namespace { diff --git a/cpp/unittest/db/search_test.cpp b/cpp/unittest/db/search_test.cpp index db10bcbadf..ce99ea78f7 100644 --- a/cpp/unittest/db/search_test.cpp +++ b/cpp/unittest/db/search_test.cpp @@ -3,10 +3,11 @@ // Unauthorized copying of this file, via any medium is strictly prohibited. // Proprietary and confidential. //////////////////////////////////////////////////////////////////////////////// -#include - #include "db/scheduler/task/SearchTask.h" +#include + +#include #include using namespace zilliz::milvus; diff --git a/cpp/unittest/faiss_wrapper/wrapper_test.cpp b/cpp/unittest/faiss_wrapper/wrapper_test.cpp index 67a6c3cde8..6f4a651a55 100644 --- a/cpp/unittest/faiss_wrapper/wrapper_test.cpp +++ b/cpp/unittest/faiss_wrapper/wrapper_test.cpp @@ -4,12 +4,15 @@ // Proprietary and confidential. //////////////////////////////////////////////////////////////////////////////// -#include + #include "wrapper/Operand.h" #include "wrapper/Index.h" #include "wrapper/IndexBuilder.h" +#include +#include + using namespace zilliz::milvus::engine; diff --git a/cpp/unittest/utils/CMakeLists.txt b/cpp/unittest/utils/CMakeLists.txt new file mode 100644 index 0000000000..a46a3b05e1 --- /dev/null +++ b/cpp/unittest/utils/CMakeLists.txt @@ -0,0 +1,30 @@ +#------------------------------------------------------------------------------- +# Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved +# Unauthorized copying of this file, via any medium is strictly prohibited. +# Proprietary and confidential. 
+#------------------------------------------------------------------------------- + +# Make sure that your call to link_directories takes place before your call to the relevant add_executable. +include_directories("${CUDA_TOOLKIT_ROOT_DIR}/include") +link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64") + +set(validation_util_src + ${MILVUS_ENGINE_SRC}/utils/ValidationUtil.cpp + ${MILVUS_ENGINE_SRC}/utils/ValidationUtil.h) + +set(validation_util_test_src + ${unittest_srcs} + ${validation_util_src} + ${require_files} + ValidationUtilTest.cpp + ) + +add_executable(valication_util_test + ${validation_util_test_src} + ${config_files}) + +target_link_libraries(valication_util_test + ${unittest_libs} + boost_filesystem) + +install(TARGETS valication_util_test DESTINATION bin) \ No newline at end of file diff --git a/cpp/unittest/utils/ValidationUtilTest.cpp b/cpp/unittest/utils/ValidationUtilTest.cpp new file mode 100644 index 0000000000..095614e325 --- /dev/null +++ b/cpp/unittest/utils/ValidationUtilTest.cpp @@ -0,0 +1,61 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved +// Unauthorized copying of this file, via any medium is strictly prohibited. +// Proprietary and confidential. +//////////////////////////////////////////////////////////////////////////////// +#include + +#include "utils/ValidationUtil.h" +#include "utils/Error.h" + +#include + +using namespace zilliz::milvus::server; + +TEST(ValidationUtilTest, TableNameTest) { + std::string table_name = "Normal123_"; + ServerError res = ValidateTableName(table_name); + ASSERT_EQ(res, SERVER_SUCCESS); + + table_name = "12sds"; + res = ValidateTableName(table_name); + ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME); + + table_name = ""; + res = ValidateTableName(table_name); + ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME); + + table_name = "_asdasd"; + res = ValidateTableName(table_name); + ASSERT_EQ(res, SERVER_SUCCESS); + + table_name = "!@#!@"; + res = ValidateTableName(table_name); + ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME); + + table_name = "中文"; + res = ValidateTableName(table_name); + ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME); + + + table_name = std::string('a', 32768); + res = ValidateTableName(table_name); + ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME); +} + + +TEST(ValidationUtilTest, TableDimensionTest) { + ASSERT_EQ(ValidateTableDimension(-1), SERVER_INVALID_VECTOR_DIMENSION); + ASSERT_EQ(ValidateTableDimension(0), SERVER_INVALID_VECTOR_DIMENSION); + ASSERT_EQ(ValidateTableDimension(16385), SERVER_INVALID_VECTOR_DIMENSION); + ASSERT_EQ(ValidateTableDimension(16384), SERVER_SUCCESS); + ASSERT_EQ(ValidateTableDimension(1), SERVER_SUCCESS); +} + +TEST(ValidationUtilTest, TableIndexTypeTest) { + ASSERT_EQ(ValidateTableIndexType(0), SERVER_INVALID_INDEX_TYPE); + ASSERT_EQ(ValidateTableIndexType(1), SERVER_SUCCESS); + ASSERT_EQ(ValidateTableIndexType(2), SERVER_SUCCESS); + ASSERT_EQ(ValidateTableIndexType(3), SERVER_INVALID_INDEX_TYPE); + ASSERT_EQ(ValidateTableIndexType(4), SERVER_INVALID_INDEX_TYPE); +} From 3c851370ac888b30d2e94f84926c4408e74b6b62 Mon Sep 17 00:00:00 2001 From: jinhai Date: Sun, 7 Jul 2019 19:55:16 +0800 Subject: [PATCH 26/29] MS-176 Update table name length Former-commit-id: 76a6e9ccfe4c148a2e9fea81b98e0375be6e3c0e --- cpp/src/utils/ValidationUtil.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/src/utils/ValidationUtil.cpp b/cpp/src/utils/ValidationUtil.cpp index b4bbd3346a..65cd81e670 100644 --- 
a/cpp/src/utils/ValidationUtil.cpp +++ b/cpp/src/utils/ValidationUtil.cpp @@ -7,7 +7,7 @@ namespace zilliz { namespace milvus { namespace server { -constexpr size_t table_name_size_limit = 16384; +constexpr size_t table_name_size_limit = 255; constexpr int64_t table_dimension_limit = 16384; ServerError From fbb25fa91040f592feaaf764983d4b426a4d4bd4 Mon Sep 17 00:00:00 2001 From: starlord Date: Sun, 7 Jul 2019 20:41:52 +0800 Subject: [PATCH 27/29] date range check Former-commit-id: ff8d7ece23d92d377febdf1eab0b1fbd77048c88 --- cpp/src/server/RequestTask.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cpp/src/server/RequestTask.cpp b/cpp/src/server/RequestTask.cpp index f8e617b9d4..2454f3a9ab 100644 --- a/cpp/src/server/RequestTask.cpp +++ b/cpp/src/server/RequestTask.cpp @@ -109,7 +109,13 @@ namespace { } long days = (tt_end > tt_start) ? (tt_end - tt_start)/DAY_SECONDS : (tt_start - tt_end)/DAY_SECONDS; - for(long i = 0; i <= days; i++) { + if(days == 0) { + error_code = SERVER_INVALID_TIME_RANGE; + error_msg = "Invalid time range: " + range.start_value + " to " + range.end_value; + return ; + } + + for(long i = 0; i < days; i++) { time_t tt_day = tt_start + DAY_SECONDS*i; tm tm_day; CommonUtil::ConvertTime(tt_day, tm_day); From a951dd14c0e8fb330870dbe7257315b8829f5c9b Mon Sep 17 00:00:00 2001 From: zhiru Date: Mon, 8 Jul 2019 11:14:28 +0800 Subject: [PATCH 28/29] Add new mem manager Former-commit-id: abab1d1c2cf67f49a4d9dcf2304df1abed675dda --- cpp/CHANGELOG.md | 1 + cpp/conf/server_config.template | 4 +- cpp/src/db/Constants.h | 3 + cpp/src/db/MemManager.cpp | 25 ++++++ cpp/src/db/MemManager.h | 6 ++ cpp/src/db/MemManagerAbstract.h | 6 ++ cpp/src/db/MemTable.cpp | 17 +++- cpp/src/db/MemTable.h | 8 +- cpp/src/db/NewMemManager.cpp | 38 +++++++++ cpp/src/db/NewMemManager.h | 6 ++ cpp/src/db/Options.h | 1 + cpp/src/server/DBWrapper.cpp | 8 ++ cpp/src/server/ServerConfig.h | 1 + cpp/unittest/db/mem_test.cpp | 144 +++++++++++++++++++++----------- cpp/unittest/db/utils.cpp | 12 +++ cpp/unittest/db/utils.h | 5 ++ 16 files changed, 231 insertions(+), 54 deletions(-) diff --git a/cpp/CHANGELOG.md b/cpp/CHANGELOG.md index 0f4e480123..fd27d05b9c 100644 --- a/cpp/CHANGELOG.md +++ b/cpp/CHANGELOG.md @@ -18,6 +18,7 @@ Please mark all change in change log and use the ticket from JIRA. - MS-152 - Delete assert in MySQLMetaImpl and change MySQLConnectionPool impl ## New Feature +- MS-180 - Add new mem manager ## Task diff --git a/cpp/conf/server_config.template b/cpp/conf/server_config.template index 0383e00b53..f0cd6d5e52 100644 --- a/cpp/conf/server_config.template +++ b/cpp/conf/server_config.template @@ -2,7 +2,7 @@ server_config: address: 0.0.0.0 port: 19530 # the port milvus listen to, default: 19530, range: 1025 ~ 65534 gpu_index: 0 # the gpu milvus use, default: 0, range: 0 ~ gpu number - 1 - mode: single # milvus deployment type: single, cluster + mode: single # milvus deployment type: single, cluster, read_only db_config: db_path: @MILVUS_DB_PATH@ # milvus data storage path @@ -15,6 +15,8 @@ db_config: index_building_threshold: 1024 # index building trigger threshold, default: 1024, unit: MB archive_disk_threshold: 512 # triger archive action if storage size exceed this value, unit: GB archive_days_threshold: 30 # files older than x days will be archived, unit: day + maximum_memory: 4 # maximum memory allowed, default: 4, unit: GB, should be at least 1 GB. 
+ # the sum of maximum_memory and cpu_cache_capacity should be less than total memory metric_config: is_startup: off # if monitoring start: on, off diff --git a/cpp/src/db/Constants.h b/cpp/src/db/Constants.h index 2bb2e0a064..1ba02b1d55 100644 --- a/cpp/src/db/Constants.h +++ b/cpp/src/db/Constants.h @@ -11,6 +11,9 @@ namespace engine { const size_t K = 1024UL; const size_t M = K*K; +const size_t G = K*M; +const size_t T = K*G; + const size_t MAX_TABLE_FILE_MEM = 128 * M; const int VECTOR_TYPE_SIZE = sizeof(float); diff --git a/cpp/src/db/MemManager.cpp b/cpp/src/db/MemManager.cpp index e36b0c45ba..ba8517cdbd 100644 --- a/cpp/src/db/MemManager.cpp +++ b/cpp/src/db/MemManager.cpp @@ -8,6 +8,7 @@ #include "MetaConsts.h" #include "EngineFactory.h" #include "metrics/Metrics.h" +#include "Log.h" #include #include @@ -128,6 +129,10 @@ Status MemManager::InsertVectorsNoLock(const std::string& table_id, size_t n, const float* vectors, IDNumbers& vector_ids) { + + LOG(DEBUG) << "MemManager::InsertVectorsNoLock: mutable mem = " << GetCurrentMutableMem() << + ", immutable mem = " << GetCurrentImmutableMem() << ", total mem = " << GetCurrentMem(); + MemVectorsPtr mem = GetMemByTable(table_id); if (mem == nullptr) { return Status::NotFound("Group " + table_id + " not found!"); @@ -192,6 +197,26 @@ Status MemManager::EraseMemVector(const std::string& table_id) { return Status::OK(); } +size_t MemManager::GetCurrentMutableMem() { + size_t totalMem = 0; + for (auto& kv : mem_id_map_) { + auto memVector = kv.second; + totalMem += memVector->Size(); + } + return totalMem; +} + +size_t MemManager::GetCurrentImmutableMem() { + size_t totalMem = 0; + for (auto& memVector : immu_mem_list_) { + totalMem += memVector->Size(); + } + return totalMem; +} + +size_t MemManager::GetCurrentMem() { + return GetCurrentMutableMem() + GetCurrentImmutableMem(); +} } // namespace engine } // namespace milvus diff --git a/cpp/src/db/MemManager.h b/cpp/src/db/MemManager.h index 95303889db..e8460c7a6d 100644 --- a/cpp/src/db/MemManager.h +++ b/cpp/src/db/MemManager.h @@ -78,6 +78,12 @@ public: Status EraseMemVector(const std::string& table_id) override; + size_t GetCurrentMutableMem() override; + + size_t GetCurrentImmutableMem() override; + + size_t GetCurrentMem() override; + private: MemVectorsPtr GetMemByTable(const std::string& table_id); diff --git a/cpp/src/db/MemManagerAbstract.h b/cpp/src/db/MemManagerAbstract.h index 74222df1e8..58c73ba6f8 100644 --- a/cpp/src/db/MemManagerAbstract.h +++ b/cpp/src/db/MemManagerAbstract.h @@ -16,6 +16,12 @@ public: virtual Status EraseMemVector(const std::string& table_id) = 0; + virtual size_t GetCurrentMutableMem() = 0; + + virtual size_t GetCurrentImmutableMem() = 0; + + virtual size_t GetCurrentMem() = 0; + }; // MemManagerAbstract using MemManagerAbstractPtr = std::shared_ptr; diff --git a/cpp/src/db/MemTable.cpp b/cpp/src/db/MemTable.cpp index b282ad375a..ba3875fbb5 100644 --- a/cpp/src/db/MemTable.cpp +++ b/cpp/src/db/MemTable.cpp @@ -49,13 +49,15 @@ size_t MemTable::GetTableFileCount() { } Status MemTable::Serialize() { - for (auto& memTableFile : mem_table_file_list_) { - auto status = memTableFile->Serialize(); + for (auto memTableFile = mem_table_file_list_.begin(); memTableFile != mem_table_file_list_.end(); ) { + auto status = (*memTableFile)->Serialize(); if (!status.ok()) { std::string errMsg = "MemTable::Serialize failed: " + status.ToString(); ENGINE_LOG_ERROR << errMsg; return Status::Error(errMsg); } + std::lock_guard lock(mutex_); + memTableFile = 
mem_table_file_list_.erase(memTableFile); } return Status::OK(); } @@ -64,10 +66,19 @@ bool MemTable::Empty() { return mem_table_file_list_.empty(); } -std::string MemTable::GetTableId() { +const std::string& MemTable::GetTableId() const { return table_id_; } +size_t MemTable::GetCurrentMem() { + std::lock_guard lock(mutex_); + size_t totalMem = 0; + for (auto& memTableFile : mem_table_file_list_) { + totalMem += memTableFile->GetCurrentMem(); + } + return totalMem; +} + } // namespace engine } // namespace milvus } // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/MemTable.h b/cpp/src/db/MemTable.h index e09d6ddac1..9bae932e62 100644 --- a/cpp/src/db/MemTable.h +++ b/cpp/src/db/MemTable.h @@ -4,7 +4,7 @@ #include "MemTableFile.h" #include "VectorSource.h" -#include +#include namespace zilliz { namespace milvus { @@ -30,7 +30,9 @@ public: bool Empty(); - std::string GetTableId(); + const std::string& GetTableId() const; + + size_t GetCurrentMem(); private: const std::string table_id_; @@ -41,6 +43,8 @@ private: Options options_; + std::mutex mutex_; + }; //MemTable } // namespace engine diff --git a/cpp/src/db/NewMemManager.cpp b/cpp/src/db/NewMemManager.cpp index 19aba68eb7..3c78f37101 100644 --- a/cpp/src/db/NewMemManager.cpp +++ b/cpp/src/db/NewMemManager.cpp @@ -1,5 +1,9 @@ #include "NewMemManager.h" #include "VectorSource.h" +#include "Log.h" +#include "Constants.h" + +#include namespace zilliz { namespace milvus { @@ -20,6 +24,9 @@ Status NewMemManager::InsertVectors(const std::string& table_id_, const float* vectors_, IDNumbers& vector_ids_) { + while (GetCurrentMem() > options_.maximum_memory) { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + } std::unique_lock lock(mutex_); @@ -30,6 +37,10 @@ Status NewMemManager::InsertVectorsNoLock(const std::string& table_id, size_t n, const float* vectors, IDNumbers& vector_ids) { + + LOG(DEBUG) << "NewMemManager::InsertVectorsNoLock: mutable mem = " << GetCurrentMutableMem() << + ", immutable mem = " << GetCurrentImmutableMem() << ", total mem = " << GetCurrentMem(); + MemTablePtr mem = GetMemByTable(table_id); VectorSource::Ptr source = std::make_shared(n, vectors); @@ -64,6 +75,12 @@ Status NewMemManager::Serialize(std::set& table_ids) { table_ids.insert(mem->GetTableId()); } immu_mem_list_.clear(); +// for (auto mem = immu_mem_list_.begin(); mem != immu_mem_list_.end(); ) { +// (*mem)->Serialize(); +// table_ids.insert((*mem)->GetTableId()); +// mem = immu_mem_list_.erase(mem); +// LOG(DEBUG) << "immu_mem_list_ size = " << immu_mem_list_.size(); +// } return Status::OK(); } @@ -87,6 +104,27 @@ Status NewMemManager::EraseMemVector(const std::string& table_id) { return Status::OK(); } +size_t NewMemManager::GetCurrentMutableMem() { + size_t totalMem = 0; + for (auto& kv : mem_id_map_) { + auto memTable = kv.second; + totalMem += memTable->GetCurrentMem(); + } + return totalMem; +} + +size_t NewMemManager::GetCurrentImmutableMem() { + size_t totalMem = 0; + for (auto& memTable : immu_mem_list_) { + totalMem += memTable->GetCurrentMem(); + } + return totalMem; +} + +size_t NewMemManager::GetCurrentMem() { + return GetCurrentMutableMem() + GetCurrentImmutableMem(); +} + } // namespace engine } // namespace milvus } // namespace zilliz \ No newline at end of file diff --git a/cpp/src/db/NewMemManager.h b/cpp/src/db/NewMemManager.h index a5f5a9ca13..9883480404 100644 --- a/cpp/src/db/NewMemManager.h +++ b/cpp/src/db/NewMemManager.h @@ -31,6 +31,12 @@ public: Status EraseMemVector(const std::string& table_id) 
override; + size_t GetCurrentMutableMem() override; + + size_t GetCurrentImmutableMem() override; + + size_t GetCurrentMem() override; + private: MemTablePtr GetMemByTable(const std::string& table_id); diff --git a/cpp/src/db/Options.h b/cpp/src/db/Options.h index 39d0a15019..47bbb45bbc 100644 --- a/cpp/src/db/Options.h +++ b/cpp/src/db/Options.h @@ -61,6 +61,7 @@ struct Options { size_t index_trigger_size = ONE_GB; //unit: byte DBMetaOptions meta; int mode = MODE::SINGLE; + float maximum_memory = 4 * ONE_GB; }; // Options diff --git a/cpp/src/server/DBWrapper.cpp b/cpp/src/server/DBWrapper.cpp index fca15cb65a..bed4440d5e 100644 --- a/cpp/src/server/DBWrapper.cpp +++ b/cpp/src/server/DBWrapper.cpp @@ -23,6 +23,14 @@ DBWrapper::DBWrapper() { if(index_size > 0) {//ensure larger than zero, unit is MB opt.index_trigger_size = (size_t)index_size * engine::ONE_MB; } + float maximum_memory = config.GetFloatValue(CONFIG_MAXMIMUM_MEMORY); + if (maximum_memory > 1.0) { + opt.maximum_memory = maximum_memory * engine::ONE_GB; + } + else { + std::cout << "ERROR: maximum_memory should be at least 1 GB" << std::endl; + kill(0, SIGUSR1); + } ConfigNode& serverConfig = ServerConfig::GetInstance().GetConfig(CONFIG_SERVER); std::string mode = serverConfig.GetValue(CONFIG_CLUSTER_MODE, "single"); diff --git a/cpp/src/server/ServerConfig.h b/cpp/src/server/ServerConfig.h index 0ec04eed8c..b3b95eb8b6 100644 --- a/cpp/src/server/ServerConfig.h +++ b/cpp/src/server/ServerConfig.h @@ -26,6 +26,7 @@ static const std::string CONFIG_DB_PATH = "db_path"; static const std::string CONFIG_DB_INDEX_TRIGGER_SIZE = "index_building_threshold"; static const std::string CONFIG_DB_ARCHIVE_DISK = "archive_disk_threshold"; static const std::string CONFIG_DB_ARCHIVE_DAYS = "archive_days_threshold"; +static const std::string CONFIG_MAXMIMUM_MEMORY = "maximum_memory"; static const std::string CONFIG_LOG = "log_config"; diff --git a/cpp/unittest/db/mem_test.cpp b/cpp/unittest/db/mem_test.cpp index 915610adcc..818c3a6388 100644 --- a/cpp/unittest/db/mem_test.cpp +++ b/cpp/unittest/db/mem_test.cpp @@ -8,6 +8,8 @@ #include "db/Constants.h" #include "db/EngineFactory.h" #include "metrics/Metrics.h" +#include "db/MetaConsts.h" +#include "boost/filesystem.hpp" #include #include @@ -34,9 +36,6 @@ namespace { vectors.clear(); vectors.resize(n*TABLE_DIM); float* data = vectors.data(); -// std::random_device rd; -// std::mt19937 gen(rd()); -// std::uniform_real_distribution<> dis(0.0, 1.0); for(int i = 0; i < n; i++) { for(int j = 0; j < TABLE_DIM; j++) data[TABLE_DIM * i + j] = drand48(); data[TABLE_DIM * i] += i / 2000.; @@ -44,7 +43,7 @@ namespace { } } -TEST(MEM_TEST, VECTOR_SOURCE_TEST) { +TEST_F(NewMemManagerTest, VECTOR_SOURCE_TEST) { std::shared_ptr impl_ = engine::DBMetaImplFactory::Build(); @@ -91,7 +90,7 @@ TEST(MEM_TEST, VECTOR_SOURCE_TEST) { ASSERT_TRUE(status.ok()); } -TEST(MEM_TEST, MEM_TABLE_FILE_TEST) { +TEST_F(NewMemManagerTest, MEM_TABLE_FILE_TEST) { std::shared_ptr impl_ = engine::DBMetaImplFactory::Build(); auto options = engine::OptionsFactory::Build(); @@ -135,7 +134,7 @@ TEST(MEM_TEST, MEM_TABLE_FILE_TEST) { ASSERT_TRUE(status.ok()); } -TEST(MEM_TEST, MEM_TABLE_TEST) { +TEST_F(NewMemManagerTest, MEM_TABLE_TEST) { std::shared_ptr impl_ = engine::DBMetaImplFactory::Build(); auto options = engine::OptionsFactory::Build(); @@ -201,7 +200,7 @@ TEST(MEM_TEST, MEM_TABLE_TEST) { ASSERT_TRUE(status.ok()); } -TEST(MEM_TEST, MEM_MANAGER_TEST) { +TEST_F(NewMemManagerTest, SERIAL_INSERT_SEARCH_TEST) { auto options = 
engine::OptionsFactory::Build(); options.meta.path = "/tmp/milvus_test"; @@ -218,7 +217,6 @@ TEST(MEM_TEST, MEM_MANAGER_TEST) { ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); std::map> search_vectors; -// std::map> vectors_ids_map; { engine::IDNumbers vector_ids; int64_t nb = 1024000; @@ -227,24 +225,13 @@ TEST(MEM_TEST, MEM_MANAGER_TEST) { engine::Status status = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); ASSERT_TRUE(status.ok()); -// std::ofstream myfile("mem_test.txt"); -// for (int64_t i = 0; i < nb; ++i) { -// int64_t vector_id = vector_ids[i]; -// std::vector vectors; -// for (int64_t j = 0; j < TABLE_DIM; j++) { -// vectors.emplace_back(xb[i*TABLE_DIM + j]); -//// std::cout << xb[i*TABLE_DIM + j] << std::endl; -// } -// vectors_ids_map[vector_id] = vectors; -// } - std::this_thread::sleep_for(std::chrono::seconds(3)); std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution dis(0, nb - 1); - int64_t numQuery = 1000; + int64_t numQuery = 20; for (int64_t i = 0; i < numQuery; ++i) { int64_t index = dis(gen); std::vector search; @@ -252,17 +239,7 @@ TEST(MEM_TEST, MEM_MANAGER_TEST) { search.push_back(xb[index * TABLE_DIM + j]); } search_vectors.insert(std::make_pair(vector_ids[index], search)); -// std::cout << "index: " << index << " vector_ids[index]: " << vector_ids[index] << std::endl; } - -// for (int64_t i = 0; i < nb; i += 100000) { -// std::vector search; -// for (int64_t j = 0; j < TABLE_DIM; j++) { -// search.push_back(xb[i * TABLE_DIM + j]); -// } -// search_vectors.insert(std::make_pair(vector_ids[i], search)); -// } - } int k = 10; @@ -270,26 +247,16 @@ TEST(MEM_TEST, MEM_MANAGER_TEST) { auto& search = pair.second; engine::QueryResults results; stat = db_->Query(TABLE_NAME, k, 1, search.data(), results); - for(int t = 0; t < k; t++) { -// std::cout << "ID=" << results[0][t].first << " DISTANCE=" << results[0][t].second << std::endl; - -// std::cout << vectors_ids_map[results[0][t].first].size() << std::endl; -// for (auto& data : vectors_ids_map[results[0][t].first]) { -// std::cout << data << " "; -// } -// std::cout << std::endl; - } - // std::cout << "results[0][0].first: " << results[0][0].first << " pair.first: " << pair.first << " results[0][0].second: " << results[0][0].second << std::endl; ASSERT_EQ(results[0][0].first, pair.first); ASSERT_LT(results[0][0].second, 0.00001); } - stat = db_->DropAll(); - ASSERT_TRUE(stat.ok()); + delete db_; + boost::filesystem::remove_all(options.meta.path); } -TEST(MEM_TEST, INSERT_TEST) { +TEST_F(NewMemManagerTest, INSERT_TEST) { auto options = engine::OptionsFactory::Build(); options.meta.path = "/tmp/milvus_test"; @@ -307,9 +274,9 @@ TEST(MEM_TEST, INSERT_TEST) { auto start_time = METRICS_NOW_TIME; - int insert_loop = 1000; + int insert_loop = 20; for (int i = 0; i < insert_loop; ++i) { - int64_t nb = 204800; + int64_t nb = 409600; std::vector xb; BuildVectors(nb, xb); engine::IDNumbers vector_ids; @@ -318,10 +285,91 @@ TEST(MEM_TEST, INSERT_TEST) { } auto end_time = METRICS_NOW_TIME; auto total_time = METRICS_MICROSECONDS(start_time, end_time); - std::cout << "total_time(ms) : " << total_time << std::endl; + LOG(DEBUG) << "total_time spent in INSERT_TEST (ms) : " << total_time; - stat = db_->DropAll(); - ASSERT_TRUE(stat.ok()); + delete db_; + boost::filesystem::remove_all(options.meta.path); } +TEST_F(NewMemManagerTest, CONCURRENT_INSERT_SEARCH_TEST) { + + auto options = engine::OptionsFactory::Build(); + options.meta.path = "/tmp/milvus_test"; + options.meta.backend_uri = "sqlite://:@:/"; 
+ auto db_ = engine::DBFactory::Build(options); + + engine::meta::TableSchema table_info = BuildTableSchema(); + engine::Status stat = db_->CreateTable(table_info); + + engine::meta::TableSchema table_info_get; + table_info_get.table_id_ = TABLE_NAME; + stat = db_->DescribeTable(table_info_get); + ASSERT_STATS(stat); + ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); + + engine::IDNumbers vector_ids; + engine::IDNumbers target_ids; + + int64_t nb = 409600; + std::vector xb; + BuildVectors(nb, xb); + + int64_t qb = 5; + std::vector qxb; + BuildVectors(qb, qxb); + + std::thread search([&]() { + engine::QueryResults results; + int k = 10; + std::this_thread::sleep_for(std::chrono::seconds(2)); + + INIT_TIMER; + std::stringstream ss; + uint64_t count = 0; + uint64_t prev_count = 0; + + for (auto j=0; j<10; ++j) { + ss.str(""); + db_->Size(count); + prev_count = count; + + START_TIMER; + stat = db_->Query(TABLE_NAME, k, qb, qxb.data(), results); + ss << "Search " << j << " With Size " << count/engine::meta::M << " M"; + STOP_TIMER(ss.str()); + + ASSERT_STATS(stat); + for (auto k=0; k= prev_count); + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + }); + + int loop = 20; + + for (auto i=0; iInsertVectors(TABLE_NAME, qb, qxb.data(), target_ids); + ASSERT_EQ(target_ids.size(), qb); + } else { + db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + } + std::this_thread::sleep_for(std::chrono::microseconds(1)); + } + + search.join(); + + delete db_; + boost::filesystem::remove_all(options.meta.path); + +}; + diff --git a/cpp/unittest/db/utils.cpp b/cpp/unittest/db/utils.cpp index 70c0712549..ae05c59d3b 100644 --- a/cpp/unittest/db/utils.cpp +++ b/cpp/unittest/db/utils.cpp @@ -106,6 +106,18 @@ zilliz::milvus::engine::Options MySQLDBTest::GetOptions() { return options; } +void NewMemManagerTest::InitLog() { + el::Configurations defaultConf; + defaultConf.setToDefault(); + defaultConf.set(el::Level::Debug, + el::ConfigurationType::Format, "[%thread-%datetime-%level]: %msg (%fbase:%line)"); + el::Loggers::reconfigureLogger("default", defaultConf); +} + +void NewMemManagerTest::SetUp() { + InitLog(); +} + int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); if (argc > 1) { diff --git a/cpp/unittest/db/utils.h b/cpp/unittest/db/utils.h index 361c24b4be..d06500de5c 100644 --- a/cpp/unittest/db/utils.h +++ b/cpp/unittest/db/utils.h @@ -87,3 +87,8 @@ class MySQLDBTest : public ::testing::Test { protected: zilliz::milvus::engine::Options GetOptions(); }; + +class NewMemManagerTest : public ::testing::Test { + void InitLog(); + virtual void SetUp() override; +}; From ed23b7056ff47a8f798534ecd87f09adda0a50e5 Mon Sep 17 00:00:00 2001 From: zhiru Date: Mon, 8 Jul 2019 15:07:03 +0800 Subject: [PATCH 29/29] update Former-commit-id: 6edbbf6f4bca89c568c71d5e4bd0de1be84e6137 --- cpp/src/db/Constants.h | 6 +- cpp/src/db/Factories.cpp | 19 +++--- cpp/src/db/Factories.h | 9 +-- cpp/src/db/MemManager.cpp | 83 ++++++++++++++------------ cpp/src/db/MemManager.h | 43 +++++++------- cpp/src/db/MemManagerAbstract.h | 11 ++-- cpp/src/db/MemTable.cpp | 66 +++++++++++---------- cpp/src/db/MemTable.h | 13 ++-- cpp/src/db/MemTableFile.cpp | 56 +++++++++--------- cpp/src/db/MemTableFile.h | 9 +-- cpp/src/db/NewMemManager.cpp | 63 ++++++++++---------- cpp/src/db/NewMemManager.h | 23 ++++---- cpp/src/db/VectorSource.cpp | 18 +++--- cpp/src/db/VectorSource.h | 19 +++--- cpp/unittest/db/mem_test.cpp | 101 ++++++++++++++++---------------- cpp/unittest/db/utils.h | 20 +++---- 16 files changed, 284 
insertions(+), 275 deletions(-) diff --git a/cpp/src/db/Constants.h b/cpp/src/db/Constants.h index 1ba02b1d55..055b10ca9a 100644 --- a/cpp/src/db/Constants.h +++ b/cpp/src/db/Constants.h @@ -10,9 +10,9 @@ namespace milvus { namespace engine { const size_t K = 1024UL; -const size_t M = K*K; -const size_t G = K*M; -const size_t T = K*G; +const size_t M = K * K; +const size_t G = K * M; +const size_t T = K * G; const size_t MAX_TABLE_FILE_MEM = 128 * M; diff --git a/cpp/src/db/Factories.cpp b/cpp/src/db/Factories.cpp index d51727cbff..65c7484a50 100644 --- a/cpp/src/db/Factories.cpp +++ b/cpp/src/db/Factories.cpp @@ -22,6 +22,8 @@ namespace zilliz { namespace milvus { namespace engine { +#define USE_NEW_MEM_MANAGER 1 + DBMetaOptions DBMetaOptionsFactory::Build(const std::string& path) { auto p = path; if(p == "") { @@ -74,17 +76,14 @@ std::shared_ptr DBMetaImplFactory::Build(const DBMetaOptions& metaOp if (dialect.find("mysql") != std::string::npos) { ENGINE_LOG_INFO << "Using MySQL"; return std::make_shared(meta::MySQLMetaImpl(metaOptions, mode)); - } - else if (dialect.find("sqlite") != std::string::npos) { + } else if (dialect.find("sqlite") != std::string::npos) { ENGINE_LOG_INFO << "Using SQLite"; return std::make_shared(meta::DBMetaImpl(metaOptions)); - } - else { + } else { ENGINE_LOG_ERROR << "Invalid dialect in URI: dialect = " << dialect; throw InvalidArgumentException("URI dialect is not mysql / sqlite"); } - } - else { + } else { ENGINE_LOG_ERROR << "Wrong URI format: URI = " << uri; throw InvalidArgumentException("Wrong URI format "); } @@ -102,11 +101,11 @@ DB* DBFactory::Build(const Options& options) { MemManagerAbstractPtr MemManagerFactory::Build(const std::shared_ptr& meta, const Options& options) { - bool useNew = true; - if (useNew) { - return std::make_shared(meta, options); - } +#ifdef USE_NEW_MEM_MANAGER + return std::make_shared(meta, options); +#else return std::make_shared(meta, options); +#endif } } // namespace engine diff --git a/cpp/src/db/Factories.h b/cpp/src/db/Factories.h index 567bc0a8bc..8b6e7b100f 100644 --- a/cpp/src/db/Factories.h +++ b/cpp/src/db/Factories.h @@ -15,12 +15,13 @@ #include #include + namespace zilliz { namespace milvus { namespace engine { struct DBMetaOptionsFactory { - static DBMetaOptions Build(const std::string& path = ""); + static DBMetaOptions Build(const std::string &path = ""); }; struct OptionsFactory { @@ -29,16 +30,16 @@ struct OptionsFactory { struct DBMetaImplFactory { static std::shared_ptr Build(); - static std::shared_ptr Build(const DBMetaOptions& metaOptions, const int& mode); + static std::shared_ptr Build(const DBMetaOptions &metaOptions, const int &mode); }; struct DBFactory { static std::shared_ptr Build(); - static DB* Build(const Options&); + static DB *Build(const Options &); }; struct MemManagerFactory { - static MemManagerAbstractPtr Build(const std::shared_ptr& meta, const Options& options); + static MemManagerAbstractPtr Build(const std::shared_ptr &meta, const Options &options); }; } // namespace engine diff --git a/cpp/src/db/MemManager.cpp b/cpp/src/db/MemManager.cpp index ba8517cdbd..dbf0703173 100644 --- a/cpp/src/db/MemManager.cpp +++ b/cpp/src/db/MemManager.cpp @@ -15,22 +15,23 @@ #include #include + namespace zilliz { namespace milvus { namespace engine { -MemVectors::MemVectors(const std::shared_ptr& meta_ptr, - const meta::TableFileSchema& schema, const Options& options) - : meta_(meta_ptr), - options_(options), - schema_(schema), - id_generator_(new SimpleIDGenerator()), - 
active_engine_(EngineFactory::Build(schema_.dimension_, schema_.location_, (EngineType)schema_.engine_type_)) { +MemVectors::MemVectors(const std::shared_ptr &meta_ptr, + const meta::TableFileSchema &schema, const Options &options) + : meta_(meta_ptr), + options_(options), + schema_(schema), + id_generator_(new SimpleIDGenerator()), + active_engine_(EngineFactory::Build(schema_.dimension_, schema_.location_, (EngineType) schema_.engine_type_)) { } -Status MemVectors::Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_) { - if(active_engine_ == nullptr) { +Status MemVectors::Add(size_t n_, const float *vectors_, IDNumbers &vector_ids_) { + if (active_engine_ == nullptr) { return Status::Error("index engine is null"); } @@ -39,13 +40,15 @@ Status MemVectors::Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_) Status status = active_engine_->AddWithIds(n_, vectors_, vector_ids_.data()); auto end_time = METRICS_NOW_TIME; auto total_time = METRICS_MICROSECONDS(start_time, end_time); - server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast(n_), static_cast(schema_.dimension_), total_time); + server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast(n_), + static_cast(schema_.dimension_), + total_time); return status; } size_t MemVectors::RowCount() const { - if(active_engine_ == nullptr) { + if (active_engine_ == nullptr) { return 0; } @@ -53,15 +56,15 @@ size_t MemVectors::RowCount() const { } size_t MemVectors::Size() const { - if(active_engine_ == nullptr) { + if (active_engine_ == nullptr) { return 0; } return active_engine_->Size(); } -Status MemVectors::Serialize(std::string& table_id) { - if(active_engine_ == nullptr) { +Status MemVectors::Serialize(std::string &table_id) { + if (active_engine_ == nullptr) { return Status::Error("index engine is null"); } @@ -73,15 +76,16 @@ Status MemVectors::Serialize(std::string& table_id) { auto total_time = METRICS_MICROSECONDS(start_time, end_time); schema_.size_ = size; - server::Metrics::GetInstance().DiskStoreIOSpeedGaugeSet(size/total_time); + server::Metrics::GetInstance().DiskStoreIOSpeedGaugeSet(size / total_time); schema_.file_type_ = (size >= options_.index_trigger_size) ? - meta::TableFileSchema::TO_INDEX : meta::TableFileSchema::RAW; + meta::TableFileSchema::TO_INDEX : meta::TableFileSchema::RAW; auto status = meta_->UpdateTableFile(schema_); LOG(DEBUG) << "New " << ((schema_.file_type_ == meta::TableFileSchema::RAW) ? 
"raw" : "to_index") - << " file " << schema_.file_id_ << " of size " << (double)(active_engine_->Size()) / (double)meta::M << " M"; + << " file " << schema_.file_id_ << " of size " << (double) (active_engine_->Size()) / (double) meta::M + << " M"; active_engine_->Cache(); @@ -99,7 +103,7 @@ MemVectors::~MemVectors() { * MemManager */ MemManager::MemVectorsPtr MemManager::GetMemByTable( - const std::string& table_id) { + const std::string &table_id) { auto memIt = mem_id_map_.find(table_id); if (memIt != mem_id_map_.end()) { return memIt->second; @@ -116,22 +120,23 @@ MemManager::MemVectorsPtr MemManager::GetMemByTable( return mem_id_map_[table_id]; } -Status MemManager::InsertVectors(const std::string& table_id_, - size_t n_, - const float* vectors_, - IDNumbers& vector_ids_) { +Status MemManager::InsertVectors(const std::string &table_id_, + size_t n_, + const float *vectors_, + IDNumbers &vector_ids_) { + + LOG(DEBUG) << "MemManager::InsertVectors: mutable mem = " << GetCurrentMutableMem() << + ", immutable mem = " << GetCurrentImmutableMem() << ", total mem = " << GetCurrentMem(); + std::unique_lock lock(mutex_); return InsertVectorsNoLock(table_id_, n_, vectors_, vector_ids_); } -Status MemManager::InsertVectorsNoLock(const std::string& table_id, - size_t n, - const float* vectors, - IDNumbers& vector_ids) { - - LOG(DEBUG) << "MemManager::InsertVectorsNoLock: mutable mem = " << GetCurrentMutableMem() << - ", immutable mem = " << GetCurrentImmutableMem() << ", total mem = " << GetCurrentMem(); +Status MemManager::InsertVectorsNoLock(const std::string &table_id, + size_t n, + const float *vectors, + IDNumbers &vector_ids) { MemVectorsPtr mem = GetMemByTable(table_id); if (mem == nullptr) { @@ -139,7 +144,7 @@ Status MemManager::InsertVectorsNoLock(const std::string& table_id, } //makesure each file size less than index_trigger_size - if(mem->Size() > options_.index_trigger_size) { + if (mem->Size() > options_.index_trigger_size) { std::unique_lock lock(serialization_mtx_); immu_mem_list_.push_back(mem); mem_id_map_.erase(table_id); @@ -152,8 +157,8 @@ Status MemManager::InsertVectorsNoLock(const std::string& table_id, Status MemManager::ToImmutable() { std::unique_lock lock(mutex_); MemIdMap temp_map; - for (auto& kv: mem_id_map_) { - if(kv.second->RowCount() == 0) { + for (auto &kv: mem_id_map_) { + if (kv.second->RowCount() == 0) { temp_map.insert(kv); continue;//empty vector, no need to serialize } @@ -164,12 +169,12 @@ Status MemManager::ToImmutable() { return Status::OK(); } -Status MemManager::Serialize(std::set& table_ids) { +Status MemManager::Serialize(std::set &table_ids) { ToImmutable(); std::unique_lock lock(serialization_mtx_); std::string table_id; table_ids.clear(); - for (auto& mem : immu_mem_list_) { + for (auto &mem : immu_mem_list_) { mem->Serialize(table_id); table_ids.insert(table_id); } @@ -177,7 +182,7 @@ Status MemManager::Serialize(std::set& table_ids) { return Status::OK(); } -Status MemManager::EraseMemVector(const std::string& table_id) { +Status MemManager::EraseMemVector(const std::string &table_id) { {//erase MemVector from rapid-insert cache std::unique_lock lock(mutex_); mem_id_map_.erase(table_id); @@ -186,8 +191,8 @@ Status MemManager::EraseMemVector(const std::string& table_id) { {//erase MemVector from serialize cache std::unique_lock lock(serialization_mtx_); MemList temp_list; - for (auto& mem : immu_mem_list_) { - if(mem->TableId() != table_id) { + for (auto &mem : immu_mem_list_) { + if (mem->TableId() != table_id) { temp_list.push_back(mem); } } 
@@ -199,7 +204,7 @@ Status MemManager::EraseMemVector(const std::string& table_id) { size_t MemManager::GetCurrentMutableMem() { size_t totalMem = 0; - for (auto& kv : mem_id_map_) { + for (auto &kv : mem_id_map_) { auto memVector = kv.second; totalMem += memVector->Size(); } @@ -208,7 +213,7 @@ size_t MemManager::GetCurrentMutableMem() { size_t MemManager::GetCurrentImmutableMem() { size_t totalMem = 0; - for (auto& memVector : immu_mem_list_) { + for (auto &memVector : immu_mem_list_) { totalMem += memVector->Size(); } return totalMem; diff --git a/cpp/src/db/MemManager.h b/cpp/src/db/MemManager.h index e8460c7a6d..5ad3d08b63 100644 --- a/cpp/src/db/MemManager.h +++ b/cpp/src/db/MemManager.h @@ -17,45 +17,46 @@ #include #include + namespace zilliz { namespace milvus { namespace engine { namespace meta { - class Meta; +class Meta; } class MemVectors { -public: + public: using MetaPtr = meta::Meta::Ptr; using Ptr = std::shared_ptr; - explicit MemVectors(const std::shared_ptr&, - const meta::TableFileSchema&, const Options&); + explicit MemVectors(const std::shared_ptr &, + const meta::TableFileSchema &, const Options &); - Status Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_); + Status Add(size_t n_, const float *vectors_, IDNumbers &vector_ids_); size_t RowCount() const; size_t Size() const; - Status Serialize(std::string& table_id); + Status Serialize(std::string &table_id); ~MemVectors(); - const std::string& Location() const { return schema_.location_; } + const std::string &Location() const { return schema_.location_; } std::string TableId() const { return schema_.table_id_; } -private: + private: MemVectors() = delete; - MemVectors(const MemVectors&) = delete; - MemVectors& operator=(const MemVectors&) = delete; + MemVectors(const MemVectors &) = delete; + MemVectors &operator=(const MemVectors &) = delete; MetaPtr meta_; Options options_; meta::TableFileSchema schema_; - IDGenerator* id_generator_; + IDGenerator *id_generator_; ExecutionEnginePtr active_engine_; }; // MemVectors @@ -63,20 +64,20 @@ private: class MemManager : public MemManagerAbstract { -public: + public: using MetaPtr = meta::Meta::Ptr; using MemVectorsPtr = typename MemVectors::Ptr; using Ptr = std::shared_ptr; - MemManager(const std::shared_ptr& meta, const Options& options) + MemManager(const std::shared_ptr &meta, const Options &options) : meta_(meta), options_(options) {} - Status InsertVectors(const std::string& table_id, - size_t n, const float* vectors, IDNumbers& vector_ids) override; + Status InsertVectors(const std::string &table_id, + size_t n, const float *vectors, IDNumbers &vector_ids) override; - Status Serialize(std::set& table_ids) override; + Status Serialize(std::set &table_ids) override; - Status EraseMemVector(const std::string& table_id) override; + Status EraseMemVector(const std::string &table_id) override; size_t GetCurrentMutableMem() override; @@ -84,11 +85,11 @@ public: size_t GetCurrentMem() override; -private: - MemVectorsPtr GetMemByTable(const std::string& table_id); + private: + MemVectorsPtr GetMemByTable(const std::string &table_id); - Status InsertVectorsNoLock(const std::string& table_id, - size_t n, const float* vectors, IDNumbers& vector_ids); + Status InsertVectorsNoLock(const std::string &table_id, + size_t n, const float *vectors, IDNumbers &vector_ids); Status ToImmutable(); using MemIdMap = std::map; diff --git a/cpp/src/db/MemManagerAbstract.h b/cpp/src/db/MemManagerAbstract.h index 58c73ba6f8..943c454e46 100644 --- a/cpp/src/db/MemManagerAbstract.h +++ 
b/cpp/src/db/MemManagerAbstract.h @@ -2,19 +2,20 @@ #include + namespace zilliz { namespace milvus { namespace engine { class MemManagerAbstract { -public: + public: - virtual Status InsertVectors(const std::string& table_id, - size_t n, const float* vectors, IDNumbers& vector_ids) = 0; + virtual Status InsertVectors(const std::string &table_id, + size_t n, const float *vectors, IDNumbers &vector_ids) = 0; - virtual Status Serialize(std::set& table_ids) = 0; + virtual Status Serialize(std::set &table_ids) = 0; - virtual Status EraseMemVector(const std::string& table_id) = 0; + virtual Status EraseMemVector(const std::string &table_id) = 0; virtual size_t GetCurrentMutableMem() = 0; diff --git a/cpp/src/db/MemTable.cpp b/cpp/src/db/MemTable.cpp index ba3875fbb5..e05aa058ac 100644 --- a/cpp/src/db/MemTable.cpp +++ b/cpp/src/db/MemTable.cpp @@ -1,46 +1,50 @@ #include "MemTable.h" #include "Log.h" + namespace zilliz { namespace milvus { namespace engine { -MemTable::MemTable(const std::string& table_id, - const std::shared_ptr& meta, - const Options& options) : - table_id_(table_id), - meta_(meta), - options_(options) { +MemTable::MemTable(const std::string &table_id, + const std::shared_ptr &meta, + const Options &options) : + table_id_(table_id), + meta_(meta), + options_(options) { } -Status MemTable::Add(VectorSource::Ptr& source) { +Status MemTable::Add(VectorSource::Ptr &source) { + while (!source->AllAdded()) { - MemTableFile::Ptr currentMemTableFile; + + MemTableFile::Ptr current_mem_table_file; if (!mem_table_file_list_.empty()) { - currentMemTableFile = mem_table_file_list_.back(); + current_mem_table_file = mem_table_file_list_.back(); } + Status status; - if (mem_table_file_list_.empty() || currentMemTableFile->IsFull()) { - MemTableFile::Ptr newMemTableFile = std::make_shared(table_id_, meta_, options_); - status = newMemTableFile->Add(source); + if (mem_table_file_list_.empty() || current_mem_table_file->IsFull()) { + MemTableFile::Ptr new_mem_table_file = std::make_shared(table_id_, meta_, options_); + status = new_mem_table_file->Add(source); if (status.ok()) { - mem_table_file_list_.emplace_back(newMemTableFile); + mem_table_file_list_.emplace_back(new_mem_table_file); } + } else { + status = current_mem_table_file->Add(source); } - else { - status = currentMemTableFile->Add(source); - } + if (!status.ok()) { - std::string errMsg = "MemTable::Add failed: " + status.ToString(); - ENGINE_LOG_ERROR << errMsg; - return Status::Error(errMsg); + std::string err_msg = "MemTable::Add failed: " + status.ToString(); + ENGINE_LOG_ERROR << err_msg; + return Status::Error(err_msg); } } return Status::OK(); } -void MemTable::GetCurrentMemTableFile(MemTableFile::Ptr& mem_table_file) { +void MemTable::GetCurrentMemTableFile(MemTableFile::Ptr &mem_table_file) { mem_table_file = mem_table_file_list_.back(); } @@ -49,15 +53,15 @@ size_t MemTable::GetTableFileCount() { } Status MemTable::Serialize() { - for (auto memTableFile = mem_table_file_list_.begin(); memTableFile != mem_table_file_list_.end(); ) { - auto status = (*memTableFile)->Serialize(); + for (auto mem_table_file = mem_table_file_list_.begin(); mem_table_file != mem_table_file_list_.end();) { + auto status = (*mem_table_file)->Serialize(); if (!status.ok()) { - std::string errMsg = "MemTable::Serialize failed: " + status.ToString(); - ENGINE_LOG_ERROR << errMsg; - return Status::Error(errMsg); + std::string err_msg = "MemTable::Serialize failed: " + status.ToString(); + ENGINE_LOG_ERROR << err_msg; + return Status::Error(err_msg); } 
std::lock_guard lock(mutex_); - memTableFile = mem_table_file_list_.erase(memTableFile); + mem_table_file = mem_table_file_list_.erase(mem_table_file); } return Status::OK(); } @@ -66,17 +70,17 @@ bool MemTable::Empty() { return mem_table_file_list_.empty(); } -const std::string& MemTable::GetTableId() const { +const std::string &MemTable::GetTableId() const { return table_id_; } size_t MemTable::GetCurrentMem() { std::lock_guard lock(mutex_); - size_t totalMem = 0; - for (auto& memTableFile : mem_table_file_list_) { - totalMem += memTableFile->GetCurrentMem(); + size_t total_mem = 0; + for (auto &mem_table_file : mem_table_file_list_) { + total_mem += mem_table_file->GetCurrentMem(); } - return totalMem; + return total_mem; } } // namespace engine diff --git a/cpp/src/db/MemTable.h b/cpp/src/db/MemTable.h index 9bae932e62..198fcc228a 100644 --- a/cpp/src/db/MemTable.h +++ b/cpp/src/db/MemTable.h @@ -6,23 +6,24 @@ #include + namespace zilliz { namespace milvus { namespace engine { class MemTable { -public: + public: using Ptr = std::shared_ptr; using MemTableFileList = std::vector; using MetaPtr = meta::Meta::Ptr; - MemTable(const std::string& table_id, const std::shared_ptr& meta, const Options& options); + MemTable(const std::string &table_id, const std::shared_ptr &meta, const Options &options); - Status Add(VectorSource::Ptr& source); + Status Add(VectorSource::Ptr &source); - void GetCurrentMemTableFile(MemTableFile::Ptr& mem_table_file); + void GetCurrentMemTableFile(MemTableFile::Ptr &mem_table_file); size_t GetTableFileCount(); @@ -30,11 +31,11 @@ public: bool Empty(); - const std::string& GetTableId() const; + const std::string &GetTableId() const; size_t GetCurrentMem(); -private: + private: const std::string table_id_; MemTableFileList mem_table_file_list_; diff --git a/cpp/src/db/MemTableFile.cpp b/cpp/src/db/MemTableFile.cpp index 0ff91de00b..649a680cf3 100644 --- a/cpp/src/db/MemTableFile.cpp +++ b/cpp/src/db/MemTableFile.cpp @@ -6,23 +6,24 @@ #include + namespace zilliz { namespace milvus { namespace engine { -MemTableFile::MemTableFile(const std::string& table_id, - const std::shared_ptr& meta, - const Options& options) : - table_id_(table_id), - meta_(meta), - options_(options) { +MemTableFile::MemTableFile(const std::string &table_id, + const std::shared_ptr &meta, + const Options &options) : + table_id_(table_id), + meta_(meta), + options_(options) { current_mem_ = 0; auto status = CreateTableFile(); if (status.ok()) { execution_engine_ = EngineFactory::Build(table_file_schema_.dimension_, table_file_schema_.location_, - (EngineType)table_file_schema_.engine_type_); + (EngineType) table_file_schema_.engine_type_); } } @@ -33,31 +34,30 @@ Status MemTableFile::CreateTableFile() { auto status = meta_->CreateTableFile(table_file_schema); if (status.ok()) { table_file_schema_ = table_file_schema; - } - else { - std::string errMsg = "MemTableFile::CreateTableFile failed: " + status.ToString(); - ENGINE_LOG_ERROR << errMsg; + } else { + std::string err_msg = "MemTableFile::CreateTableFile failed: " + status.ToString(); + ENGINE_LOG_ERROR << err_msg; } return status; } -Status MemTableFile::Add(const VectorSource::Ptr& source) { +Status MemTableFile::Add(const VectorSource::Ptr &source) { if (table_file_schema_.dimension_ <= 0) { - std::string errMsg = "MemTableFile::Add: table_file_schema dimension = " + - std::to_string(table_file_schema_.dimension_) + ", table_id = " + table_file_schema_.table_id_; - ENGINE_LOG_ERROR << errMsg; - return Status::Error(errMsg); + std::string 
err_msg = "MemTableFile::Add: table_file_schema dimension = " + + std::to_string(table_file_schema_.dimension_) + ", table_id = " + table_file_schema_.table_id_; + ENGINE_LOG_ERROR << err_msg; + return Status::Error(err_msg); } - size_t singleVectorMemSize = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE; - size_t memLeft = GetMemLeft(); - if (memLeft >= singleVectorMemSize) { - size_t numVectorsToAdd = std::ceil(memLeft / singleVectorMemSize); - size_t numVectorsAdded; - auto status = source->Add(execution_engine_, table_file_schema_, numVectorsToAdd, numVectorsAdded); + size_t single_vector_mem_size = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE; + size_t mem_left = GetMemLeft(); + if (mem_left >= single_vector_mem_size) { + size_t num_vectors_to_add = std::ceil(mem_left / single_vector_mem_size); + size_t num_vectors_added; + auto status = source->Add(execution_engine_, table_file_schema_, num_vectors_to_add, num_vectors_added); if (status.ok()) { - current_mem_ += (numVectorsAdded * singleVectorMemSize); + current_mem_ += (num_vectors_added * single_vector_mem_size); } return status; } @@ -73,8 +73,8 @@ size_t MemTableFile::GetMemLeft() { } bool MemTableFile::IsFull() { - size_t singleVectorMemSize = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE; - return (GetMemLeft() < singleVectorMemSize); + size_t single_vector_mem_size = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE; + return (GetMemLeft() < single_vector_mem_size); } Status MemTableFile::Serialize() { @@ -88,15 +88,15 @@ Status MemTableFile::Serialize() { auto total_time = METRICS_MICROSECONDS(start_time, end_time); table_file_schema_.size_ = size; - server::Metrics::GetInstance().DiskStoreIOSpeedGaugeSet((double)size/total_time); + server::Metrics::GetInstance().DiskStoreIOSpeedGaugeSet((double) size / total_time); table_file_schema_.file_type_ = (size >= options_.index_trigger_size) ? - meta::TableFileSchema::TO_INDEX : meta::TableFileSchema::RAW; + meta::TableFileSchema::TO_INDEX : meta::TableFileSchema::RAW; auto status = meta_->UpdateTableFile(table_file_schema_); LOG(DEBUG) << "New " << ((table_file_schema_.file_type_ == meta::TableFileSchema::RAW) ? 
"raw" : "to_index") - << " file " << table_file_schema_.file_id_ << " of size " << (double)size / (double)M << " M"; + << " file " << table_file_schema_.file_id_ << " of size " << (double) size / (double) M << " M"; execution_engine_->Cache(); diff --git a/cpp/src/db/MemTableFile.h b/cpp/src/db/MemTableFile.h index 1be0ae78ba..4d0011b362 100644 --- a/cpp/src/db/MemTableFile.h +++ b/cpp/src/db/MemTableFile.h @@ -5,20 +5,21 @@ #include "VectorSource.h" #include "ExecutionEngine.h" + namespace zilliz { namespace milvus { namespace engine { class MemTableFile { -public: + public: using Ptr = std::shared_ptr; using MetaPtr = meta::Meta::Ptr; - MemTableFile(const std::string& table_id, const std::shared_ptr& meta, const Options& options); + MemTableFile(const std::string &table_id, const std::shared_ptr &meta, const Options &options); - Status Add(const VectorSource::Ptr& source); + Status Add(const VectorSource::Ptr &source); size_t GetCurrentMem(); @@ -28,7 +29,7 @@ public: Status Serialize(); -private: + private: Status CreateTableFile(); diff --git a/cpp/src/db/NewMemManager.cpp b/cpp/src/db/NewMemManager.cpp index 3c78f37101..b0fcc9d4ae 100644 --- a/cpp/src/db/NewMemManager.cpp +++ b/cpp/src/db/NewMemManager.cpp @@ -5,11 +5,12 @@ #include + namespace zilliz { namespace milvus { namespace engine { -NewMemManager::MemTablePtr NewMemManager::GetMemByTable(const std::string& table_id) { +NewMemManager::MemTablePtr NewMemManager::GetMemByTable(const std::string &table_id) { auto memIt = mem_id_map_.find(table_id); if (memIt != mem_id_map_.end()) { return memIt->second; @@ -19,27 +20,27 @@ NewMemManager::MemTablePtr NewMemManager::GetMemByTable(const std::string& table return mem_id_map_[table_id]; } -Status NewMemManager::InsertVectors(const std::string& table_id_, +Status NewMemManager::InsertVectors(const std::string &table_id_, size_t n_, - const float* vectors_, - IDNumbers& vector_ids_) { + const float *vectors_, + IDNumbers &vector_ids_) { while (GetCurrentMem() > options_.maximum_memory) { std::this_thread::sleep_for(std::chrono::milliseconds(1)); } + LOG(DEBUG) << "NewMemManager::InsertVectors: mutable mem = " << GetCurrentMutableMem() << + ", immutable mem = " << GetCurrentImmutableMem() << ", total mem = " << GetCurrentMem(); + std::unique_lock lock(mutex_); return InsertVectorsNoLock(table_id_, n_, vectors_, vector_ids_); } -Status NewMemManager::InsertVectorsNoLock(const std::string& table_id, +Status NewMemManager::InsertVectorsNoLock(const std::string &table_id, size_t n, - const float* vectors, - IDNumbers& vector_ids) { - - LOG(DEBUG) << "NewMemManager::InsertVectorsNoLock: mutable mem = " << GetCurrentMutableMem() << - ", immutable mem = " << GetCurrentImmutableMem() << ", total mem = " << GetCurrentMem(); + const float *vectors, + IDNumbers &vector_ids) { MemTablePtr mem = GetMemByTable(table_id); VectorSource::Ptr source = std::make_shared(n, vectors); @@ -54,37 +55,33 @@ Status NewMemManager::InsertVectorsNoLock(const std::string& table_id, Status NewMemManager::ToImmutable() { std::unique_lock lock(mutex_); MemIdMap temp_map; - for (auto& kv: mem_id_map_) { - if(kv.second->Empty()) { + for (auto &kv: mem_id_map_) { + if (kv.second->Empty()) { + //empty table, no need to serialize temp_map.insert(kv); - continue;//empty table, no need to serialize + } else { + immu_mem_list_.push_back(kv.second); } - immu_mem_list_.push_back(kv.second); } mem_id_map_.swap(temp_map); return Status::OK(); } -Status NewMemManager::Serialize(std::set& table_ids) { +Status 
NewMemManager::Serialize(std::set &table_ids) { ToImmutable(); std::unique_lock lock(serialization_mtx_); table_ids.clear(); - for (auto& mem : immu_mem_list_) { + for (auto &mem : immu_mem_list_) { mem->Serialize(); table_ids.insert(mem->GetTableId()); } immu_mem_list_.clear(); -// for (auto mem = immu_mem_list_.begin(); mem != immu_mem_list_.end(); ) { -// (*mem)->Serialize(); -// table_ids.insert((*mem)->GetTableId()); -// mem = immu_mem_list_.erase(mem); -// LOG(DEBUG) << "immu_mem_list_ size = " << immu_mem_list_.size(); -// } + return Status::OK(); } -Status NewMemManager::EraseMemVector(const std::string& table_id) { +Status NewMemManager::EraseMemVector(const std::string &table_id) { {//erase MemVector from rapid-insert cache std::unique_lock lock(mutex_); mem_id_map_.erase(table_id); @@ -93,8 +90,8 @@ Status NewMemManager::EraseMemVector(const std::string& table_id) { {//erase MemVector from serialize cache std::unique_lock lock(serialization_mtx_); MemList temp_list; - for (auto& mem : immu_mem_list_) { - if(mem->GetTableId() != table_id) { + for (auto &mem : immu_mem_list_) { + if (mem->GetTableId() != table_id) { temp_list.push_back(mem); } } @@ -105,20 +102,20 @@ Status NewMemManager::EraseMemVector(const std::string& table_id) { } size_t NewMemManager::GetCurrentMutableMem() { - size_t totalMem = 0; - for (auto& kv : mem_id_map_) { + size_t total_mem = 0; + for (auto &kv : mem_id_map_) { auto memTable = kv.second; - totalMem += memTable->GetCurrentMem(); + total_mem += memTable->GetCurrentMem(); } - return totalMem; + return total_mem; } size_t NewMemManager::GetCurrentImmutableMem() { - size_t totalMem = 0; - for (auto& memTable : immu_mem_list_) { - totalMem += memTable->GetCurrentMem(); + size_t total_mem = 0; + for (auto &mem_table : immu_mem_list_) { + total_mem += mem_table->GetCurrentMem(); } - return totalMem; + return total_mem; } size_t NewMemManager::GetCurrentMem() { diff --git a/cpp/src/db/NewMemManager.h b/cpp/src/db/NewMemManager.h index 9883480404..5b933c94ca 100644 --- a/cpp/src/db/NewMemManager.h +++ b/cpp/src/db/NewMemManager.h @@ -11,25 +11,26 @@ #include #include + namespace zilliz { namespace milvus { namespace engine { class NewMemManager : public MemManagerAbstract { -public: + public: using MetaPtr = meta::Meta::Ptr; using Ptr = std::shared_ptr; using MemTablePtr = typename MemTable::Ptr; - NewMemManager(const std::shared_ptr& meta, const Options& options) - : meta_(meta), options_(options) {} + NewMemManager(const std::shared_ptr &meta, const Options &options) + : meta_(meta), options_(options) {} - Status InsertVectors(const std::string& table_id, - size_t n, const float* vectors, IDNumbers& vector_ids) override; + Status InsertVectors(const std::string &table_id, + size_t n, const float *vectors, IDNumbers &vector_ids) override; - Status Serialize(std::set& table_ids) override; + Status Serialize(std::set &table_ids) override; - Status EraseMemVector(const std::string& table_id) override; + Status EraseMemVector(const std::string &table_id) override; size_t GetCurrentMutableMem() override; @@ -37,11 +38,11 @@ public: size_t GetCurrentMem() override; -private: - MemTablePtr GetMemByTable(const std::string& table_id); + private: + MemTablePtr GetMemByTable(const std::string &table_id); - Status InsertVectorsNoLock(const std::string& table_id, - size_t n, const float* vectors, IDNumbers& vector_ids); + Status InsertVectorsNoLock(const std::string &table_id, + size_t n, const float *vectors, IDNumbers &vector_ids); Status ToImmutable(); using MemIdMap 
= std::map; diff --git a/cpp/src/db/VectorSource.cpp b/cpp/src/db/VectorSource.cpp index d032be51f6..74c07ae1f6 100644 --- a/cpp/src/db/VectorSource.cpp +++ b/cpp/src/db/VectorSource.cpp @@ -4,6 +4,7 @@ #include "Log.h" #include "metrics/Metrics.h" + namespace zilliz { namespace milvus { namespace engine { @@ -11,16 +12,16 @@ namespace engine { VectorSource::VectorSource(const size_t &n, const float *vectors) : - n_(n), - vectors_(vectors), - id_generator_(new SimpleIDGenerator()) { + n_(n), + vectors_(vectors), + id_generator_(new SimpleIDGenerator()) { current_num_vectors_added = 0; } -Status VectorSource::Add(const ExecutionEnginePtr& execution_engine, - const meta::TableFileSchema& table_file_schema, - const size_t& num_vectors_to_add, - size_t& num_vectors_added) { +Status VectorSource::Add(const ExecutionEnginePtr &execution_engine, + const meta::TableFileSchema &table_file_schema, + const size_t &num_vectors_to_add, + size_t &num_vectors_added) { auto start_time = METRICS_NOW_TIME; @@ -36,8 +37,7 @@ Status VectorSource::Add(const ExecutionEnginePtr& execution_engine, vector_ids_.insert(vector_ids_.end(), std::make_move_iterator(vector_ids_to_add.begin()), std::make_move_iterator(vector_ids_to_add.end())); - } - else { + } else { ENGINE_LOG_ERROR << "VectorSource::Add failed: " + status.ToString(); } diff --git a/cpp/src/db/VectorSource.h b/cpp/src/db/VectorSource.h index dec31f39e1..7092805a6d 100644 --- a/cpp/src/db/VectorSource.h +++ b/cpp/src/db/VectorSource.h @@ -5,22 +5,23 @@ #include "IDGenerator.h" #include "ExecutionEngine.h" + namespace zilliz { namespace milvus { namespace engine { class VectorSource { -public: + public: using Ptr = std::shared_ptr; - VectorSource(const size_t& n, const float* vectors); + VectorSource(const size_t &n, const float *vectors); - Status Add(const ExecutionEnginePtr& execution_engine, - const meta::TableFileSchema& table_file_schema, - const size_t& num_vectors_to_add, - size_t& num_vectors_added); + Status Add(const ExecutionEnginePtr &execution_engine, + const meta::TableFileSchema &table_file_schema, + const size_t &num_vectors_to_add, + size_t &num_vectors_added); size_t GetNumVectorsAdded(); @@ -28,15 +29,15 @@ public: IDNumbers GetVectorIds(); -private: + private: const size_t n_; - const float* vectors_; + const float *vectors_; IDNumbers vector_ids_; size_t current_num_vectors_added; - IDGenerator* id_generator_; + IDGenerator *id_generator_; }; //VectorSource diff --git a/cpp/unittest/db/mem_test.cpp b/cpp/unittest/db/mem_test.cpp index 818c3a6388..5b7972ec35 100644 --- a/cpp/unittest/db/mem_test.cpp +++ b/cpp/unittest/db/mem_test.cpp @@ -15,33 +15,34 @@ #include #include + using namespace zilliz::milvus; namespace { - static const std::string TABLE_NAME = "test_group"; - static constexpr int64_t TABLE_DIM = 256; - static constexpr int64_t VECTOR_COUNT = 250000; - static constexpr int64_t INSERT_LOOP = 10000; +static const std::string TABLE_NAME = "test_group"; +static constexpr int64_t TABLE_DIM = 256; +static constexpr int64_t VECTOR_COUNT = 250000; +static constexpr int64_t INSERT_LOOP = 10000; - engine::meta::TableSchema BuildTableSchema() { - engine::meta::TableSchema table_info; - table_info.dimension_ = TABLE_DIM; - table_info.table_id_ = TABLE_NAME; - table_info.engine_type_ = (int)engine::EngineType::FAISS_IDMAP; - return table_info; - } +engine::meta::TableSchema BuildTableSchema() { + engine::meta::TableSchema table_info; + table_info.dimension_ = TABLE_DIM; + table_info.table_id_ = TABLE_NAME; + table_info.engine_type_ = 
(int) engine::EngineType::FAISS_IDMAP; + return table_info; +} - void BuildVectors(int64_t n, std::vector& vectors) { - vectors.clear(); - vectors.resize(n*TABLE_DIM); - float* data = vectors.data(); - for(int i = 0; i < n; i++) { - for(int j = 0; j < TABLE_DIM; j++) data[TABLE_DIM * i + j] = drand48(); - data[TABLE_DIM * i] += i / 2000.; - } +void BuildVectors(int64_t n, std::vector &vectors) { + vectors.clear(); + vectors.resize(n * TABLE_DIM); + float *data = vectors.data(); + for (int i = 0; i < n; i++) { + for (int j = 0; j < TABLE_DIM; j++) data[TABLE_DIM * i + j] = drand48(); + data[TABLE_DIM * i] += i / 2000.; } } +} TEST_F(NewMemManagerTest, VECTOR_SOURCE_TEST) { @@ -65,7 +66,7 @@ TEST_F(NewMemManagerTest, VECTOR_SOURCE_TEST) { size_t num_vectors_added; engine::ExecutionEnginePtr execution_engine_ = engine::EngineFactory::Build(table_file_schema.dimension_, table_file_schema.location_, - (engine::EngineType)table_file_schema.engine_type_); + (engine::EngineType) table_file_schema.engine_type_); status = source.Add(execution_engine_, table_file_schema, 50, num_vectors_added); ASSERT_TRUE(status.ok()); @@ -82,10 +83,6 @@ TEST_F(NewMemManagerTest, VECTOR_SOURCE_TEST) { vector_ids = source.GetVectorIds(); ASSERT_EQ(vector_ids.size(), 100); -// for (auto& id : vector_ids) { -// std::cout << id << std::endl; -// } - status = impl_->DropAll(); ASSERT_TRUE(status.ok()); } @@ -99,7 +96,7 @@ TEST_F(NewMemManagerTest, MEM_TABLE_FILE_TEST) { auto status = impl_->CreateTable(table_schema); ASSERT_TRUE(status.ok()); - engine::MemTableFile memTableFile(TABLE_NAME, impl_, options); + engine::MemTableFile mem_table_file(TABLE_NAME, impl_, options); int64_t n_100 = 100; std::vector vectors_100; @@ -107,28 +104,28 @@ TEST_F(NewMemManagerTest, MEM_TABLE_FILE_TEST) { engine::VectorSource::Ptr source = std::make_shared(n_100, vectors_100.data()); - status = memTableFile.Add(source); + status = mem_table_file.Add(source); ASSERT_TRUE(status.ok()); -// std::cout << memTableFile.GetCurrentMem() << " " << memTableFile.GetMemLeft() << std::endl; +// std::cout << mem_table_file.GetCurrentMem() << " " << mem_table_file.GetMemLeft() << std::endl; engine::IDNumbers vector_ids = source->GetVectorIds(); ASSERT_EQ(vector_ids.size(), 100); size_t singleVectorMem = sizeof(float) * TABLE_DIM; - ASSERT_EQ(memTableFile.GetCurrentMem(), n_100 * singleVectorMem); + ASSERT_EQ(mem_table_file.GetCurrentMem(), n_100 * singleVectorMem); int64_t n_max = engine::MAX_TABLE_FILE_MEM / singleVectorMem; std::vector vectors_128M; BuildVectors(n_max, vectors_128M); engine::VectorSource::Ptr source_128M = std::make_shared(n_max, vectors_128M.data()); - status = memTableFile.Add(source_128M); + status = mem_table_file.Add(source_128M); vector_ids = source_128M->GetVectorIds(); ASSERT_EQ(vector_ids.size(), n_max - n_100); - ASSERT_TRUE(memTableFile.IsFull()); + ASSERT_TRUE(mem_table_file.IsFull()); status = impl_->DropAll(); ASSERT_TRUE(status.ok()); @@ -149,34 +146,34 @@ TEST_F(NewMemManagerTest, MEM_TABLE_TEST) { engine::VectorSource::Ptr source_100 = std::make_shared(n_100, vectors_100.data()); - engine::MemTable memTable(TABLE_NAME, impl_, options); + engine::MemTable mem_table(TABLE_NAME, impl_, options); - status = memTable.Add(source_100); + status = mem_table.Add(source_100); ASSERT_TRUE(status.ok()); engine::IDNumbers vector_ids = source_100->GetVectorIds(); ASSERT_EQ(vector_ids.size(), 100); - engine::MemTableFile::Ptr memTableFile; - memTable.GetCurrentMemTableFile(memTableFile); + engine::MemTableFile::Ptr mem_table_file; + 
mem_table.GetCurrentMemTableFile(mem_table_file); size_t singleVectorMem = sizeof(float) * TABLE_DIM; - ASSERT_EQ(memTableFile->GetCurrentMem(), n_100 * singleVectorMem); + ASSERT_EQ(mem_table_file->GetCurrentMem(), n_100 * singleVectorMem); int64_t n_max = engine::MAX_TABLE_FILE_MEM / singleVectorMem; std::vector vectors_128M; BuildVectors(n_max, vectors_128M); engine::VectorSource::Ptr source_128M = std::make_shared(n_max, vectors_128M.data()); - status = memTable.Add(source_128M); + status = mem_table.Add(source_128M); ASSERT_TRUE(status.ok()); vector_ids = source_128M->GetVectorIds(); ASSERT_EQ(vector_ids.size(), n_max); - memTable.GetCurrentMemTableFile(memTableFile); - ASSERT_EQ(memTableFile->GetCurrentMem(), n_100 * singleVectorMem); + mem_table.GetCurrentMemTableFile(mem_table_file); + ASSERT_EQ(mem_table_file->GetCurrentMem(), n_100 * singleVectorMem); - ASSERT_EQ(memTable.GetTableFileCount(), 2); + ASSERT_EQ(mem_table.GetTableFileCount(), 2); int64_t n_1G = 1024000; std::vector vectors_1G; @@ -184,16 +181,16 @@ TEST_F(NewMemManagerTest, MEM_TABLE_TEST) { engine::VectorSource::Ptr source_1G = std::make_shared(n_1G, vectors_1G.data()); - status = memTable.Add(source_1G); + status = mem_table.Add(source_1G); ASSERT_TRUE(status.ok()); vector_ids = source_1G->GetVectorIds(); ASSERT_EQ(vector_ids.size(), n_1G); int expectedTableFileCount = 2 + std::ceil((n_1G - n_100) * singleVectorMem / engine::MAX_TABLE_FILE_MEM); - ASSERT_EQ(memTable.GetTableFileCount(), expectedTableFileCount); + ASSERT_EQ(mem_table.GetTableFileCount(), expectedTableFileCount); - status = memTable.Serialize(); + status = mem_table.Serialize(); ASSERT_TRUE(status.ok()); status = impl_->DropAll(); @@ -216,7 +213,7 @@ TEST_F(NewMemManagerTest, SERIAL_INSERT_SEARCH_TEST) { ASSERT_STATS(stat); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); - std::map> search_vectors; + std::map> search_vectors; { engine::IDNumbers vector_ids; int64_t nb = 1024000; @@ -231,8 +228,8 @@ TEST_F(NewMemManagerTest, SERIAL_INSERT_SEARCH_TEST) { std::mt19937 gen(rd()); std::uniform_int_distribution dis(0, nb - 1); - int64_t numQuery = 20; - for (int64_t i = 0; i < numQuery; ++i) { + int64_t num_query = 20; + for (int64_t i = 0; i < num_query; ++i) { int64_t index = dis(gen); std::vector search; for (int64_t j = 0; j < TABLE_DIM; j++) { @@ -243,8 +240,8 @@ TEST_F(NewMemManagerTest, SERIAL_INSERT_SEARCH_TEST) { } int k = 10; - for(auto& pair : search_vectors) { - auto& search = pair.second; + for (auto &pair : search_vectors) { + auto &search = pair.second; engine::QueryResults results; stat = db_->Query(TABLE_NAME, k, 1, search.data(), results); ASSERT_EQ(results[0][0].first, pair.first); @@ -329,18 +326,18 @@ TEST_F(NewMemManagerTest, CONCURRENT_INSERT_SEARCH_TEST) { uint64_t count = 0; uint64_t prev_count = 0; - for (auto j=0; j<10; ++j) { + for (auto j = 0; j < 10; ++j) { ss.str(""); db_->Size(count); prev_count = count; START_TIMER; stat = db_->Query(TABLE_NAME, k, qb, qxb.data(), results); - ss << "Search " << j << " With Size " << count/engine::meta::M << " M"; + ss << "Search " << j << " With Size " << count / engine::meta::M << " M"; STOP_TIMER(ss.str()); ASSERT_STATS(stat); - for (auto k=0; kInsertVectors(TABLE_NAME, qb, qxb.data(), target_ids); ASSERT_EQ(target_ids.size(), qb); } else { diff --git a/cpp/unittest/db/utils.h b/cpp/unittest/db/utils.h index d06500de5c..9c126030c2 100644 --- a/cpp/unittest/db/utils.h +++ b/cpp/unittest/db/utils.h @@ -30,7 +30,7 @@ #define STOP_TIMER(name) #endif -void 
ASSERT_STATS(zilliz::milvus::engine::Status& stat); +void ASSERT_STATS(zilliz::milvus::engine::Status &stat); //class TestEnv : public ::testing::Environment { //public: @@ -54,8 +54,8 @@ void ASSERT_STATS(zilliz::milvus::engine::Status& stat); // ::testing::AddGlobalTestEnvironment(new TestEnv); class DBTest : public ::testing::Test { -protected: - zilliz::milvus::engine::DB* db_; + protected: + zilliz::milvus::engine::DB *db_; void InitLog(); virtual void SetUp() override; @@ -64,13 +64,13 @@ protected: }; class DBTest2 : public DBTest { -protected: + protected: virtual zilliz::milvus::engine::Options GetOptions() override; }; class MetaTest : public DBTest { -protected: + protected: std::shared_ptr impl_; virtual void SetUp() override; @@ -78,17 +78,17 @@ protected: }; class MySQLTest : public ::testing::Test { -protected: + protected: // std::shared_ptr impl_; zilliz::milvus::engine::DBMetaOptions getDBMetaOptions(); }; -class MySQLDBTest : public ::testing::Test { -protected: +class MySQLDBTest : public ::testing::Test { + protected: zilliz::milvus::engine::Options GetOptions(); }; -class NewMemManagerTest : public ::testing::Test { +class NewMemManagerTest : public ::testing::Test { void InitLog(); - virtual void SetUp() override; + void SetUp() override; };
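
Editor's note on the memory-manager refactor above: MemManagerFactory::Build now selects the new path (NewMemManager -> MemTable -> MemTableFile -> VectorSource) at compile time via the USE_NEW_MEM_MANAGER macro instead of the previous hard-coded `bool useNew = true`, so the legacy MemManager is compiled out rather than branched around at runtime. Below is a minimal, hedged usage sketch of that new path, pieced together only from signatures visible in this patch; the include path, the Options setup, and the container element types that the extraction stripped (std::shared_ptr<...>, std::set<...>) are assumptions, not confirmed API.

    #include <set>
    #include <string>
    #include "Factories.h"   // path assumed; declares the *Factory helpers changed above

    using namespace zilliz::milvus::engine;

    // Sketch only: build a meta backend and a MemManagerAbstractPtr the same way
    // the factories in this patch do, then insert vectors and flush them.
    void InsertAndFlushSketch(const float *vectors, size_t n) {
        DBMetaOptions meta_options = DBMetaOptionsFactory::Build();      // default path
        auto meta = DBMetaImplFactory::Build(meta_options, /*mode=*/0);  // element type of returned shared_ptr assumed
        Options options;                                                 // default-constructed; real code may use OptionsFactory
        MemManagerAbstractPtr mem_mgr = MemManagerFactory::Build(meta, options);

        IDNumbers vector_ids;
        // NewMemManager::InsertVectors waits while GetCurrentMem() exceeds
        // options.maximum_memory, then appends into the table's current MemTableFile.
        mem_mgr->InsertVectors("test_group", n, vectors, vector_ids);

        std::set<std::string> flushed_tables;                            // element type assumed
        // Serialize() moves non-empty MemTables to the immutable list and writes them out.
        mem_mgr->Serialize(flushed_tables);
    }

The sketch mirrors the flow exercised by the new unit tests in cpp/unittest/db/mem_test.cpp (VectorSource -> MemTableFile -> MemTable), but goes through the abstract MemManager interface so the same caller code works whichever manager the macro selects.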