diff --git a/CHANGELOG.md b/CHANGELOG.md index 54051db0d2..6540e2392f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,11 +17,14 @@ Please mark all change in change log and use the issue from GitHub - \#1661 HNSW support deleted vectors searching - \#1825 Add annoy index type in C++ sdk - \#1849 NSG support deleted vectors searching +- \#1893 Log config information and device information ## Improvement - \#1627 Move read/write index APIs into codec - \#1784 Add Substructure and Superstructure in http module - \#1858 Disable S3 build +- \#1882 Add index annoy into http module +- \#1886 Refactor log on search and insert request ## Task diff --git a/core/src/config/Config.cpp b/core/src/config/Config.cpp index d8165c4403..e7ef144370 100644 --- a/core/src/config/Config.cpp +++ b/core/src/config/Config.cpp @@ -314,9 +314,9 @@ Config::ResetDefaultConfig() { } void -Config::GetConfigJsonStr(std::string& result) { +Config::GetConfigJsonStr(std::string& result, int64_t indent) { nlohmann::json config_json(config_map_); - result = config_json.dump(); + result = config_json.dump(indent); } Status diff --git a/core/src/config/Config.h b/core/src/config/Config.h index 79c4fdff8f..6ed6d443b0 100644 --- a/core/src/config/Config.h +++ b/core/src/config/Config.h @@ -161,7 +161,7 @@ class Config { Status ResetDefaultConfig(); void - GetConfigJsonStr(std::string& result); + GetConfigJsonStr(std::string& result, int64_t indent = -1); Status ProcessConfigCli(std::string& result, const std::string& cmd); diff --git a/core/src/db/DBImpl.cpp b/core/src/db/DBImpl.cpp index 39a15c4a71..e361ce3024 100644 --- a/core/src/db/DBImpl.cpp +++ b/core/src/db/DBImpl.cpp @@ -508,6 +508,7 @@ DBImpl::InsertVectors(const std::string& collection_id, const std::string& parti SafeIDGenerator& id_generator = SafeIDGenerator::GetInstance(); Status status = id_generator.GetNextIDNumbers(vectors.vector_count_, vectors.id_array_); if (!status.ok()) { + ENGINE_LOG_ERROR << LogOut("[%s][%ld] Get next id number fail: %s", "insert", 0, status.message().c_str()); return status; } } @@ -517,6 +518,7 @@ DBImpl::InsertVectors(const std::string& collection_id, const std::string& parti std::string target_collection_name; status = GetPartitionByTag(collection_id, partition_tag, target_collection_name); if (!status.ok()) { + ENGINE_LOG_ERROR << LogOut("[%s][%ld] Get partition fail: %s", "insert", 0, status.message().c_str()); return status; } @@ -1215,7 +1217,7 @@ DBImpl::QueryAsync(const std::shared_ptr& context, const meta:: // step 1: construct search job auto status = OngoingFileChecker::GetInstance().MarkOngoingFiles(files); - ENGINE_LOG_DEBUG << "Engine query begin, index file count: " << files.size(); + ENGINE_LOG_DEBUG << LogOut("Engine query begin, index file count: %ld", files.size()); scheduler::SearchJobPtr job = std::make_shared(tracer.Context(), k, extra_params, vectors); for (auto& file : files) { scheduler::SegmentSchemaPtr file_ptr = std::make_shared(file); @@ -1856,6 +1858,7 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) { std::string target_collection_name; status = GetPartitionByTag(record.collection_id, record.partition_tag, target_collection_name); if (!status.ok()) { + WAL_LOG_ERROR << LogOut("[%s][%ld] ", "insert", 0) << "Get partition fail: " << status.message(); return status; } @@ -1875,6 +1878,7 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) { std::string target_collection_name; status = GetPartitionByTag(record.collection_id, record.partition_tag, target_collection_name); if (!status.ok()) { + 
WAL_LOG_ERROR << LogOut("[%s][%ld] ", "insert", 0) << "Get partition fail: " << status.message(); return status; } diff --git a/core/src/db/engine/ExecutionEngineImpl.cpp b/core/src/db/engine/ExecutionEngineImpl.cpp index fd6d53f599..efdedae6f3 100644 --- a/core/src/db/engine/ExecutionEngineImpl.cpp +++ b/core/src/db/engine/ExecutionEngineImpl.cpp @@ -763,10 +763,10 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, const milvu } } #endif - TimeRecorder rc("ExecutionEngineImpl::Search float"); + TimeRecorder rc(LogOut("[%s][%ld] ExecutionEngineImpl::Search float", "search", 0)); if (index_ == nullptr) { - ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search"; + ENGINE_LOG_ERROR << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0); return Status(DB_ERROR, "index is null"); } @@ -774,6 +774,7 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, const milvu conf[knowhere::meta::TOPK] = k; auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type()); if (!adapter->CheckSearch(conf, index_->index_type(), index_->index_mode())) { + ENGINE_LOG_ERROR << LogOut("[%s][%ld] Illegal search params", "search", 0); throw Exception(DB_ERROR, "Illegal search params"); } @@ -786,7 +787,8 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, const milvu auto result = index_->Query(dataset, conf); rc.RecordSection("query done"); - ENGINE_LOG_DEBUG << "get uids " << index_->GetUids().size() << " from index " << location_; + ENGINE_LOG_DEBUG << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(), + location_.c_str()); MapAndCopyResult(result, index_->GetUids(), n, k, distances, labels); rc.RecordSection("map uids " + std::to_string(n * k)); @@ -800,10 +802,10 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, const milvu Status ExecutionEngineImpl::Search(int64_t n, const uint8_t* data, int64_t k, const milvus::json& extra_params, float* distances, int64_t* labels, bool hybrid) { - TimeRecorder rc("ExecutionEngineImpl::Search uint8"); + TimeRecorder rc(LogOut("[%s][%ld] ExecutionEngineImpl::Search uint8", "search", 0)); if (index_ == nullptr) { - ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search"; + ENGINE_LOG_ERROR << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0); return Status(DB_ERROR, "index is null"); } @@ -811,6 +813,7 @@ ExecutionEngineImpl::Search(int64_t n, const uint8_t* data, int64_t k, const mil conf[knowhere::meta::TOPK] = k; auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type()); if (!adapter->CheckSearch(conf, index_->index_type(), index_->index_mode())) { + ENGINE_LOG_ERROR << LogOut("[%s][%ld] Illegal search params", "search", 0); throw Exception(DB_ERROR, "Illegal search params"); } @@ -823,7 +826,8 @@ ExecutionEngineImpl::Search(int64_t n, const uint8_t* data, int64_t k, const mil auto result = index_->Query(dataset, conf); rc.RecordSection("query done"); - ENGINE_LOG_DEBUG << "get uids " << index_->GetUids().size() << " from index " << location_; + ENGINE_LOG_DEBUG << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(), + location_.c_str()); MapAndCopyResult(result, index_->GetUids(), n, k, distances, labels); rc.RecordSection("map uids " + std::to_string(n * k)); @@ -837,10 +841,10 @@ ExecutionEngineImpl::Search(int64_t n, const uint8_t* data, int64_t k, const mil Status 
ExecutionEngineImpl::Search(int64_t n, const std::vector& ids, int64_t k, const milvus::json& extra_params, float* distances, int64_t* labels, bool hybrid) { - TimeRecorder rc("ExecutionEngineImpl::Search vector of ids"); + TimeRecorder rc(LogOut("[%s][%ld] ExecutionEngineImpl::Search vector of ids", "search", 0)); if (index_ == nullptr) { - ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search"; + ENGINE_LOG_ERROR << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0); return Status(DB_ERROR, "index is null"); } @@ -848,6 +852,7 @@ ExecutionEngineImpl::Search(int64_t n, const std::vector& ids, int64_t conf[knowhere::meta::TOPK] = k; auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type()); if (!adapter->CheckSearch(conf, index_->index_type(), index_->index_mode())) { + ENGINE_LOG_ERROR << LogOut("[%s][%ld] Illegal search params", "search", 0); throw Exception(DB_ERROR, "Illegal search params"); } @@ -901,7 +906,8 @@ ExecutionEngineImpl::Search(int64_t n, const std::vector& ids, int64_t auto result = index_->QueryById(dataset, conf); rc.RecordSection("query by id done"); - ENGINE_LOG_DEBUG << "get uids " << index_->GetUids().size() << " from index " << location_; + ENGINE_LOG_DEBUG << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(), + location_.c_str()); MapAndCopyResult(result, uids, offsets.size(), k, distances, labels); rc.RecordSection("map uids " + std::to_string(offsets.size() * k)); } diff --git a/core/src/db/insert/MemManagerImpl.cpp b/core/src/db/insert/MemManagerImpl.cpp index cef2d9803e..c35fc42ba8 100644 --- a/core/src/db/insert/MemManagerImpl.cpp +++ b/core/src/db/insert/MemManagerImpl.cpp @@ -62,10 +62,12 @@ MemManagerImpl::InsertVectors(const std::string& collection_id, int64_t length, const uint8_t* vectors, uint64_t lsn, std::set& flushed_tables) { flushed_tables.clear(); if (GetCurrentMem() > options_.insert_buffer_size_) { - ENGINE_LOG_DEBUG << "Insert buffer size exceeds limit. Performing force flush"; + ENGINE_LOG_DEBUG << LogOut("[%s][%ld] ", "insert", 0) + << "Insert buffer size exceeds limit. 
Performing force flush"; // TODO(zhiru): Don't apply delete here in order to avoid possible concurrency issues with Merge auto status = Flush(flushed_tables, false); if (!status.ok()) { + ENGINE_LOG_DEBUG << LogOut("[%s][%ld] ", "insert", 0) << "Flush fail: " << status.message(); return status; } } diff --git a/core/src/db/insert/MemTable.cpp b/core/src/db/insert/MemTable.cpp index 535119fddc..84b8306522 100644 --- a/core/src/db/insert/MemTable.cpp +++ b/core/src/db/insert/MemTable.cpp @@ -53,7 +53,7 @@ MemTable::Add(const VectorSourcePtr& source) { if (!status.ok()) { std::string err_msg = "Insert failed: " + status.ToString(); - ENGINE_LOG_ERROR << err_msg; + ENGINE_LOG_ERROR << LogOut("[%s][%ld] ", "insert", 0) << err_msg; return Status(DB_ERROR, err_msg); } } diff --git a/core/src/db/insert/MemTableFile.cpp b/core/src/db/insert/MemTableFile.cpp index 2055f6f478..48b21c6b8b 100644 --- a/core/src/db/insert/MemTableFile.cpp +++ b/core/src/db/insert/MemTableFile.cpp @@ -65,7 +65,7 @@ MemTableFile::Add(const VectorSourcePtr& source) { std::string err_msg = "MemTableFile::Add: table_file_schema dimension = " + std::to_string(table_file_schema_.dimension_) + ", collection_id = " + table_file_schema_.collection_id_; - ENGINE_LOG_ERROR << err_msg; + ENGINE_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << err_msg; return Status(DB_ERROR, "Not able to create collection file"); } diff --git a/core/src/db/insert/VectorSource.cpp b/core/src/db/insert/VectorSource.cpp index 0af40271ce..d738787d26 100644 --- a/core/src/db/insert/VectorSource.cpp +++ b/core/src/db/insert/VectorSource.cpp @@ -40,6 +40,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment: SafeIDGenerator& id_generator = SafeIDGenerator::GetInstance(); Status status = id_generator.GetNextIDNumbers(num_vectors_added, vector_ids_to_add); if (!status.ok()) { + ENGINE_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << "Generate ids fail: " << status.message(); return status; } } else { @@ -61,6 +62,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment: vectors.resize(size); memcpy(vectors.data(), vectors_.float_data_.data() + current_num_vectors_added * table_file_schema.dimension_, size); + ENGINE_LOG_DEBUG << LogOut("[%s][%ld]", "insert", 0) << "Insert into segment"; status = segment_writer_ptr->AddVectors(table_file_schema.file_id_, vectors, vector_ids_to_add); } else if (!vectors_.binary_data_.empty()) { @@ -77,6 +79,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment: vectors.data(), vectors_.binary_data_.data() + current_num_vectors_added * SingleVectorSize(table_file_schema.dimension_), size); + ENGINE_LOG_DEBUG << LogOut("[%s][%ld]", "insert", 0) << "Insert into segment"; status = segment_writer_ptr->AddVectors(table_file_schema.file_id_, vectors, vector_ids_to_add); } @@ -87,7 +90,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment: vector_ids_.insert(vector_ids_.end(), std::make_move_iterator(vector_ids_to_add.begin()), std::make_move_iterator(vector_ids_to_add.end())); } else { - ENGINE_LOG_ERROR << "VectorSource::Add failed: " + status.ToString(); + ENGINE_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << "VectorSource::Add failed: " + status.ToString(); } return status; diff --git a/core/src/db/wal/WalManager.cpp b/core/src/db/wal/WalManager.cpp index e58c35e056..b4beee671b 100644 --- a/core/src/db/wal/WalManager.cpp +++ b/core/src/db/wal/WalManager.cpp @@ -243,7 +243,7 @@ WalManager::Insert(const 
std::string& collection_id, const std::string& partitio size_t vector_num = vector_ids.size(); if (vector_num == 0) { - WAL_LOG_ERROR << "The ids is empty."; + WAL_LOG_ERROR << LogOut("[%s][%ld] The id array is empty.", "insert", 0); return false; } size_t dim = vectors.size() / vector_num; @@ -265,7 +265,8 @@ WalManager::Insert(const std::string& collection_id, const std::string& partitio max_rcd_num = (mxlog_config_.buffer_size - head_size) / unit_size; } if (max_rcd_num == 0) { - WAL_LOG_ERROR << "Wal buffer size is too small " << mxlog_config_.buffer_size << " unit " << unit_size; + WAL_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << "Wal buffer size is too small " + << mxlog_config_.buffer_size << " unit " << unit_size; return false; } @@ -290,7 +291,8 @@ WalManager::Insert(const std::string& collection_id, const std::string& partitio } lck.unlock(); - WAL_LOG_INFO << collection_id << " insert in part " << partition_tag << " with lsn " << new_lsn; + WAL_LOG_INFO << LogOut("[%s][%ld]", "insert", 0) << collection_id << " insert in part " << partition_tag + << " with lsn " << new_lsn; return p_meta_handler_->SetMXLogInternalMeta(new_lsn); } diff --git a/core/src/scheduler/SchedInst.h b/core/src/scheduler/SchedInst.h index 7ddea4f5fa..3e89cfabbe 100644 --- a/core/src/scheduler/SchedInst.h +++ b/core/src/scheduler/SchedInst.h @@ -110,14 +110,14 @@ class OptimizerInst { for (auto build_id : build_gpus) { build_msg.append(" gpu" + std::to_string(build_id)); } - SERVER_LOG_DEBUG << build_msg; + SERVER_LOG_DEBUG << LogOut("[%s][%d] %s", "search", 0, build_msg.c_str()); std::string search_msg = "Search gpu:"; for (auto search_id : search_gpus) { search_msg.append(" gpu" + std::to_string(search_id)); } search_msg.append(". gpu_search_threshold:" + std::to_string(gpu_search_threshold)); - SERVER_LOG_DEBUG << search_msg; + SERVER_LOG_DEBUG << LogOut("[%s][%d] %s", "search", 0, search_msg.c_str()); pass_list.push_back(std::make_shared()); pass_list.push_back(std::make_shared()); diff --git a/core/src/scheduler/job/SearchJob.cpp b/core/src/scheduler/job/SearchJob.cpp index a83bffa1b7..b19256ce4e 100644 --- a/core/src/scheduler/job/SearchJob.cpp +++ b/core/src/scheduler/job/SearchJob.cpp @@ -28,7 +28,7 @@ SearchJob::AddIndexFile(const SegmentSchemaPtr& index_file) { return false; } - SERVER_LOG_DEBUG << "SearchJob " << id() << " add index file: " << index_file->id_; + SERVER_LOG_DEBUG << LogOut("[%s][%ld] SearchJob %ld add index file: %ld", "search", 0, id(), index_file->id_); index_files_[index_file->id_] = index_file; return true; @@ -38,7 +38,7 @@ void SearchJob::WaitResult() { std::unique_lock lock(mutex_); cv_.wait(lock, [this] { return index_files_.empty(); }); - SERVER_LOG_DEBUG << "SearchJob " << id() << " all done"; + SERVER_LOG_DEBUG << LogOut("[%s][%ld] SearchJob %ld all done", "search", 0, id()); } void @@ -49,7 +49,7 @@ SearchJob::SearchDone(size_t index_id) { cv_.notify_all(); } - SERVER_LOG_DEBUG << "SearchJob " << id() << " finish index file: " << index_id; + SERVER_LOG_DEBUG << LogOut("[%s][%ld] SearchJob %ld finish index file: %ld", "search", 0, id(), index_id); } ResultIds& diff --git a/core/src/scheduler/optimizer/FaissFlatPass.cpp b/core/src/scheduler/optimizer/FaissFlatPass.cpp index d0016727e4..c0fb4780ad 100644 --- a/core/src/scheduler/optimizer/FaissFlatPass.cpp +++ b/core/src/scheduler/optimizer/FaissFlatPass.cpp @@ -54,14 +54,16 @@ FaissFlatPass::Run(const TaskPtr& task) { auto search_job = std::static_pointer_cast(search_task->job_.lock()); ResourcePtr res_ptr; if 
(!gpu_enable_) { - SERVER_LOG_DEBUG << "FaissFlatPass: gpu disable, specify cpu to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissFlatPass: gpu disable, specify cpu to search!", "search", 0); res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); } else if (search_job->nq() < threshold_) { - SERVER_LOG_DEBUG << "FaissFlatPass: nq < gpu_search_threshold, specify cpu to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissFlatPass: nq < gpu_search_threshold, specify cpu to search!", + "search", 0); res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); } else { auto best_device_id = count_ % search_gpus_.size(); - SERVER_LOG_DEBUG << "FaissFlatPass: nq > gpu_search_threshold, specify gpu" << best_device_id << " to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissFlatPass: nq > gpu_search_threshold, specify gpu %d to search!", + "search", 0, best_device_id); ++count_; res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]); } diff --git a/core/src/scheduler/optimizer/FaissIVFFlatPass.cpp b/core/src/scheduler/optimizer/FaissIVFFlatPass.cpp index 42b171e160..61c1f44e4f 100644 --- a/core/src/scheduler/optimizer/FaissIVFFlatPass.cpp +++ b/core/src/scheduler/optimizer/FaissIVFFlatPass.cpp @@ -55,15 +55,16 @@ FaissIVFFlatPass::Run(const TaskPtr& task) { auto search_job = std::static_pointer_cast(search_task->job_.lock()); ResourcePtr res_ptr; if (!gpu_enable_) { - SERVER_LOG_DEBUG << "FaissIVFFlatPass: gpu disable, specify cpu to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFFlatPass: gpu disable, specify cpu to search!", "search", 0); res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); } else if (search_job->nq() < threshold_) { - SERVER_LOG_DEBUG << "FaissIVFFlatPass: nq < gpu_search_threshold, specify cpu to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFFlatPass: nq < gpu_search_threshold, specify cpu to search!", + "search", 0); res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); } else { auto best_device_id = count_ % search_gpus_.size(); - SERVER_LOG_DEBUG << "FaissIVFFlatPass: nq > gpu_search_threshold, specify gpu" << best_device_id - << " to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFFlatPass: nq > gpu_search_threshold, specify gpu %d to search!", + "search", 0, best_device_id); count_++; res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]); } diff --git a/core/src/scheduler/optimizer/FaissIVFPQPass.cpp b/core/src/scheduler/optimizer/FaissIVFPQPass.cpp index 6ca1971232..6a83d39897 100644 --- a/core/src/scheduler/optimizer/FaissIVFPQPass.cpp +++ b/core/src/scheduler/optimizer/FaissIVFPQPass.cpp @@ -57,14 +57,16 @@ FaissIVFPQPass::Run(const TaskPtr& task) { auto search_job = std::static_pointer_cast(search_task->job_.lock()); ResourcePtr res_ptr; if (!gpu_enable_) { - SERVER_LOG_DEBUG << "FaissIVFPQPass: gpu disable, specify cpu to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFPQPass: gpu disable, specify cpu to search!", "search", 0); res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); } else if (search_job->nq() < threshold_) { - SERVER_LOG_DEBUG << "FaissIVFPQPass: nq < gpu_search_threshold, specify cpu to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFPQPass: nq < gpu_search_threshold, specify cpu to search!", + "search", 0); res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); } else { auto best_device_id = count_ % search_gpus_.size(); - SERVER_LOG_DEBUG << "FaissIVFPQPass: nq > gpu_search_threshold, specify 
gpu" << best_device_id << " to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFPQPass: nq > gpu_search_threshold, specify gpu %d to search!", + "search", 0, best_device_id); ++count_; res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]); } diff --git a/core/src/scheduler/optimizer/FaissIVFSQ8HPass.cpp b/core/src/scheduler/optimizer/FaissIVFSQ8HPass.cpp index c7ed1e409b..5a78089ab3 100644 --- a/core/src/scheduler/optimizer/FaissIVFSQ8HPass.cpp +++ b/core/src/scheduler/optimizer/FaissIVFSQ8HPass.cpp @@ -54,16 +54,17 @@ FaissIVFSQ8HPass::Run(const TaskPtr& task) { auto search_job = std::static_pointer_cast(search_task->job_.lock()); ResourcePtr res_ptr; if (!gpu_enable_) { - SERVER_LOG_DEBUG << "FaissIVFSQ8HPass: gpu disable, specify cpu to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8HPass: gpu disable, specify cpu to search!", "search", 0); res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); } if (search_job->nq() < threshold_) { - SERVER_LOG_DEBUG << "FaissIVFSQ8HPass: nq < gpu_search_threshold, specify cpu to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8HPass: nq < gpu_search_threshold, specify cpu to search!", + "search", 0); res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); } else { auto best_device_id = count_ % search_gpus_.size(); - SERVER_LOG_DEBUG << "FaissIVFSQ8HPass: nq > gpu_search_threshold, specify gpu" << best_device_id - << " to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8HPass: nq > gpu_search_threshold, specify gpu %d to search!", + "search", 0, best_device_id); ++count_; res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]); } diff --git a/core/src/scheduler/optimizer/FaissIVFSQ8Pass.cpp b/core/src/scheduler/optimizer/FaissIVFSQ8Pass.cpp index bddee247ad..e89fab0fed 100644 --- a/core/src/scheduler/optimizer/FaissIVFSQ8Pass.cpp +++ b/core/src/scheduler/optimizer/FaissIVFSQ8Pass.cpp @@ -55,15 +55,16 @@ FaissIVFSQ8Pass::Run(const TaskPtr& task) { auto search_job = std::static_pointer_cast(search_task->job_.lock()); ResourcePtr res_ptr; if (!gpu_enable_) { - SERVER_LOG_DEBUG << "FaissIVFSQ8Pass: gpu disable, specify cpu to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8Pass: gpu disable, specify cpu to search!", "search", 0); res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); } else if (search_job->nq() < threshold_) { - SERVER_LOG_DEBUG << "FaissIVFSQ8Pass: nq < gpu_search_threshold, specify cpu to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8Pass: nq < gpu_search_threshold, specify cpu to search!", + "search", 0); res_ptr = ResMgrInst::GetInstance()->GetResource("cpu"); } else { auto best_device_id = count_ % search_gpus_.size(); - SERVER_LOG_DEBUG << "FaissIVFSQ8Pass: nq > gpu_search_threshold, specify gpu" << best_device_id - << " to search!"; + SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8Pass: nq > gpu_search_threshold, specify gpu %d to search!", + "search", 0, best_device_id); count_++; res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]); } diff --git a/core/src/scheduler/task/SearchTask.cpp b/core/src/scheduler/task/SearchTask.cpp index 3609715535..85390dd82a 100644 --- a/core/src/scheduler/task/SearchTask.cpp +++ b/core/src/scheduler/task/SearchTask.cpp @@ -132,7 +132,7 @@ void XSearchTask::Load(LoadType type, uint8_t device_id) { milvus::server::ContextFollower tracer(context_, "XSearchTask::Load " + std::to_string(file_->id_)); - 
TimeRecorder rc(""); + TimeRecorder rc(LogOut("[%s][%ld]", "search", 0)); Status stat = Status::OK(); std::string error_msg; std::string type_str; @@ -159,6 +159,7 @@ XSearchTask::Load(LoadType type, uint8_t device_id) { } catch (std::exception& ex) { // typical error: out of disk space or permition denied error_msg = "Failed to load index file: " + std::string(ex.what()); + ENGINE_LOG_ERROR << LogOut("[%s][%ld] Encounter execption: %s", "search", 0, error_msg.c_str()); stat = Status(SERVER_UNEXPECTED_ERROR, error_msg); } fiu_do_on("XSearchTask.Load.out_of_memory", stat = Status(SERVER_UNEXPECTED_ERROR, "out of memory")); @@ -211,7 +212,8 @@ XSearchTask::Execute() { // ENGINE_LOG_DEBUG << "Searching in file id:" << index_id_ << " with " // << search_contexts_.size() << " tasks"; - TimeRecorder rc("DoSearch file id:" + std::to_string(index_id_)); + // TimeRecorder rc("DoSearch file id:" + std::to_string(index_id_)); + TimeRecorder rc(LogOut("[%s][%ld] DoSearch file id:%ld", "search", 0, index_id_)); server::CollectDurationMetrics metrics(index_type_); @@ -265,7 +267,8 @@ XSearchTask::Execute() { // step 3: pick up topk result auto spec_k = file_->row_count_ < topk ? file_->row_count_ : topk; if (spec_k == 0) { - ENGINE_LOG_WARNING << "Searching in an empty file. file location = " << file_->location_; + ENGINE_LOG_WARNING << LogOut("[%s][%ld] Searching in an empty file. file location = %s", "search", 0, + file_->location_.c_str()); } { @@ -286,7 +289,7 @@ XSearchTask::Execute() { span = rc.RecordSection(hdr + ", reduce topk"); // search_job->AccumReduceCost(span); } catch (std::exception& ex) { - ENGINE_LOG_ERROR << "SearchTask encounter exception: " << ex.what(); + ENGINE_LOG_ERROR << LogOut("[%s][%ld] SearchTask encounter exception: %s", "search", 0, ex.what()); // search_job->IndexSearchDone(index_id_);//mark as done avoid dead lock, even search failed } @@ -305,6 +308,7 @@ XSearchTask::MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const sch size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids, scheduler::ResultDistances& tar_distances) { if (src_ids.empty()) { + ENGINE_LOG_DEBUG << LogOut("[%s][%d] Search result is empty.", "search", 0); return; } diff --git a/core/src/segment/SegmentWriter.cpp b/core/src/segment/SegmentWriter.cpp index 2589cb565c..859d42799c 100644 --- a/core/src/segment/SegmentWriter.cpp +++ b/core/src/segment/SegmentWriter.cpp @@ -61,6 +61,7 @@ SegmentWriter::Serialize() { auto status = WriteBloomFilter(); if (!status.ok()) { + ENGINE_LOG_ERROR << status.message(); return status; } @@ -70,8 +71,10 @@ SegmentWriter::Serialize() { start = std::chrono::high_resolution_clock::now(); + ENGINE_LOG_DEBUG << "Write vectors"; status = WriteVectors(); if (!status.ok()) { + ENGINE_LOG_ERROR << "Write vectors fail: " << status.message(); return status; } diff --git a/core/src/server/Server.cpp b/core/src/server/Server.cpp index 6036fa0333..63eb25d5d9 100644 --- a/core/src/server/Server.cpp +++ b/core/src/server/Server.cpp @@ -193,6 +193,11 @@ Server::Start() { #else SERVER_LOG_INFO << "CPU edition"; #endif + /* record config and hardware information into log */ + LogConfigInFile(config_filename_); + LogCpuInfo(); + LogConfigInMem(); + server::Metrics::GetInstance().Init(); server::SystemInfo::GetInstance().Init(); diff --git a/core/src/server/delivery/request/InsertRequest.cpp b/core/src/server/delivery/request/InsertRequest.cpp index 8e284a3aac..10e0752146 100644 --- a/core/src/server/delivery/request/InsertRequest.cpp +++ 
b/core/src/server/delivery/request/InsertRequest.cpp @@ -46,28 +46,32 @@ InsertRequest::Create(const std::shared_ptr& context, c Status InsertRequest::OnExecute() { + SERVER_LOG_INFO << LogOut("[%s][%ld] ", "insert", 0) << "Execute insert request."; try { int64_t vector_count = vectors_data_.vector_count_; fiu_do_on("InsertRequest.OnExecute.throw_std_exception", throw std::exception()); std::string hdr = "InsertRequest(collection=" + collection_name_ + ", n=" + std::to_string(vector_count) + ", partition_tag=" + partition_tag_ + ")"; - TimeRecorder rc(hdr); + TimeRecorder rc(LogOut("[%s][%ld] %s", "insert", 0, hdr.c_str())); // step 1: check arguments auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { + SERVER_LOG_ERROR << LogOut("[%s][%ld] Invalid collection name: %s", "insert", 0, status.message().c_str()); return status; } if (vectors_data_.float_data_.empty() && vectors_data_.binary_data_.empty()) { - return Status(SERVER_INVALID_ROWRECORD_ARRAY, - "The vector array is empty. Make sure you have entered vector records."); + std::string msg = "The vector array is empty. Make sure you have entered vector records."; + SERVER_LOG_ERROR << LogOut("[%s][%ld] Invalid records: %s", "insert", 0, msg.c_str()); + return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg); } fiu_do_on("InsertRequest.OnExecute.id_array_error", vectors_data_.id_array_.resize(vector_count + 1)); if (!vectors_data_.id_array_.empty()) { if (vectors_data_.id_array_.size() != vector_count) { - return Status(SERVER_ILLEGAL_VECTOR_ID, - "The size of vector ID array must be equal to the size of the vector."); + std::string msg = "The size of vector ID array must be equal to the size of the vector."; + SERVER_LOG_ERROR << LogOut("[%s][%ld] Invalid id array: %s", "insert", 0, msg.c_str()); + return Status(SERVER_ILLEGAL_VECTOR_ID, msg); } } @@ -80,12 +84,17 @@ InsertRequest::OnExecute() { fiu_do_on("InsertRequest.OnExecute.describe_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { + SERVER_LOG_ERROR << LogOut("[%s][%ld] Collection %s not found", "insert", 0, collection_name_.c_str()); return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { + SERVER_LOG_ERROR << LogOut("[%s][%ld] Describe collection %s fail: %s", "insert", 0, + collection_name_.c_str(), status.message().c_str()); return status; } } else { if (!table_schema.owner_collection_.empty()) { + SERVER_LOG_ERROR << LogOut("[%s][%ld] owner collection of %s is not empty", "insert", 0, + collection_name_.c_str()); return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } @@ -97,17 +106,18 @@ InsertRequest::OnExecute() { table_schema.flag_ = engine::meta::FLAG_MASK_HAS_USERID); // user already provided id before, all insert action require user id if ((table_schema.flag_ & engine::meta::FLAG_MASK_HAS_USERID) != 0 && !user_provide_ids) { - return Status(SERVER_ILLEGAL_VECTOR_ID, - "Entities IDs are user-defined. 
Please provide IDs for all entities of the collection."; + SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str()); + return Status(SERVER_ILLEGAL_VECTOR_ID, msg); } fiu_do_on("InsertRequest.OnExecute.illegal_vector_id2", user_provide_ids = true; table_schema.flag_ = engine::meta::FLAG_MASK_NO_USERID); // user didn't provided id before, no need to provide user id if ((table_schema.flag_ & engine::meta::FLAG_MASK_NO_USERID) != 0 && user_provide_ids) { - return Status( - SERVER_ILLEGAL_VECTOR_ID, - "Entities IDs are auto-generated. All vectors of this collection must use auto-generated IDs."); + std::string msg = + "Entities IDs are auto-generated. All vectors of this collection must use auto-generated IDs."; + return Status(SERVER_ILLEGAL_VECTOR_ID, msg); } rc.RecordSection("check validation"); @@ -119,34 +129,42 @@ InsertRequest::OnExecute() { // step 4: some metric type doesn't support float vectors if (!vectors_data_.float_data_.empty()) { // insert float vectors if (engine::utils::IsBinaryMetricType(table_schema.metric_type_)) { - return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Collection metric type doesn't support float vectors."); + std::string msg = "Collection metric type doesn't support float vectors."; + SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str()); + return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg); } // check prepared float data if (vectors_data_.float_data_.size() % vector_count != 0) { - return Status(SERVER_INVALID_ROWRECORD_ARRAY, - "The vector dimension must be equal to the collection dimension."); + std::string msg = "The vector dimension must be equal to the collection dimension."; + SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str()); + return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg); } fiu_do_on("InsertRequest.OnExecute.invalid_dim", table_schema.dimension_ = -1); if (vectors_data_.float_data_.size() / vector_count != table_schema.dimension_) { - return Status(SERVER_INVALID_VECTOR_DIMENSION, - "The vector dimension must be equal to the collection dimension."); + std::string msg = "The vector dimension must be equal to the collection dimension."; + SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str()); + return Status(SERVER_INVALID_VECTOR_DIMENSION, msg); } } else if (!vectors_data_.binary_data_.empty()) { // insert binary vectors if (!engine::utils::IsBinaryMetricType(table_schema.metric_type_)) { - return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Collection metric type doesn't support binary vectors."); + std::string msg = "Collection metric type doesn't support binary vectors."; + SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str()); + return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg); } // check prepared binary data if (vectors_data_.binary_data_.size() % vector_count != 0) { - return Status(SERVER_INVALID_ROWRECORD_ARRAY, - "The vector dimension must be equal to the collection dimension."); + std::string msg = "The vector dimension must be equal to the collection dimension."; + SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str()); + return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg); } if (vectors_data_.binary_data_.size() * 8 / vector_count != table_schema.dimension_) { - return Status(SERVER_INVALID_VECTOR_DIMENSION, - "The vector dimension must be equal to the collection dimension."); + std::string msg = "The vector dimension must be equal to the collection dimension."; + SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str()); + return 
Status(SERVER_INVALID_VECTOR_DIMENSION, msg); } } @@ -157,6 +175,7 @@ InsertRequest::OnExecute() { status = DBWrapper::DB()->InsertVectors(collection_name_, partition_tag_, vectors_data_); fiu_do_on("InsertRequest.OnExecute.insert_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { + SERVER_LOG_ERROR << LogOut("[%s][%ld] Insert fail: %s", "insert", 0, status.message().c_str()); return status; } @@ -165,6 +184,7 @@ InsertRequest::OnExecute() { if (ids_size != vec_count) { std::string msg = "Add " + std::to_string(vec_count) + " vectors but only return " + std::to_string(ids_size) + " id"; + SERVER_LOG_ERROR << LogOut("[%s][%ld] Insert fail: %s", "insert", 0, msg.c_str()); return Status(SERVER_ILLEGAL_VECTOR_ID, msg); } @@ -180,6 +200,7 @@ InsertRequest::OnExecute() { rc.RecordSection("add vectors to engine"); rc.ElapseFromBegin("total cost"); } catch (std::exception& ex) { + SERVER_LOG_ERROR << LogOut("[%s][%ld] Encounter exception: %s", "insert", 0, ex.what()); return Status(SERVER_UNEXPECTED_ERROR, ex.what()); } diff --git a/core/src/server/delivery/request/SearchRequest.cpp b/core/src/server/delivery/request/SearchRequest.cpp index 7e45d68ca0..8930db8964 100644 --- a/core/src/server/delivery/request/SearchRequest.cpp +++ b/core/src/server/delivery/request/SearchRequest.cpp @@ -10,6 +10,11 @@ // or implied. See the License for the specific language governing permissions and limitations under the License. #include "server/delivery/request/SearchRequest.h" + +#include + +#include + #include "db/Utils.h" #include "server/DBWrapper.h" #include "utils/CommonUtil.h" @@ -17,9 +22,6 @@ #include "utils/TimeRecorder.h" #include "utils/ValidationUtil.h" -#include -#include - #ifdef MILVUS_ENABLE_PROFILING #include #endif @@ -52,19 +54,22 @@ SearchRequest::Create(const std::shared_ptr& context, c Status SearchRequest::OnPreExecute() { + SERVER_LOG_INFO << LogOut("[%s][%ld] ", "search", 0) << "Search pre-execute. 
Check search parameters"; std::string hdr = "SearchRequest pre-execute(collection=" + collection_name_ + ")"; - TimeRecorderAuto rc(hdr); + TimeRecorderAuto rc(LogOut("[%s][%ld] %s", "search", 0, hdr.c_str())); milvus::server::ContextChild tracer_pre(context_, "Pre Query"); // step 1: check table name auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { + SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, status.message().c_str()); return status; } // step 2: check search topk status = ValidationUtil::ValidateSearchTopk(topk_); if (!status.ok()) { + SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, status.message().c_str()); return status; } @@ -72,6 +77,7 @@ SearchRequest::OnPreExecute() { status = ValidationUtil::ValidatePartitionTags(partition_list_); fiu_do_on("SearchRequest.OnExecute.invalid_partition_tags", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { + SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, status.message().c_str()); return status; } @@ -80,12 +86,13 @@ SearchRequest::OnPreExecute() { Status SearchRequest::OnExecute() { + SERVER_LOG_INFO << LogOut("[%s][%ld] ", "search", 0) << "Search execute."; try { uint64_t vector_count = vectors_data_.vector_count_; fiu_do_on("SearchRequest.OnExecute.throw_std_exception", throw std::exception()); std::string hdr = "SearchRequest execute(collection=" + collection_name_ + ", nq=" + std::to_string(vector_count) + ", k=" + std::to_string(topk_) + ")"; - TimeRecorderAuto rc(hdr); + TimeRecorderAuto rc(LogOut("[%s][%d] %s", "search", 0, hdr.c_str())); // step 4: check table existence // only process root table, ignore partition table @@ -95,12 +102,17 @@ SearchRequest::OnExecute() { fiu_do_on("SearchRequest.OnExecute.describe_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { + SERVER_LOG_ERROR << LogOut("[%s][%d] Collection %s not found: %s", "search", 0, + collection_name_.c_str(), status.message().c_str()); return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { + SERVER_LOG_ERROR << LogOut("[%s][%d] Error occurred when describing collection %s: %s", "search", 0, + collection_name_.c_str(), status.message().c_str()); return status; } } else { if (!collection_schema_.owner_collection_.empty()) { + SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, TableNotExistMsg(collection_name_).c_str()); return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } @@ -108,12 +120,14 @@ SearchRequest::OnExecute() { // step 5: check search parameters status = ValidationUtil::ValidateSearchParams(extra_params_, collection_schema_, topk_); if (!status.ok()) { + SERVER_LOG_ERROR << LogOut("[%s][%d] Invalid search params: %s", "search", 0, status.message().c_str()); return status; } // step 6: check vector data according to metric type status = ValidationUtil::ValidateVectorData(vectors_data_, collection_schema_); if (!status.ok()) { + SERVER_LOG_ERROR << LogOut("[%s][%d] Invalid vector data: %s", "search", 0, status.message().c_str()); return status; } @@ -143,6 +157,7 @@ SearchRequest::OnExecute() { #endif fiu_do_on("SearchRequest.OnExecute.query_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { + SERVER_LOG_ERROR << LogOut("[%s][%d] Query fail: %s", "search", 0, status.message().c_str()); return status; } fiu_do_on("SearchRequest.OnExecute.empty_result_ids", result_ids.clear()); @@ -157,6 +172,7 @@ 
SearchRequest::OnExecute() { result_.distance_list_.swap(result_distances); rc.RecordSection("construct result"); } catch (std::exception& ex) { + SERVER_LOG_ERROR << LogOut("[%s][%d] Encounter exception: %s", "search", 0, ex.what()); return Status(SERVER_UNEXPECTED_ERROR, ex.what()); } diff --git a/core/src/server/grpc_impl/GrpcRequestHandler.cpp b/core/src/server/grpc_impl/GrpcRequestHandler.cpp index 8d5523e06d..2650b8fe6b 100644 --- a/core/src/server/grpc_impl/GrpcRequestHandler.cpp +++ b/core/src/server/grpc_impl/GrpcRequestHandler.cpp @@ -19,6 +19,7 @@ #include "tracing/TextMapCarrier.h" #include "tracing/TracerUtil.h" #include "utils/Log.h" +#include "utils/LogUtil.h" #include "utils/TimeRecorder.h" namespace milvus { @@ -300,6 +301,8 @@ GrpcRequestHandler::Insert(::grpc::ServerContext* context, const ::milvus::grpc: ::milvus::grpc::VectorIds* response) { CHECK_NULLPTR_RETURN(request); + SERVER_LOG_INFO << LogOut("[%s][%d] Start insert.", "insert", 0); + // step 1: copy vector data engine::VectorsData vectors; CopyRowRecords(request->row_record_array(), request->row_id_array(), vectors); @@ -313,6 +316,7 @@ GrpcRequestHandler::Insert(::grpc::ServerContext* context, const ::milvus::grpc: memcpy(response->mutable_vector_id_array()->mutable_data(), vectors.id_array_.data(), vectors.id_array_.size() * sizeof(int64_t)); + SERVER_LOG_INFO << LogOut("[%s][%d] Insert done.", "insert", 0); SET_RESPONSE(response->mutable_status(), status, context); return ::grpc::Status::OK; } @@ -364,6 +368,7 @@ GrpcRequestHandler::Search(::grpc::ServerContext* context, const ::milvus::grpc: ::milvus::grpc::TopKQueryResult* response) { CHECK_NULLPTR_RETURN(request); + SERVER_LOG_INFO << LogOut("[%s][%d] Search start in gRPC server", "search", 0); // step 1: copy vector data engine::VectorsData vectors; CopyRowRecords(request->query_record_array(), google::protobuf::RepeatedField(), vectors); @@ -393,6 +398,8 @@ GrpcRequestHandler::Search(::grpc::ServerContext* context, const ::milvus::grpc: // step 5: construct and return result ConstructResults(result, response); + SERVER_LOG_INFO << LogOut("[%s][%d] Search done.", "search", 0); + SET_RESPONSE(response->mutable_status(), status, context); return ::grpc::Status::OK; diff --git a/core/src/server/web_impl/Constants.h b/core/src/server/web_impl/Constants.h index 83e49b661e..2697e7d55c 100644 --- a/core/src/server/web_impl/Constants.h +++ b/core/src/server/web_impl/Constants.h @@ -40,6 +40,7 @@ static const char* NAME_ENGINE_TYPE_IVFSQ8H = "IVFSQ8H"; static const char* NAME_ENGINE_TYPE_RNSG = "RNSG"; static const char* NAME_ENGINE_TYPE_IVFPQ = "IVFPQ"; static const char* NAME_ENGINE_TYPE_HNSW = "HNSW"; +static const char* NAME_ENGINE_TYPE_ANNOY = "ANNOY"; static const char* NAME_METRIC_TYPE_L2 = "L2"; static const char* NAME_METRIC_TYPE_IP = "IP"; diff --git a/core/src/server/web_impl/Types.h b/core/src/server/web_impl/Types.h index 0c01f3fa3d..2f3f89d4b1 100644 --- a/core/src/server/web_impl/Types.h +++ b/core/src/server/web_impl/Types.h @@ -81,6 +81,7 @@ static const std::unordered_map IndexMap = { {engine::EngineType::NSG_MIX, NAME_ENGINE_TYPE_RNSG}, {engine::EngineType::FAISS_PQ, NAME_ENGINE_TYPE_IVFPQ}, {engine::EngineType::HNSW, NAME_ENGINE_TYPE_HNSW}, + {engine::EngineType::ANNOY, NAME_ENGINE_TYPE_ANNOY}, }; static const std::unordered_map IndexNameMap = { @@ -91,6 +92,7 @@ static const std::unordered_map IndexNameMap = {NAME_ENGINE_TYPE_RNSG, engine::EngineType::NSG_MIX}, {NAME_ENGINE_TYPE_IVFPQ, engine::EngineType::FAISS_PQ}, {NAME_ENGINE_TYPE_HNSW, 
engine::EngineType::HNSW}, + {NAME_ENGINE_TYPE_ANNOY, engine::EngineType::ANNOY}, }; static const std::unordered_map MetricMap = { diff --git a/core/src/utils/Log.cpp b/core/src/utils/Log.cpp new file mode 100644 index 0000000000..6af45a2e16 --- /dev/null +++ b/core/src/utils/Log.cpp @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2020 Zilliz. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +// or implied. See the License for the specific language governing permissions and limitations under the License. +#include "utils/Log.h" + +#include +#include +#include +#include + +namespace milvus { + +std::string +LogOut(const char* pattern, ...) { + size_t len = strnlen(pattern, 1024) + 256; + auto str_p = std::make_unique(len); + memset(str_p.get(), 0, len); + + va_list vl; + va_start(vl, pattern); + vsnprintf(str_p.get(), len, pattern, vl); + va_end(vl); + + return std::string(str_p.get()); +} + +} // namespace milvus diff --git a/core/src/utils/Log.h b/core/src/utils/Log.h index e3052e2cbd..6cc64c387c 100644 --- a/core/src/utils/Log.h +++ b/core/src/utils/Log.h @@ -11,6 +11,8 @@ #pragma once +#include + #include "easyloggingpp/easylogging++.h" namespace milvus { @@ -64,4 +66,8 @@ namespace milvus { #define WAL_LOG_ERROR LOG(ERROR) << WAL_DOMAIN_NAME #define WAL_LOG_FATAL LOG(FATAL) << WAL_DOMAIN_NAME +///////////////////////////////////////////////////////////////////////////////////////////////////// +std::string +LogOut(const char* pattern, ...); + } // namespace milvus diff --git a/core/src/utils/LogUtil.cpp b/core/src/utils/LogUtil.cpp index def6e08fc5..95056d24fa 100644 --- a/core/src/utils/LogUtil.cpp +++ b/core/src/utils/LogUtil.cpp @@ -11,10 +11,15 @@ #include "utils/LogUtil.h" -#include #include +#include #include +#include + +#include "config/Config.h" +#include "utils/Log.h" + namespace milvus { namespace server { @@ -86,5 +91,49 @@ InitLog(const std::string& log_config_file) { return Status::OK(); } +void +LogConfigInFile(const std::string& path) { + // TODO(yhz): Check if file exists + auto node = YAML::LoadFile(path); + YAML::Emitter out; + out << node; + SERVER_LOG_DEBUG << "\n\n" + << std::string(15, '*') << "Config in file" << std::string(15, '*') << "\n\n" + << out.c_str(); +} + +void +LogConfigInMem() { + auto& config = Config::GetInstance(); + std::string config_str; + config.GetConfigJsonStr(config_str, 3); + SERVER_LOG_DEBUG << "\n\n" + << std::string(15, '*') << "Config in memory" << std::string(15, '*') << "\n\n" + << config_str; +} + +void +LogCpuInfo() { + /*CPU information*/ + std::fstream fcpu("/proc/cpuinfo", std::ios::in); + if (!fcpu.is_open()) { + SERVER_LOG_WARNING << "Cannot obtain CPU information. Open file /proc/cpuinfo fail: " << strerror(errno); + return; + } + std::stringstream cpu_info_ss; + cpu_info_ss << fcpu.rdbuf(); + fcpu.close(); + std::string cpu_info = cpu_info_ss.str(); + + auto processor_pos = cpu_info.rfind("processor"); + if (std::string::npos == processor_pos) { + SERVER_LOG_WARNING << "Cannot obtain CPU information. 
No substring \'processor\' found"; + return; + } + + auto sub_str = cpu_info.substr(processor_pos); + SERVER_LOG_DEBUG << "\n\n" << std::string(15, '*') << "CPU" << std::string(15, '*') << "\n\n" << sub_str; +} + } // namespace server } // namespace milvus diff --git a/core/src/utils/LogUtil.h b/core/src/utils/LogUtil.h index c188cc9a35..2d8b38ddf2 100644 --- a/core/src/utils/LogUtil.h +++ b/core/src/utils/LogUtil.h @@ -33,5 +33,14 @@ RolloutHandler(const char* filename, std::size_t size, el::Level level); #define LOCATION_INFO "" #endif +void +LogConfigInFile(const std::string& path); + +void +LogConfigInMem(); + +void +LogCpuInfo(); + } // namespace server } // namespace milvus diff --git a/core/unittest/CMakeLists.txt b/core/unittest/CMakeLists.txt index 0c57df086a..5d48de56a7 100644 --- a/core/unittest/CMakeLists.txt +++ b/core/unittest/CMakeLists.txt @@ -119,6 +119,7 @@ set(entry_file set(helper_files ${MILVUS_ENGINE_SRC}/config/Config.cpp ${MILVUS_ENGINE_SRC}/utils/CommonUtil.cpp + ${MILVUS_ENGINE_SRC}/utils/Log.cpp ${MILVUS_ENGINE_SRC}/utils/TimeRecorder.cpp ${MILVUS_ENGINE_SRC}/utils/Status.cpp ${MILVUS_ENGINE_SRC}/utils/StringHelpFunctions.cpp