Mirror of https://gitee.com/milvus-io/milvus.git (synced 2026-01-07 19:31:51 +08:00)

Merge branch 'branch-1.2' into 'branch-1.2'

MS-47 Add vps and query metrics

See merge request megasearch/vecwise_engine!52

Former-commit-id: d3c26123790f1eb56f4a16c791a6723c71ee0e6f

commit 47107b66dc
@@ -18,6 +18,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-31 - cmake: add prometheus
- MS-33 - cmake: add -j4 to make third party packages build faster
- MS-27 - support gpu config and disable license build config in cmake
- MS-47 - Add query vps metrics

### Task
@@ -88,8 +88,10 @@ Status DBImpl<EngineT>::Query(const std::string &table_id, size_t k, size_t nq,
    auto total_time = METRICS_MICROSECONDS(start_time,end_time);
    auto average_time = total_time / nq;
    for (int i = 0; i < nq; ++i) {
        server::Metrics::GetInstance().QueryResponseSummaryObserve(average_time);
        server::Metrics::GetInstance().QueryResponseSummaryObserve(total_time);
    }
    server::Metrics::GetInstance().QueryVectorResponseSummaryObserve(average_time, nq);
    server::Metrics::GetInstance().QueryVectorResponsePerSecondGaugeSet(double (nq) / total_time);
    return result;
}
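A note on the arithmetic in this hunk, assuming METRICS_MICROSECONDS reports elapsed time in microseconds (as its name suggests): average_time is the latency attributed to each of the nq query vectors, and nq / total_time is a throughput in vectors per microsecond, which is the value the query_vector_response_per_microsecond gauge declared later in this diff receives. A small illustrative sketch of the same math (the struct and helper below are hypothetical, not part of the commit):

    #include <cstddef>

    // Hypothetical helper mirroring the metric math in DBImpl::Query above.
    struct QueryTimingSample {
        double average_time_us;          // per-vector latency, in microseconds
        double vectors_per_microsecond;  // value fed to the throughput gauge
    };

    inline QueryTimingSample ComputeQueryTiming(double total_time_us, std::size_t nq) {
        QueryTimingSample s{};
        s.average_time_us = total_time_us / static_cast<double>(nq);
        s.vectors_per_microsecond = static_cast<double>(nq) / total_time_us;
        return s;
    }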
@@ -69,6 +69,8 @@ class MetricsBase{
    virtual void DataFileSizeGaugeSet(double value) {};
    virtual void AddVectorsSuccessGaugeSet(double value) {};
    virtual void AddVectorsFailGaugeSet(double value) {};
    virtual void QueryVectorResponseSummaryObserve(double value, int count = 1) {};
    virtual void QueryVectorResponsePerSecondGaugeSet(double value) {};
};
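The two new virtuals follow the existing MetricsBase convention: empty default bodies let call sites emit metrics unconditionally, and a concrete backend overrides only the methods it actually exports. A condensed sketch of that pattern, using illustrative class names rather than the commit's own:

    // Illustrative sketch of the no-op base / overriding backend pattern.
    class MetricsBaseSketch {
     public:
        virtual ~MetricsBaseSketch() = default;
        // Doing nothing by default keeps instrumentation calls safe even when
        // no metrics backend is configured.
        virtual void QueryVectorResponseSummaryObserve(double value, int count = 1) {}
        virtual void QueryVectorResponsePerSecondGaugeSet(double value) {}
    };

    class RecordingMetricsSketch : public MetricsBaseSketch {
     public:
        void QueryVectorResponseSummaryObserve(double value, int count = 1) override {
            total_observations_ += count;  // a real backend would observe a Prometheus summary here
        }
        void QueryVectorResponsePerSecondGaugeSet(double value) override { last_rate_ = value; }

     private:
        long total_observations_ = 0;
        double last_rate_ = 0.0;
    };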
@@ -81,27 +81,29 @@ class PrometheusMetrics: public MetricsBase {
    void MemUsageTotalGaugeIncrement(double value = 1.0) override { if(startup_) mem_usage_total_gauge_.Increment(value);};
    void MemUsageTotalGaugeDecrement(double value = 1.0) override { if(startup_) mem_usage_total_gauge_.Decrement(value);};

    void MetaAccessTotalIncrement(double value = 1) { if(startup_) meta_access_total_.Increment(value);};
    void MetaAccessDurationSecondsHistogramObserve(double value) { if(startup_) meta_access_duration_seconds_histogram_.Observe(value);};
    void MetaAccessTotalIncrement(double value = 1) override { if(startup_) meta_access_total_.Increment(value);};
    void MetaAccessDurationSecondsHistogramObserve(double value) override { if(startup_) meta_access_duration_seconds_histogram_.Observe(value);};

    void FaissDiskLoadDurationSecondsHistogramObserve(double value) { if(startup_) faiss_disk_load_duration_seconds_histogram_.Observe(value);};
    void FaissDiskLoadSizeBytesHistogramObserve(double value) { if(startup_) faiss_disk_load_size_bytes_histogram_.Observe(value);};
    void FaissDiskLoadDurationSecondsHistogramObserve(double value) override { if(startup_) faiss_disk_load_duration_seconds_histogram_.Observe(value);};
    void FaissDiskLoadSizeBytesHistogramObserve(double value) override { if(startup_) faiss_disk_load_size_bytes_histogram_.Observe(value);};
    // void FaissDiskLoadIOSpeedHistogramObserve(double value) { if(startup_) faiss_disk_load_IO_speed_histogram_.Observe(value);};
    void FaissDiskLoadIOSpeedGaugeSet(double value) { if(startup_) faiss_disk_load_IO_speed_gauge_.Set(value);};
    void FaissDiskLoadIOSpeedGaugeSet(double value) override { if(startup_) faiss_disk_load_IO_speed_gauge_.Set(value);};

    void CacheAccessTotalIncrement(double value = 1) { if(startup_) cache_access_total_.Increment(value);};
    void MemTableMergeDurationSecondsHistogramObserve(double value) { if(startup_) mem_table_merge_duration_seconds_histogram_.Observe(value);};
    void SearchIndexDataDurationSecondsHistogramObserve(double value) { if(startup_) search_index_data_duration_seconds_histogram_.Observe(value);};
    void SearchRawDataDurationSecondsHistogramObserve(double value) { if(startup_) search_raw_data_duration_seconds_histogram_.Observe(value);};
    void IndexFileSizeTotalIncrement(double value = 1) { if(startup_) index_file_size_total_.Increment(value);};
    void RawFileSizeTotalIncrement(double value = 1) { if(startup_) raw_file_size_total_.Increment(value);};
    void IndexFileSizeGaugeSet(double value) { if(startup_) index_file_size_gauge_.Set(value);};
    void RawFileSizeGaugeSet(double value) { if(startup_) raw_file_size_gauge_.Set(value);};
    void QueryResponseSummaryObserve(double value) {if(startup_) query_response_summary_.Observe(value);};
    void DiskStoreIOSpeedGaugeSet(double value) { if(startup_) disk_store_IO_speed_gauge_.Set(value);};
    void DataFileSizeGaugeSet(double value) { if(startup_) data_file_size_gauge_.Set(value);};
    void AddVectorsSuccessGaugeSet(double value) { if(startup_) add_vectors_success_gauge_.Set(value);};
    void AddVectorsFailGaugeSet(double value) { if(startup_) add_vectors_fail_gauge_.Set(value);};
    void CacheAccessTotalIncrement(double value = 1) override { if(startup_) cache_access_total_.Increment(value);};
    void MemTableMergeDurationSecondsHistogramObserve(double value) override { if(startup_) mem_table_merge_duration_seconds_histogram_.Observe(value);};
    void SearchIndexDataDurationSecondsHistogramObserve(double value) override { if(startup_) search_index_data_duration_seconds_histogram_.Observe(value);};
    void SearchRawDataDurationSecondsHistogramObserve(double value) override { if(startup_) search_raw_data_duration_seconds_histogram_.Observe(value);};
    void IndexFileSizeTotalIncrement(double value = 1) override { if(startup_) index_file_size_total_.Increment(value);};
    void RawFileSizeTotalIncrement(double value = 1) override { if(startup_) raw_file_size_total_.Increment(value);};
    void IndexFileSizeGaugeSet(double value) override { if(startup_) index_file_size_gauge_.Set(value);};
    void RawFileSizeGaugeSet(double value) override { if(startup_) raw_file_size_gauge_.Set(value);};
    void QueryResponseSummaryObserve(double value) override {if(startup_) query_response_summary_.Observe(value);};
    void DiskStoreIOSpeedGaugeSet(double value) override { if(startup_) disk_store_IO_speed_gauge_.Set(value);};
    void DataFileSizeGaugeSet(double value) override { if(startup_) data_file_size_gauge_.Set(value);};
    void AddVectorsSuccessGaugeSet(double value) override { if(startup_) add_vectors_success_gauge_.Set(value);};
    void AddVectorsFailGaugeSet(double value) override { if(startup_) add_vectors_fail_gauge_.Set(value);};
    void QueryVectorResponseSummaryObserve(double value, int count = 1) override { if (startup_) for(int i = 0 ; i < count ; ++i) query_vector_response_summary_.Observe(value);};
    void QueryVectorResponsePerSecondGaugeSet(double value) override {if (startup_) query_vector_response_per_second_gauge_.Set(value);};
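Most of this hunk only adds the override specifier to methods that were already shadowing their MetricsBase counterparts; with override, the compiler rejects any declaration whose signature does not match a base-class virtual, instead of silently introducing a new function that would never be reached through a MetricsBase pointer. A minimal sketch of the distinction (types here are illustrative):

    struct BaseSketch {
        virtual void Observe(double value) {}
    };
    struct GoodSketch : BaseSketch {
        void Observe(double value) override {}    // OK: matches the base virtual
    };
    // struct BadSketch : BaseSketch {
    //     void Observe(float value) override {}  // error: marked override but overrides nothing
    // };

The one genuinely new implementation, QueryVectorResponseSummaryObserve, loops count times over a single-sample Observe call; prometheus-cpp's Summary::Observe records one value per call, so observing the per-vector average nq times is how a whole query batch is folded into the summary.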
@@ -400,6 +402,18 @@ class PrometheusMetrics: public MetricsBase {
        .Register(*registry_);
    prometheus::Summary &query_response_summary_ = query_response_.Add({}, Quantiles{{0.95,0.00},{0.9,0.05},{0.8,0.1}});

    prometheus::Family<prometheus::Summary> &query_vector_response_ = prometheus::BuildSummary()
        .Name("query_vector_response_summary")
        .Help("query each vector response summary")
        .Register(*registry_);
    prometheus::Summary &query_vector_response_summary_ = query_vector_response_.Add({}, Quantiles{{0.95,0.00},{0.9,0.05},{0.8,0.1}});

    prometheus::Family<prometheus::Gauge> &query_vector_response_per_second_ = prometheus::BuildGauge()
        .Name("query_vector_response_per_microsecond")
        .Help("the number of vectors can be queried every second ")
        .Register(*registry_);
    prometheus::Gauge &query_vector_response_per_second_gauge_ = query_vector_response_per_second_.Add({});

    prometheus::Family<prometheus::Gauge> &disk_store_IO_speed_ = prometheus::BuildGauge()
        .Name("disk_store_IO_speed_bytes_per_microseconds")
        .Help("disk_store_IO_speed")
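For context, a self-contained sketch of the prometheus-cpp registration pattern used in these member initializers, assuming only the library's public API (the registry, metric names, and values below are illustrative, not taken from the commit); each Quantile pair is {quantile, allowed error}:

    #include <memory>
    #include <prometheus/gauge.h>
    #include <prometheus/registry.h>
    #include <prometheus/summary.h>

    int main() {
        auto registry = std::make_shared<prometheus::Registry>();

        // One Summary family, one unlabeled series with the same quantile spec as above.
        auto &summary_family = prometheus::BuildSummary()
                                   .Name("example_query_vector_response_summary")
                                   .Help("per-vector query latency")
                                   .Register(*registry);
        auto &summary = summary_family.Add(
            {}, prometheus::Summary::Quantiles{{0.95, 0.00}, {0.9, 0.05}, {0.8, 0.1}});

        // One Gauge family, one unlabeled series.
        auto &gauge_family = prometheus::BuildGauge()
                                 .Name("example_query_vector_throughput")
                                 .Help("vectors handled per unit time")
                                 .Register(*registry);
        auto &gauge = gauge_family.Add({});

        summary.Observe(12.5);  // record one latency sample
        gauge.Set(42.0);        // set the current throughput reading
        return 0;
    }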
@@ -418,6 +432,8 @@ class PrometheusMetrics: public MetricsBase {
        .Register(*registry_);
    prometheus::Gauge &add_vectors_success_gauge_ = add_vectors_.Add({{"outcome", "success"}});
    prometheus::Gauge &add_vectors_fail_gauge_ = add_vectors_.Add({{"outcome", "fail"}});

};
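The add_vectors_ family shows the labeled flavor of the same pattern: a single Family<Gauge> is registered once, and each Add() call with a different label set yields a separate time series. Continuing the illustrative sketch above (names are not from the commit):

    // Two series in one gauge family, distinguished by the "outcome" label.
    auto &add_vectors_family = prometheus::BuildGauge()
                                   .Name("example_add_vectors")
                                   .Help("vectors added, split by outcome")
                                   .Register(*registry);
    prometheus::Gauge &success_gauge = add_vectors_family.Add({{"outcome", "success"}});
    prometheus::Gauge &fail_gauge    = add_vectors_family.Add({{"outcome", "fail"}});
    success_gauge.Set(100.0);
    fail_gauge.Set(3.0);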
@@ -136,9 +136,6 @@ Server::Daemonize() {

int
Server::Start() {
    // server::Metrics::GetInstance().Init();
    // server::Metrics::GetInstance().exposer_ptr()->RegisterCollectable(server::Metrics::GetInstance().registry_ptr());
    // server::Metrics::GetInstance().Init();

    if (daemonized_) {
        Daemonize();
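The lines deleted here were already commented out; the exposer/registry wiring they referred to presumably lives behind Metrics::GetInstance().Init() instead. For reference, a hedged sketch of what that kind of wiring usually looks like with prometheus-cpp; this is not the commit's actual Init(), just the library's standard setup:

    #include <memory>
    #include <prometheus/exposer.h>
    #include <prometheus/registry.h>

    // Illustrative only: make every metric in `registry` visible on the HTTP /metrics endpoint.
    void WireUpMetricsSketch(prometheus::Exposer &exposer,
                             const std::shared_ptr<prometheus::Registry> &registry) {
        exposer.RegisterCollectable(registry);
    }

    // Usage sketch: prometheus::Exposer exposer{"127.0.0.1:8080"};
    //               WireUpMetricsSketch(exposer, registry);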
@@ -32,7 +32,7 @@ TEST_F(DBTest, Metric_Tes) {
    // server::Metrics::GetInstance().exposer_ptr()->RegisterCollectable(server::Metrics::GetInstance().registry_ptr());
    server::Metrics::GetInstance().Init();
    // server::PrometheusMetrics::GetInstance().exposer_ptr()->RegisterCollectable(server::PrometheusMetrics::GetInstance().registry_ptr());
    zilliz::vecwise::cache::CpuCacheMgr::GetInstance()->SetCapacity(4*1024*1024*1024);
    zilliz::vecwise::cache::CpuCacheMgr::GetInstance()->SetCapacity(2UL*1024*1024*1024);
    std::cout<<zilliz::vecwise::cache::CpuCacheMgr::GetInstance()->CacheCapacity()<<std::endl;
    static const std::string group_name = "test_group";
    static const int group_dim = 256;
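The capacity change in this hunk is presumably an integer-overflow fix: with plain int literals, 4*1024*1024*1024 equals 2^32 and overflows 32-bit int arithmetic (undefined behavior, commonly wrapping to 0), whereas the 2UL prefix makes the whole product unsigned long. A tiny illustration:

    // On LP64 platforms int is 32 bits and unsigned long is 64 bits.
    // int bad = 4 * 1024 * 1024 * 1024;                 // 2^32: signed overflow, undefined behavior
    unsigned long capacity = 2UL * 1024 * 1024 * 1024;   // 2147483648 (2 GiB), well defined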
@@ -109,7 +109,7 @@ TEST_F(DBTest, Metric_Tes) {
        } else {
            db_->InsertVectors(group_name, nb, xb, vector_ids);
        }
        std::this_thread::sleep_for(std::chrono::microseconds(1));
        std::this_thread::sleep_for(std::chrono::microseconds(2000));
    }

    search.join();