enhance: cachinglayer: some metric and params update (#44276)
issue: #41435

Signed-off-by: Shawn Wang <shawn.wang@zilliz.com>

This commit is contained in: parent 2f8620fa79, commit 4a01c726f3
@@ -501,7 +501,11 @@ queryNode:
     # This parameter is only valid when eviction is enabled.
     # It defaults to 0.3 (meaning about 30% of evictable on-disk data can be cached), with a valid range of [0.0, 1.0].
     evictableDiskCacheRatio: 0.3
-    # Time in seconds after which an unaccessed cache cell will be evicted.
+    # Enable background eviction for Tiered Storage. Defaults to false.
+    # Background eviction is used to do periodic eviction in a separate thread.
+    # And it will only work when both 'evictionEnabled' and 'backgroundEvictionEnabled' are set to 'true'.
+    backgroundEvictionEnabled: false
+    # Time in seconds after which an unaccessed cache cell will be evicted. 'backgroundEvictionEnabled' is required.
    # If a cached data hasn't been accessed again after this time since its last access, it will be evicted.
    # If set to 0, time based eviction is disabled.
    cacheTtl: 0
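The new knob only takes effect together with `evictionEnabled`, as the comments state. A minimal sketch of that double gating before a periodic eviction thread is started (hypothetical names, not the actual milvus-common API):

```cpp
#include <atomic>
#include <chrono>
#include <thread>

// Hypothetical sketch: start the background eviction loop only when both
// flags are on, mirroring the documented 'evictionEnabled' &&
// 'backgroundEvictionEnabled' contract. All names are illustrative.
struct EvictionConfig {
    bool eviction_enabled{false};
    bool background_eviction_enabled{false};
    std::chrono::milliseconds eviction_interval{1000};
};

void
MaybeStartBackgroundEviction(const EvictionConfig& cfg,
                             std::atomic<bool>& stop_flag) {
    if (!(cfg.eviction_enabled && cfg.background_eviction_enabled)) {
        return;  // background eviction is off unless both flags are true
    }
    std::thread([cfg, &stop_flag] {
        while (!stop_flag.load()) {
            // a real implementation would apply watermark/TTL policies here
            std::this_thread::sleep_for(cfg.eviction_interval);
        }
    }).detach();
}
```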
@@ -642,7 +642,7 @@ OptimizeCompiledExprs(ExecContext* context, const std::vector<ExprPtr>& exprs) {
         std::chrono::high_resolution_clock::now();
     double cost =
         std::chrono::duration<double, std::micro>(end - start).count();
-    monitor::internal_core_optimize_expr_latency.Observe(cost / 1000);
+    milvus::monitor::internal_core_optimize_expr_latency.Observe(cost / 1000);
 }

 }  // namespace exec
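This hunk, like most of the C++ hunks below, only fully qualifies the metric namespace (`monitor::` becomes `milvus::monitor::`); the measurement pattern itself is unchanged: take two `high_resolution_clock` timestamps, compute the difference in microseconds, and report milliseconds to the histogram. A self-contained sketch of that pattern, with a stand-in for `Observe` since the real prometheus histogram lives in milvus::monitor:

```cpp
#include <chrono>
#include <cstdio>

// Stand-in for a prometheus histogram's Observe(); illustrative only.
void
ObserveMs(double ms) {
    std::printf("latency: %.3f ms\n", ms);
}

int
main() {
    auto start = std::chrono::high_resolution_clock::now();
    // ... work being measured ...
    auto end = std::chrono::high_resolution_clock::now();
    // duration<double, std::micro> yields microseconds as a double;
    // dividing by 1000 converts to milliseconds, matching the hunks above.
    double cost =
        std::chrono::duration<double, std::micro>(end - start).count();
    ObserveMs(cost / 1000);
    return 0;
}
```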
@@ -101,7 +101,7 @@ PhyFilterBitsNode::GetOutput() {
     Assert(valid_bitset.size() == need_process_rows_);
     auto filter_ratio =
         bitset.size() != 0 ? 1 - float(bitset.count()) / bitset.size() : 0;
-    monitor::internal_core_expr_filter_ratio.Observe(filter_ratio);
+    milvus::monitor::internal_core_expr_filter_ratio.Observe(filter_ratio);
     // num_processed_rows_ = need_process_rows_;
     std::vector<VectorPtr> col_res;
     col_res.push_back(std::make_shared<ColumnVector>(std::move(bitset),

@@ -111,7 +111,8 @@ PhyFilterBitsNode::GetOutput() {
     double scalar_cost =
         std::chrono::duration<double, std::micro>(scalar_end - scalar_start)
             .count();
-    monitor::internal_core_search_latency_scalar.Observe(scalar_cost / 1000);
+    milvus::monitor::internal_core_search_latency_scalar.Observe(scalar_cost /
+                                                                 1000);

     return std::make_shared<RowVector>(col_res);
 }

@@ -85,7 +85,8 @@ PhyGroupByNode::GetOutput() {
     double vector_cost =
         std::chrono::duration<double, std::micro>(vector_end - vector_start)
             .count();
-    monitor::internal_core_search_latency_groupby.Observe(vector_cost / 1000);
+    milvus::monitor::internal_core_search_latency_groupby.Observe(vector_cost /
+                                                                  1000);
     return input_;
 }

@@ -263,8 +263,8 @@ PhyIterativeFilterNode::GetOutput() {
     double scalar_cost =
         std::chrono::duration<double, std::micro>(scalar_end - scalar_start)
             .count();
-    monitor::internal_core_search_latency_iterative_filter.Observe(scalar_cost /
-                                                                   1000);
+    milvus::monitor::internal_core_search_latency_iterative_filter.Observe(
+        scalar_cost / 1000);

     return input_;
 }

@@ -164,8 +164,8 @@ PhyRandomSampleNode::GetOutput() {
         std::chrono::high_resolution_clock::now();
     double duration =
         std::chrono::duration<double, std::micro>(end - start).count();
-    monitor::internal_core_search_latency_random_sample.Observe(duration /
-                                                                1000);
+    milvus::monitor::internal_core_search_latency_random_sample.Observe(
+        duration / 1000);
     is_finished_ = true;
     return result;
 }

@@ -152,7 +152,8 @@ PhyRescoresNode::GetOutput() {
     double scalar_cost =
         std::chrono::duration<double, std::micro>(scalar_end - scalar_start)
             .count();
-    monitor::internal_core_search_latency_rescore.Observe(scalar_cost / 1000);
+    milvus::monitor::internal_core_search_latency_rescore.Observe(scalar_cost /
+                                                                  1000);
     return input_;
 };

@@ -99,7 +99,8 @@ PhyVectorSearchNode::GetOutput() {
     double vector_cost =
         std::chrono::duration<double, std::micro>(vector_end - vector_start)
             .count();
-    monitor::internal_core_search_latency_vector.Observe(vector_cost / 1000);
+    milvus::monitor::internal_core_search_latency_vector.Observe(vector_cost /
+                                                                 1000);
     // for now, vector search store result in query_context
     // this node interface just return bitset
     return input_;
@@ -79,6 +79,7 @@ class FieldChunkMetricsTranslator
       column_(column),
       meta_(cachinglayer::StorageType::MEMORY,
            milvus::cachinglayer::CellIdMappingMode::IDENTICAL,
+           milvus::cachinglayer::CellDataType::OTHER,
            CacheWarmupPolicy::CacheWarmupPolicy_Disable,
            false) {
 }
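The translator hunks in this commit all follow the same shape: every cachinglayer `Meta` (and its subclasses) now carries a `CellDataType` between the cell-id mapping mode and the warmup policy. A reduced sketch of the widened constructor and one call site, using abbreviated stand-ins for the real enums defined in milvus-common's cachinglayer headers:

```cpp
// Reduced sketch; enum values are stand-ins for the real cachinglayer types.
enum class StorageType { MEMORY, DISK };
enum class CellIdMappingMode { IDENTICAL, ALWAYS_ZERO };
enum class CellDataType {
    SCALAR_FIELD, VECTOR_FIELD, SCALAR_INDEX, VECTOR_INDEX, OTHER
};
enum class CacheWarmupPolicy { Disable, Enable };

struct Meta {
    StorageType storage_type;
    CellIdMappingMode cell_id_mapping_mode;
    CellDataType cell_data_type;  // the field this commit threads through
    CacheWarmupPolicy cache_warmup_policy;
    bool support_eviction;

    Meta(StorageType st, CellIdMappingMode mode, CellDataType dt,
         CacheWarmupPolicy policy, bool evictable)
        : storage_type(st), cell_id_mapping_mode(mode), cell_data_type(dt),
          cache_warmup_policy(policy), support_eviction(evictable) {
    }
};

int
main() {
    // Mirrors FieldChunkMetricsTranslator's meta_ above: in-memory cells,
    // identity id mapping, OTHER data type, warmup disabled, not evictable.
    Meta meta(StorageType::MEMORY, CellIdMappingMode::IDENTICAL,
              CellDataType::OTHER, CacheWarmupPolicy::Disable, false);
    (void)meta;
    return 0;
}
```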
@@ -1919,7 +1919,8 @@ ChunkedSegmentSealedImpl::bulk_subscript(FieldId field_id,
     double get_vector_cost = std::chrono::duration<double, std::micro>(
                                  get_vector_end - get_vector_start)
                                  .count();
-    monitor::internal_core_get_vector_latency.Observe(get_vector_cost / 1000);
+    milvus::monitor::internal_core_get_vector_latency.Observe(get_vector_cost /
+                                                              1000);

     return vector;
 }

@@ -150,7 +150,7 @@ SegmentInternalInterface::Retrieve(tracer::TraceContext* trace_ctx,
     double get_entry_cost = std::chrono::duration<double, std::micro>(
                                 get_target_entry_end - get_target_entry_start)
                                 .count();
-    monitor::internal_core_retrieve_get_target_entry_latency.Observe(
+    milvus::monitor::internal_core_retrieve_get_target_entry_latency.Observe(
         get_entry_cost / 1000);
     return results;
 }

@@ -274,7 +274,7 @@ SegmentInternalInterface::Retrieve(tracer::TraceContext* trace_ctx,
     double get_entry_cost = std::chrono::duration<double, std::micro>(
                                 get_target_entry_end - get_target_entry_start)
                                 .count();
-    monitor::internal_core_retrieve_get_target_entry_latency.Observe(
+    milvus::monitor::internal_core_retrieve_get_target_entry_latency.Observe(
         get_entry_cost / 1000);
     return results;
 }

@@ -1117,4 +1117,16 @@ getCacheWarmupPolicy(bool is_vector, bool is_index, bool in_load_list) {
                : manager.getScalarFieldCacheWarmupPolicy();
     }
 }
+
+milvus::cachinglayer::CellDataType
+getCellDataType(bool is_vector, bool is_index) {
+    if (is_index) {
+        return is_vector ? milvus::cachinglayer::CellDataType::VECTOR_INDEX
+                         : milvus::cachinglayer::CellDataType::SCALAR_INDEX;
+    } else {
+        return is_vector ? milvus::cachinglayer::CellDataType::VECTOR_FIELD
+                         : milvus::cachinglayer::CellDataType::SCALAR_FIELD;
+    }
+}
+
 }  // namespace milvus::segcore
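The new `getCellDataType` helper maps the (is_vector, is_index) pair onto the four typed cell categories, so each cache cell can be attributed to the right metric bucket. A hedged usage sketch, self-contained with a local enum in place of `milvus::cachinglayer::CellDataType`:

```cpp
#include <cassert>

// Local copy of the two-branch mapping introduced above; the enum is a
// stand-in for milvus::cachinglayer::CellDataType.
enum class CellDataType {
    SCALAR_FIELD, VECTOR_FIELD, SCALAR_INDEX, VECTOR_INDEX
};

CellDataType
getCellDataType(bool is_vector, bool is_index) {
    if (is_index) {
        return is_vector ? CellDataType::VECTOR_INDEX
                         : CellDataType::SCALAR_INDEX;
    } else {
        return is_vector ? CellDataType::VECTOR_FIELD
                         : CellDataType::SCALAR_FIELD;
    }
}

int
main() {
    // e.g. a sealed vector index cell vs. a raw scalar column cell
    assert(getCellDataType(true, true) == CellDataType::VECTOR_INDEX);
    assert(getCellDataType(false, false) == CellDataType::SCALAR_FIELD);
    return 0;
}
```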
@@ -20,6 +20,7 @@
 #include "common/type_c.h"
 #include "common/Types.h"
 #include "index/Index.h"
+#include "cachinglayer/Utils.h"
 #include "segcore/ConcurrentVector.h"

 namespace milvus::segcore {

@@ -139,4 +140,7 @@ upper_bound(const ConcurrentVector<Timestamp>& timestamps,
 CacheWarmupPolicy
 getCacheWarmupPolicy(bool is_vector, bool is_index, bool in_load_list = true);

+milvus::cachinglayer::CellDataType
+getCellDataType(bool is_vector, bool is_index);
+
 }  // namespace milvus::segcore

@@ -199,7 +199,7 @@ ReduceHelper::FillEntryData() {
         std::chrono::duration<double, std::micro>(get_target_entry_end -
                                                   get_target_entry_start)
             .count();
-    monitor::internal_core_search_get_target_entry_latency.Observe(
+    milvus::monitor::internal_core_search_get_target_entry_latency.Observe(
         get_entry_cost / 1000);
     }
 }

@@ -195,8 +195,9 @@ ConfigureTieredStorage(const CacheWarmupPolicy scalarFieldCacheWarmupPolicy,
                        const int64_t disk_low_watermark_bytes,
                        const int64_t disk_high_watermark_bytes,
                        const int64_t disk_max_bytes,
-                       const bool evictionEnabled,
+                       const bool eviction_enabled,
                        const int64_t cache_touch_window_ms,
+                       const bool background_eviction_enabled,
                        const int64_t eviction_interval_ms,
                        const int64_t cache_cell_unaccessed_survival_time,
                        const float overloaded_memory_threshold_percentage,

@@ -215,8 +216,9 @@ ConfigureTieredStorage(const CacheWarmupPolicy scalarFieldCacheWarmupPolicy,
                            disk_low_watermark_bytes,
                            disk_high_watermark_bytes,
                            disk_max_bytes},
-                          evictionEnabled,
+                          eviction_enabled,
                           {cache_touch_window_ms,
+                           background_eviction_enabled,
                            eviction_interval_ms,
                            cache_cell_unaccessed_survival_time,
                            overloaded_memory_threshold_percentage,
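Note the second hunk: the eviction knobs are passed as a brace-initialized aggregate, so the new `background_eviction_enabled` value has to appear at the same position in the initializer as in the receiving struct's declaration order. A minimal sketch of why (hypothetical `EvictionConfig`, mirroring the `{10, 30}` to `{10, true, 30}` test fix further down):

```cpp
#include <cstdint>

// Hypothetical aggregate mirroring the eviction config block above.
// Aggregate initialization binds values strictly by declaration order,
// so inserting background_eviction_enabled in the middle of the struct
// forces every brace-init call site to add the new value in the middle.
struct EvictionConfig {
    int64_t cache_touch_window_ms;
    bool background_eviction_enabled;  // newly inserted member
    int64_t eviction_interval_ms;
};

int
main() {
    EvictionConfig cfg{10, true, 30};  // was {10, 30} before the new field
    (void)cfg;
    return 0;
}
```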
@@ -107,9 +107,10 @@ ConfigureTieredStorage(
     const int64_t disk_high_watermark_bytes,
     const int64_t disk_max_bytes,
     // eviction enabled
-    const bool evictionEnabled,
+    const bool eviction_enabled,
     // eviction configs
     const int64_t cache_touch_window_ms,
+    const bool background_eviction_enabled,
     const int64_t eviction_interval_ms,
     const int64_t cache_cell_unaccessed_survival_time,
     const float overloaded_memory_threshold_percentage,

@@ -84,6 +84,9 @@ ChunkTranslator::ChunkTranslator(
       meta_(use_mmap ? milvus::cachinglayer::StorageType::DISK
                      : milvus::cachinglayer::StorageType::MEMORY,
            milvus::cachinglayer::CellIdMappingMode::IDENTICAL,
+           milvus::segcore::getCellDataType(
+               IsVectorDataType(field_meta.get_data_type()),
+               /* is_index */ false),
            milvus::segcore::getCacheWarmupPolicy(
                IsVectorDataType(field_meta.get_data_type()),
                /* is_index */ false,

@@ -32,10 +32,12 @@ struct CTMeta : public milvus::cachinglayer::Meta {
         virt_chunk_order_;  // indicates the size of each virtual chunk, i.e. 2^virt_chunk_order_
     CTMeta(milvus::cachinglayer::StorageType storage_type,
            milvus::cachinglayer::CellIdMappingMode cell_id_mapping_mode,
+           milvus::cachinglayer::CellDataType cell_data_type,
            CacheWarmupPolicy cache_warmup_policy,
            bool support_eviction)
         : milvus::cachinglayer::Meta(storage_type,
                                      cell_id_mapping_mode,
+                                     cell_data_type,
                                      cache_warmup_policy,
                                      support_eviction) {
     }

@@ -30,6 +30,9 @@ DefaultValueChunkTranslator::DefaultValueChunkTranslator(
       meta_(use_mmap ? milvus::cachinglayer::StorageType::DISK
                      : milvus::cachinglayer::StorageType::MEMORY,
            milvus::cachinglayer::CellIdMappingMode::ALWAYS_ZERO,
+           milvus::segcore::getCellDataType(
+               IsVectorDataType(field_meta.get_data_type()),
+               /* is_index */ false),
            milvus::segcore::getCacheWarmupPolicy(
                IsVectorDataType(field_meta.get_data_type()),
                /* is_index */ false,

@@ -24,6 +24,9 @@ InterimSealedIndexTranslator::InterimSealedIndexTranslator(
       index_key_(fmt::format("seg_{}_ii_{}", segment_id, field_id)),
       meta_(milvus::cachinglayer::StorageType::MEMORY,
            milvus::cachinglayer::CellIdMappingMode::ALWAYS_ZERO,
+           milvus::segcore::getCellDataType(
+               /* is_vector */ true,
+               /* is_index */ true),
            milvus::segcore::getCacheWarmupPolicy(
                /* is_vector */ true,
                /* is_index */ true),

@@ -36,8 +36,11 @@ SealedIndexTranslator::SealedIndexTranslator(
                 ? milvus::cachinglayer::StorageType::DISK
                 : milvus::cachinglayer::StorageType::MEMORY,
            milvus::cachinglayer::CellIdMappingMode::ALWAYS_ZERO,
+           milvus::segcore::getCellDataType(
+               /* is_vector */ IsVectorDataType(load_index_info->field_type),
+               /* is_index */ true),
            milvus::segcore::getCacheWarmupPolicy(
-               IsVectorDataType(load_index_info->field_type),
+               /* is_vector */ IsVectorDataType(load_index_info->field_type),
                /* is_index */ true),
            /* support_eviction */
            // if index data supports lazy load internally, we don't need to support eviction for index metadata

@@ -34,8 +34,11 @@ V1SealedIndexTranslator::V1SealedIndexTranslator(
                 ? milvus::cachinglayer::StorageType::DISK
                 : milvus::cachinglayer::StorageType::MEMORY,
            milvus::cachinglayer::CellIdMappingMode::ALWAYS_ZERO,
+           milvus::segcore::getCellDataType(
+               /* is_vector */ IsVectorDataType(load_index_info->field_type),
+               /* is_index */ true),
            milvus::segcore::getCacheWarmupPolicy(
-               IsVectorDataType(load_index_info->field_type),
+               /* is_vector */ IsVectorDataType(load_index_info->field_type),
                /* is_index */ true),
            /* support_eviction */ false) {
 }

@@ -26,10 +26,12 @@ struct GroupCTMeta : public milvus::cachinglayer::Meta {
     GroupCTMeta(size_t num_fields,
                 milvus::cachinglayer::StorageType storage_type,
                 milvus::cachinglayer::CellIdMappingMode cell_id_mapping_mode,
+                milvus::cachinglayer::CellDataType cell_data_type,
                 CacheWarmupPolicy cache_warmup_policy,
                 bool support_eviction)
         : milvus::cachinglayer::Meta(storage_type,
                                      cell_id_mapping_mode,
+                                     cell_data_type,
                                      cache_warmup_policy,
                                      support_eviction),
           num_fields_(num_fields) {

@@ -62,6 +62,17 @@ GroupChunkTranslator::GroupChunkTranslator(
           use_mmap ? milvus::cachinglayer::StorageType::DISK
                    : milvus::cachinglayer::StorageType::MEMORY,
           milvus::cachinglayer::CellIdMappingMode::IDENTICAL,
+          milvus::segcore::getCellDataType(
+              /* is_vector */
+              [&]() {
+                  for (const auto& [fid, field_meta] : field_metas_) {
+                      if (IsVectorDataType(field_meta.get_data_type())) {
+                          return true;
+                      }
+                  }
+                  return false;
+              }(),
+              /* is_index */ false),
           milvus::segcore::getCacheWarmupPolicy(
               /* is_vector */
               [&]() {
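`GroupChunkTranslator` has no single field type, so `is_vector` is computed inline with an immediately invoked lambda: true if any field in the group is a vector field. A standalone sketch of the idiom, with simple stand-ins for the field metadata types:

```cpp
#include <unordered_map>

// Stand-ins for FieldId/FieldMeta; just enough to show the idiom.
enum class DataType { SCALAR, VECTOR };

bool
IsVectorDataType(DataType dt) {
    return dt == DataType::VECTOR;
}

int
main() {
    std::unordered_map<int, DataType> field_metas{{100, DataType::SCALAR},
                                                  {101, DataType::VECTOR}};
    // Immediately invoked lambda: lets a multi-statement scan produce a
    // single value directly in an argument/initializer position.
    bool is_vector = [&]() {
        for (const auto& [fid, dt] : field_metas) {
            if (IsVectorDataType(dt)) {
                return true;
            }
        }
        return false;
    }();
    return is_vector ? 0 : 1;
}
```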
@@ -209,14 +209,14 @@ FileWriter::WriteWithDirectIO(const void* data, size_t nbyte) {

     assert(src == static_cast<const char*>(data) + nbyte);

-    monitor::disk_write_total_bytes_direct.Increment(nbyte);
+    milvus::monitor::disk_write_total_bytes_direct.Increment(nbyte);
 }

 void
 FileWriter::WriteWithBufferedIO(const void* data, size_t nbyte) {
     PositionedWriteWithCheck(data, nbyte, file_size_);
     file_size_ += nbyte;
-    monitor::disk_write_total_bytes_buffered.Increment(nbyte);
+    milvus::monitor::disk_write_total_bytes_buffered.Increment(nbyte);
 }

 void

@@ -270,7 +270,7 @@ FileWriter::FlushWithDirectIO() {
            nearest_aligned_offset - offset_);
     PositionedWriteWithCheck(aligned_buf_, nearest_aligned_offset, file_size_);
     file_size_ += offset_;
-    monitor::disk_write_total_bytes_direct.Increment(offset_);
+    milvus::monitor::disk_write_total_bytes_direct.Increment(offset_);
     // truncate the file to the actual size since the file written by the aligned buffer may be larger than the actual size
     if (ftruncate(fd_, file_size_) != 0) {
         Cleanup();

@@ -484,7 +484,7 @@ MinioChunkManager::ObjectExists(const std::string& bucket_name,

     auto start = std::chrono::system_clock::now();
     auto outcome = client_->HeadObject(request);
-    monitor::internal_storage_request_latency_stat.Observe(
+    milvus::monitor::internal_storage_request_latency_stat.Observe(
         std::chrono::duration_cast<std::chrono::milliseconds>(
             std::chrono::system_clock::now() - start)
             .count());

@@ -492,17 +492,17 @@ MinioChunkManager::ObjectExists(const std::string& bucket_name,
     if (!outcome.IsSuccess()) {
         const auto& err = outcome.GetError();
         if (!IsNotFound(err.GetErrorType())) {
-            monitor::internal_storage_op_count_stat_fail.Increment();
+            milvus::monitor::internal_storage_op_count_stat_fail.Increment();
             ThrowS3Error("ObjectExists",
                          err,
                          "params, bucket={}, object={}",
                          bucket_name,
                          object_name);
         }
-        monitor::internal_storage_op_count_stat_suc.Increment();
+        milvus::monitor::internal_storage_op_count_stat_suc.Increment();
         return false;
     }
-    monitor::internal_storage_op_count_stat_suc.Increment();
+    milvus::monitor::internal_storage_op_count_stat_suc.Increment();
     return true;
 }
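All of the chunk-manager hunks here and below repeat one instrumentation pattern: time the remote call, `Observe` the latency in milliseconds, then `Increment` a success or failure counter on each exit path. A condensed sketch of the pattern with stand-in metric objects (the real ones are prometheus histograms and counters in milvus::monitor):

```cpp
#include <chrono>
#include <cstdio>

// Stand-ins for prometheus histogram/counter objects; illustrative only.
struct Counter {
    void Increment() { ++n; }
    long n{0};
};
struct Histogram {
    void Observe(double ms) { std::printf("stat latency: %.1f ms\n", ms); }
};

Histogram request_latency_stat;
Counter op_count_stat_suc;
Counter op_count_stat_fail;

// Pretend remote call standing in for client_->HeadObject(request).
bool
HeadObjectStub(bool& not_found) {
    not_found = false;
    return true;
}

bool
ObjectExists() {
    auto start = std::chrono::system_clock::now();
    bool not_found = false;
    bool ok = HeadObjectStub(not_found);
    // latency is always recorded, success or not
    request_latency_stat.Observe(
        std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now() - start)
            .count());
    if (!ok) {
        if (!not_found) {
            op_count_stat_fail.Increment();
            // a real implementation would throw here
        }
        op_count_stat_suc.Increment();  // "not found" still counts as success
        return false;
    }
    op_count_stat_suc.Increment();
    return true;
}

int
main() {
    return ObjectExists() ? 0 : 1;
}
```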
@@ -515,12 +515,12 @@ MinioChunkManager::GetObjectSize(const std::string& bucket_name,

     auto start = std::chrono::system_clock::now();
     auto outcome = client_->HeadObject(request);
-    monitor::internal_storage_request_latency_stat.Observe(
+    milvus::monitor::internal_storage_request_latency_stat.Observe(
         std::chrono::duration_cast<std::chrono::milliseconds>(
             std::chrono::system_clock::now() - start)
             .count());
     if (!outcome.IsSuccess()) {
-        monitor::internal_storage_op_count_stat_fail.Increment();
+        milvus::monitor::internal_storage_op_count_stat_fail.Increment();
         const auto& err = outcome.GetError();
         ThrowS3Error("GetObjectSize",
                      err,

@@ -528,7 +528,7 @@ MinioChunkManager::GetObjectSize(const std::string& bucket_name,
                      bucket_name,
                      object_name);
     }
-    monitor::internal_storage_op_count_stat_suc.Increment();
+    milvus::monitor::internal_storage_op_count_stat_suc.Increment();
     return outcome.GetResult().GetContentLength();
 }

@@ -541,7 +541,7 @@ MinioChunkManager::DeleteObject(const std::string& bucket_name,

     auto start = std::chrono::system_clock::now();
     auto outcome = client_->DeleteObject(request);
-    monitor::internal_storage_request_latency_remove.Observe(
+    milvus::monitor::internal_storage_request_latency_remove.Observe(
         std::chrono::duration_cast<std::chrono::milliseconds>(
             std::chrono::system_clock::now() - start)
             .count());

@@ -549,17 +549,17 @@ MinioChunkManager::DeleteObject(const std::string& bucket_name,
     if (!outcome.IsSuccess()) {
         const auto& err = outcome.GetError();
         if (!IsNotFound(err.GetErrorType())) {
-            monitor::internal_storage_op_count_remove_fail.Increment();
+            milvus::monitor::internal_storage_op_count_remove_fail.Increment();
             ThrowS3Error("DeleteObject",
                          err,
                          "params, bucket={}, object={}",
                          bucket_name,
                          object_name);
         }
-        monitor::internal_storage_op_count_remove_suc.Increment();
+        milvus::monitor::internal_storage_op_count_remove_suc.Increment();
         return false;
     }
-    monitor::internal_storage_op_count_remove_suc.Increment();
+    milvus::monitor::internal_storage_op_count_remove_suc.Increment();
     return true;
 }

@@ -580,14 +580,14 @@ MinioChunkManager::PutObjectBuffer(const std::string& bucket_name,

     auto start = std::chrono::system_clock::now();
     auto outcome = client_->PutObject(request);
-    monitor::internal_storage_request_latency_put.Observe(
+    milvus::monitor::internal_storage_request_latency_put.Observe(
         std::chrono::duration_cast<std::chrono::milliseconds>(
             std::chrono::system_clock::now() - start)
             .count());
-    monitor::internal_storage_kv_size_put.Observe(size);
+    milvus::monitor::internal_storage_kv_size_put.Observe(size);

     if (!outcome.IsSuccess()) {
-        monitor::internal_storage_op_count_put_fail.Increment();
+        milvus::monitor::internal_storage_op_count_put_fail.Increment();
         const auto& err = outcome.GetError();
         ThrowS3Error("PutObjectBuffer",
                      err,

@@ -595,7 +595,7 @@ MinioChunkManager::PutObjectBuffer(const std::string& bucket_name,
                      bucket_name,
                      object_name);
     }
-    monitor::internal_storage_op_count_put_suc.Increment();
+    milvus::monitor::internal_storage_op_count_put_suc.Increment();
     return true;
 }

@@ -661,14 +661,14 @@ MinioChunkManager::GetObjectBuffer(const std::string& bucket_name,
     });
     auto start = std::chrono::system_clock::now();
     auto outcome = client_->GetObject(request);
-    monitor::internal_storage_request_latency_get.Observe(
+    milvus::monitor::internal_storage_request_latency_get.Observe(
         std::chrono::duration_cast<std::chrono::milliseconds>(
             std::chrono::system_clock::now() - start)
             .count());
-    monitor::internal_storage_kv_size_get.Observe(size);
+    milvus::monitor::internal_storage_kv_size_get.Observe(size);

     if (!outcome.IsSuccess()) {
-        monitor::internal_storage_op_count_get_fail.Increment();
+        milvus::monitor::internal_storage_op_count_get_fail.Increment();
         const auto& err = outcome.GetError();
         ThrowS3Error("GetObjectBuffer",
                      err,

@@ -676,7 +676,7 @@ MinioChunkManager::GetObjectBuffer(const std::string& bucket_name,
                      bucket_name,
                      object_name);
     }
-    monitor::internal_storage_op_count_get_suc.Increment();
+    milvus::monitor::internal_storage_op_count_get_suc.Increment();
     return size;
 }

@@ -692,13 +692,13 @@ MinioChunkManager::ListObjects(const std::string& bucket_name,

     auto start = std::chrono::system_clock::now();
     auto outcome = client_->ListObjects(request);
-    monitor::internal_storage_request_latency_list.Observe(
+    milvus::monitor::internal_storage_request_latency_list.Observe(
         std::chrono::duration_cast<std::chrono::milliseconds>(
             std::chrono::system_clock::now() - start)
             .count());

     if (!outcome.IsSuccess()) {
-        monitor::internal_storage_op_count_list_fail.Increment();
+        milvus::monitor::internal_storage_op_count_list_fail.Increment();
         const auto& err = outcome.GetError();
         ThrowS3Error("ListObjects",
                      err,

@@ -706,7 +706,7 @@ MinioChunkManager::ListObjects(const std::string& bucket_name,
                      bucket_name,
                      prefix);
     }
-    monitor::internal_storage_op_count_list_suc.Increment();
+    milvus::monitor::internal_storage_op_count_list_suc.Increment();
     auto objects = outcome.GetResult().GetContents();
     for (auto& obj : objects) {
         objects_vec.emplace_back(obj.GetKey());

@@ -175,13 +175,13 @@ AzureChunkManager::ObjectExists(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->ObjectExists(bucket_name, object_name);
-        monitor::internal_storage_request_latency_stat.Observe(
+        milvus::monitor::internal_storage_request_latency_stat.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_stat_suc.Increment();
+        milvus::monitor::internal_storage_op_count_stat_suc.Increment();
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_stat_fail.Increment();
+        milvus::monitor::internal_storage_op_count_stat_fail.Increment();
         ThrowAzureError("ObjectExists",
                         err,
                         "params, bucket={}, object={}",

@@ -198,13 +198,13 @@ AzureChunkManager::GetObjectSize(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->GetObjectSize(bucket_name, object_name);
-        monitor::internal_storage_request_latency_stat.Observe(
+        milvus::monitor::internal_storage_request_latency_stat.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_stat_suc.Increment();
+        milvus::monitor::internal_storage_op_count_stat_suc.Increment();
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_stat_fail.Increment();
+        milvus::monitor::internal_storage_op_count_stat_fail.Increment();
         ThrowAzureError("GetObjectSize",
                         err,
                         "params, bucket={}, object={}",

@@ -221,13 +221,13 @@ AzureChunkManager::DeleteObject(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->DeleteObject(bucket_name, object_name);
-        monitor::internal_storage_request_latency_remove.Observe(
+        milvus::monitor::internal_storage_request_latency_remove.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_remove_suc.Increment();
+        milvus::monitor::internal_storage_op_count_remove_suc.Increment();
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_remove_fail.Increment();
+        milvus::monitor::internal_storage_op_count_remove_fail.Increment();
         ThrowAzureError("DeleteObject",
                         err,
                         "params, bucket={}, object={}",

@@ -246,14 +246,14 @@ AzureChunkManager::PutObjectBuffer(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->PutObjectBuffer(bucket_name, object_name, buf, size);
-        monitor::internal_storage_request_latency_put.Observe(
+        milvus::monitor::internal_storage_request_latency_put.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_put_suc.Increment();
-        monitor::internal_storage_kv_size_put.Observe(size);
+        milvus::monitor::internal_storage_op_count_put_suc.Increment();
+        milvus::monitor::internal_storage_kv_size_put.Observe(size);
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_put_fail.Increment();
+        milvus::monitor::internal_storage_op_count_put_fail.Increment();
         ThrowAzureError("PutObjectBuffer",
                         err,
                         "params, bucket={}, object={}",

@@ -272,14 +272,14 @@ AzureChunkManager::GetObjectBuffer(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->GetObjectBuffer(bucket_name, object_name, buf, size);
-        monitor::internal_storage_request_latency_get.Observe(
+        milvus::monitor::internal_storage_request_latency_get.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_get_suc.Increment();
-        monitor::internal_storage_kv_size_get.Observe(size);
+        milvus::monitor::internal_storage_op_count_get_suc.Increment();
+        milvus::monitor::internal_storage_kv_size_get.Observe(size);
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_get_fail.Increment();
+        milvus::monitor::internal_storage_op_count_get_fail.Increment();
         ThrowAzureError("GetObjectBuffer",
                         err,
                         "params, bucket={}, object={}",

@@ -296,13 +296,13 @@ AzureChunkManager::ListObjects(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->ListObjects(bucket_name, prefix);
-        monitor::internal_storage_request_latency_list.Observe(
+        milvus::monitor::internal_storage_request_latency_list.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_list_suc.Increment();
+        milvus::monitor::internal_storage_op_count_list_suc.Increment();
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_list_fail.Increment();
+        milvus::monitor::internal_storage_op_count_list_fail.Increment();
         ThrowAzureError("ListObjects",
                         err,
                         "params, bucket={}, prefix={}",

@@ -135,13 +135,13 @@ GcpNativeChunkManager::ObjectExists(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->ObjectExists(bucket_name, object_name);
-        monitor::internal_storage_request_latency_stat.Observe(
+        milvus::monitor::internal_storage_request_latency_stat.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_stat_suc.Increment();
+        milvus::monitor::internal_storage_op_count_stat_suc.Increment();
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_stat_fail.Increment();
+        milvus::monitor::internal_storage_op_count_stat_fail.Increment();
         ThrowGcpNativeError("ObjectExists",
                             err,
                             "params, bucket={}, object={}",

@@ -158,13 +158,13 @@ GcpNativeChunkManager::GetObjectSize(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->GetObjectSize(bucket_name, object_name);
-        monitor::internal_storage_request_latency_stat.Observe(
+        milvus::monitor::internal_storage_request_latency_stat.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_stat_suc.Increment();
+        milvus::monitor::internal_storage_op_count_stat_suc.Increment();
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_stat_fail.Increment();
+        milvus::monitor::internal_storage_op_count_stat_fail.Increment();
         ThrowGcpNativeError("GetObjectSize",
                             err,
                             "params, bucket={}, object={}",

@@ -181,13 +181,13 @@ GcpNativeChunkManager::DeleteObject(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->DeleteObject(bucket_name, object_name);
-        monitor::internal_storage_request_latency_remove.Observe(
+        milvus::monitor::internal_storage_request_latency_remove.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_remove_suc.Increment();
+        milvus::monitor::internal_storage_op_count_remove_suc.Increment();
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_remove_fail.Increment();
+        milvus::monitor::internal_storage_op_count_remove_fail.Increment();
         ThrowGcpNativeError("DeleteObject",
                             err,
                             "params, bucket={}, object={}",

@@ -206,14 +206,14 @@ GcpNativeChunkManager::PutObjectBuffer(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->PutObjectBuffer(bucket_name, object_name, buf, size);
-        monitor::internal_storage_request_latency_put.Observe(
+        milvus::monitor::internal_storage_request_latency_put.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_put_suc.Increment();
-        monitor::internal_storage_kv_size_put.Observe(size);
+        milvus::monitor::internal_storage_op_count_put_suc.Increment();
+        milvus::monitor::internal_storage_kv_size_put.Observe(size);
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_put_fail.Increment();
+        milvus::monitor::internal_storage_op_count_put_fail.Increment();
         ThrowGcpNativeError("PutObjectBuffer",
                             err,
                             "params, bucket={}, object={}",

@@ -232,14 +232,14 @@ GcpNativeChunkManager::GetObjectBuffer(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
         res = client_->GetObjectBuffer(bucket_name, object_name, buf, size);
-        monitor::internal_storage_request_latency_get.Observe(
+        milvus::monitor::internal_storage_request_latency_get.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_get_suc.Increment();
-        monitor::internal_storage_kv_size_get.Observe(size);
+        milvus::monitor::internal_storage_op_count_get_suc.Increment();
+        milvus::monitor::internal_storage_kv_size_get.Observe(size);
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_get_fail.Increment();
+        milvus::monitor::internal_storage_op_count_get_fail.Increment();
         ThrowGcpNativeError("GetObjectBuffer",
                             err,
                             "params, bucket={}, object={}",

@@ -256,13 +256,13 @@ GcpNativeChunkManager::ListObjects(const std::string& bucket_name,
     try {
         auto start = std::chrono::system_clock::now();
        res = client_->ListObjects(bucket_name, prefix);
-        monitor::internal_storage_request_latency_list.Observe(
+        milvus::monitor::internal_storage_request_latency_list.Observe(
             std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::system_clock::now() - start)
                 .count());
-        monitor::internal_storage_op_count_list_suc.Increment();
+        milvus::monitor::internal_storage_op_count_list_suc.Increment();
     } catch (std::exception& err) {
-        monitor::internal_storage_op_count_list_fail.Increment();
+        milvus::monitor::internal_storage_op_count_list_fail.Increment();
         ThrowGcpNativeError("ListObjects",
                             err,
                             "params, bucket={}, prefix={}",
@@ -13,7 +13,7 @@

 milvus_add_pkg_config("milvus-common")
 set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES "")
-set( MILVUS-COMMON-VERSION 2008bac )
+set( MILVUS-COMMON-VERSION 7204eb6 )
 set( GIT_REPOSITORY "https://github.com/zilliztech/milvus-common.git")

 message(STATUS "milvus-common repo: ${GIT_REPOSITORY}")

@@ -38,7 +38,7 @@ main(int argc, char** argv) {
                            CacheWarmupPolicy::CacheWarmupPolicy_Disable},
                           {1024 * mb, 1024 * mb, 1024 * mb, 1024 * mb, 1024 * mb, 1024 * mb},
                           true,
-                          {10, 30});
+                          {10, true, 30});

     return RUN_ALL_TESTS();
 }

@@ -40,6 +40,7 @@ class TestChunkTranslator : public Translator<milvus::Chunk> {
           meta_(segcore::storagev1translator::CTMeta(
               StorageType::MEMORY,
               CellIdMappingMode::IDENTICAL,
+              CellDataType::SCALAR_FIELD,
               CacheWarmupPolicy::CacheWarmupPolicy_Disable,
               true)) {
         meta_.num_rows_until_chunk_.reserve(num_cells_ + 1);

@@ -118,6 +119,7 @@ class TestGroupChunkTranslator : public Translator<milvus::GroupChunk> {
               num_fields,
               StorageType::MEMORY,
               CellIdMappingMode::IDENTICAL,
+              CellDataType::OTHER,
               CacheWarmupPolicy::CacheWarmupPolicy_Disable,
               true)) {
         meta_.num_rows_until_chunk_.reserve(num_cells_ + 1);

@@ -185,6 +187,7 @@ class TestIndexTranslator : public Translator<milvus::index::IndexBase> {
           meta_(milvus::cachinglayer::Meta(
               StorageType::MEMORY,
               CellIdMappingMode::IDENTICAL,
+              CellDataType::OTHER,
               CacheWarmupPolicy::CacheWarmupPolicy_Disable,
               false)) {
     }

@@ -353,6 +353,7 @@ func InitTieredStorage(params *paramtable.ComponentParam) error {

    evictionEnabled := C.bool(params.QueryNodeCfg.TieredEvictionEnabled.GetAsBool())
    cacheTouchWindowMs := C.int64_t(params.QueryNodeCfg.TieredCacheTouchWindowMs.GetAsInt64())
+   backgroundEvictionEnabled := C.bool(params.QueryNodeCfg.TieredBackgroundEvictionEnabled.GetAsBool())
    evictionIntervalMs := C.int64_t(params.QueryNodeCfg.TieredEvictionIntervalMs.GetAsInt64())
    cacheCellUnaccessedSurvivalTime := C.int64_t(params.QueryNodeCfg.CacheCellUnaccessedSurvivalTime.GetAsInt64())
    loadingResourceFactor := C.float(params.QueryNodeCfg.TieredLoadingResourceFactor.GetAsFloat())

@@ -367,7 +368,8 @@ func InitTieredStorage(params *paramtable.ComponentParam) error {
        vectorIndexCacheWarmupPolicy,
        memoryLowWatermarkBytes, memoryHighWatermarkBytes, memoryMaxBytes,
        diskLowWatermarkBytes, diskHighWatermarkBytes, diskMaxBytes,
-       evictionEnabled, cacheTouchWindowMs, evictionIntervalMs, cacheCellUnaccessedSurvivalTime,
+       evictionEnabled, cacheTouchWindowMs,
+       backgroundEvictionEnabled, evictionIntervalMs, cacheCellUnaccessedSurvivalTime,
        overloadedMemoryThresholdPercentage, loadingResourceFactor, maxDiskUsagePercentage, diskPath)

    tieredEvictableMemoryCacheRatio := params.QueryNodeCfg.TieredEvictableMemoryCacheRatio.GetAsFloat()

@@ -2958,9 +2958,10 @@ type queryNodeConfig struct {
    TieredEvictableMemoryCacheRatio ParamItem `refreshable:"false"`
    TieredEvictableDiskCacheRatio   ParamItem `refreshable:"false"`
    TieredCacheTouchWindowMs        ParamItem `refreshable:"false"`
+   TieredBackgroundEvictionEnabled ParamItem `refreshable:"false"`
    TieredEvictionIntervalMs        ParamItem `refreshable:"false"`
-   TieredLoadingResourceFactor     ParamItem `refreshable:"false"`
    CacheCellUnaccessedSurvivalTime ParamItem `refreshable:"false"`
+   TieredLoadingResourceFactor     ParamItem `refreshable:"false"`

    KnowhereScoreConsistency ParamItem `refreshable:"false"`

@@ -3300,6 +3301,17 @@ eviction is necessary and the amount of data to evict from memory/disk.
    }
    p.TieredCacheTouchWindowMs.Init(base.mgr)

+   p.TieredBackgroundEvictionEnabled = ParamItem{
+       Key:          "queryNode.segcore.tieredStorage.backgroundEvictionEnabled",
+       Version:      "2.6.2",
+       DefaultValue: "false",
+       Doc: `Enable background eviction for Tiered Storage. Defaults to false.
+Background eviction is used to do periodic eviction in a separate thread.
+And it will only work when both 'evictionEnabled' and 'backgroundEvictionEnabled' are set to 'true'.`,
+       Export: true,
+   }
+   p.TieredBackgroundEvictionEnabled.Init(base.mgr)
+
    p.TieredEvictionIntervalMs = ParamItem{
        Key:     "queryNode.segcore.tieredStorage.evictionIntervalMs",
        Version: "2.6.0",

@@ -3311,11 +3323,29 @@ eviction is necessary and the amount of data to evict from memory/disk.
            }
            return fmt.Sprintf("%d", window)
        },
-       Doc:    "Interval in milliseconds to run periodic eviction.",
+       Doc:    "Interval in milliseconds to run periodic eviction. 'backgroundEvictionEnabled' is required.",
        Export: false,
    }
    p.TieredEvictionIntervalMs.Init(base.mgr)

+   p.CacheCellUnaccessedSurvivalTime = ParamItem{
+       Key:          "queryNode.segcore.tieredStorage.cacheTtl",
+       Version:      "2.6.0",
+       DefaultValue: "0",
+       Formatter: func(v string) string {
+           timeout := getAsInt64(v)
+           if timeout <= 0 {
+               return "0"
+           }
+           return fmt.Sprintf("%d", timeout)
+       },
+       Doc: `Time in seconds after which an unaccessed cache cell will be evicted. 'backgroundEvictionEnabled' is required.
+If a cached data hasn't been accessed again after this time since its last access, it will be evicted.
+If set to 0, time based eviction is disabled.`,
+       Export: true,
+   }
+   p.CacheCellUnaccessedSurvivalTime.Init(base.mgr)
+
    p.TieredLoadingResourceFactor = ParamItem{
        Key:     "queryNode.segcore.tieredStorage.loadingResourceFactor",
        Version: "2.6.0",

@@ -3332,24 +3362,6 @@ eviction is necessary and the amount of data to evict from memory/disk.
    }
    p.TieredLoadingResourceFactor.Init(base.mgr)

-   p.CacheCellUnaccessedSurvivalTime = ParamItem{
-       Key:          "queryNode.segcore.tieredStorage.cacheTtl",
-       Version:      "2.6.0",
-       DefaultValue: "0",
-       Formatter: func(v string) string {
-           timeout := getAsInt64(v)
-           if timeout <= 0 {
-               return "0"
-           }
-           return fmt.Sprintf("%d", timeout)
-       },
-       Doc: `Time in seconds after which an unaccessed cache cell will be evicted.
-If a cached data hasn't been accessed again after this time since its last access, it will be evicted.
-If set to 0, time based eviction is disabled.`,
-       Export: true,
-   }
-   p.CacheCellUnaccessedSurvivalTime.Init(base.mgr)
-
    p.EnableDisk = ParamItem{
        Key:     "queryNode.enableDisk",
        Version: "2.2.0",
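For reference, the `cacheTtl` semantics documented above (evict a cell once it has gone unaccessed for the configured number of seconds; 0 disables the check) boil down to a comparison like this hedged sketch, with a hypothetical cell struct rather than the milvus-common implementation:

```cpp
#include <chrono>

// Hypothetical cache cell; only the last-access timestamp matters here.
struct CacheCell {
    std::chrono::steady_clock::time_point last_access;
};

// Returns true when the cell's unaccessed time exceeds ttl_seconds.
// ttl_seconds == 0 disables time-based eviction, as the Doc string says.
bool
ShouldEvictByTtl(const CacheCell& cell, long ttl_seconds) {
    if (ttl_seconds <= 0) {
        return false;
    }
    auto idle = std::chrono::steady_clock::now() - cell.last_access;
    return idle > std::chrono::seconds(ttl_seconds);
}
```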