From 4a01c726f33b08c4c4dbe3443dfd37d7cd47a086 Mon Sep 17 00:00:00 2001 From: sparknack Date: Wed, 10 Sep 2025 11:03:57 +0800 Subject: [PATCH] enhance: cachinglayer: some metric and params update (#44276) issue: #41435 --------- Signed-off-by: Shawn Wang --- configs/milvus.yaml | 6 ++- internal/core/src/exec/expression/Expr.cpp | 2 +- .../core/src/exec/operator/FilterBitsNode.cpp | 5 +- .../core/src/exec/operator/GroupByNode.cpp | 3 +- .../src/exec/operator/IterativeFilterNode.cpp | 4 +- .../src/exec/operator/RandomSampleNode.cpp | 4 +- .../core/src/exec/operator/RescoresNode.cpp | 3 +- .../src/exec/operator/VectorSearchNode.cpp | 3 +- internal/core/src/index/SkipIndex.h | 1 + .../src/segcore/ChunkedSegmentSealedImpl.cpp | 3 +- .../core/src/segcore/SegmentInterface.cpp | 4 +- internal/core/src/segcore/Utils.cpp | 12 +++++ internal/core/src/segcore/Utils.h | 4 ++ internal/core/src/segcore/reduce/Reduce.cpp | 2 +- internal/core/src/segcore/segcore_init_c.cpp | 6 ++- internal/core/src/segcore/segcore_init_c.h | 3 +- .../storagev1translator/ChunkTranslator.cpp | 3 ++ .../storagev1translator/ChunkTranslator.h | 2 + .../DefaultValueChunkTranslator.cpp | 3 ++ .../InterimSealedIndexTranslator.cpp | 3 ++ .../SealedIndexTranslator.cpp | 5 +- .../V1SealedIndexTranslator.cpp | 5 +- .../segcore/storagev2translator/GroupCTMeta.h | 2 + .../GroupChunkTranslator.cpp | 11 ++++ internal/core/src/storage/FileWriter.cpp | 6 +-- .../core/src/storage/MinioChunkManager.cpp | 44 ++++++++-------- .../src/storage/azure/AzureChunkManager.cpp | 40 +++++++------- .../GcpNativeChunkManager.cpp | 40 +++++++------- .../thirdparty/milvus-common/CMakeLists.txt | 2 +- internal/core/unittest/init_gtest.cpp | 2 +- .../test_utils/cachinglayer_test_utils.h | 3 ++ internal/util/initcore/init_core.go | 4 +- pkg/util/paramtable/component_param.go | 52 ++++++++++++------- 33 files changed, 184 insertions(+), 108 deletions(-) diff --git a/configs/milvus.yaml b/configs/milvus.yaml index 6344c292d5..506637f233 100644 --- a/configs/milvus.yaml +++ b/configs/milvus.yaml @@ -501,7 +501,11 @@ queryNode: # This parameter is only valid when eviction is enabled. # It defaults to 0.3 (meaning about 30% of evictable on-disk data can be cached), with a valid range of [0.0, 1.0]. evictableDiskCacheRatio: 0.3 - # Time in seconds after which an unaccessed cache cell will be evicted. + # Enable background eviction for Tiered Storage. Defaults to false. + # Background eviction performs periodic eviction in a separate thread. + # It takes effect only when both 'evictionEnabled' and 'backgroundEvictionEnabled' are set to 'true'. + backgroundEvictionEnabled: false + # Time in seconds after which an unaccessed cache cell will be evicted. Takes effect only when 'backgroundEvictionEnabled' is true. # If a cached data hasn't been accessed again after this time since its last access, it will be evicted. # If set to 0, time based eviction is disabled. 
cacheTtl: 0 diff --git a/internal/core/src/exec/expression/Expr.cpp b/internal/core/src/exec/expression/Expr.cpp index 9e1caea103..6303cf65bf 100644 --- a/internal/core/src/exec/expression/Expr.cpp +++ b/internal/core/src/exec/expression/Expr.cpp @@ -642,7 +642,7 @@ OptimizeCompiledExprs(ExecContext* context, const std::vector& exprs) { std::chrono::high_resolution_clock::now(); double cost = std::chrono::duration(end - start).count(); - monitor::internal_core_optimize_expr_latency.Observe(cost / 1000); + milvus::monitor::internal_core_optimize_expr_latency.Observe(cost / 1000); } } // namespace exec diff --git a/internal/core/src/exec/operator/FilterBitsNode.cpp b/internal/core/src/exec/operator/FilterBitsNode.cpp index 380e359a6a..4855a2c2b7 100644 --- a/internal/core/src/exec/operator/FilterBitsNode.cpp +++ b/internal/core/src/exec/operator/FilterBitsNode.cpp @@ -101,7 +101,7 @@ PhyFilterBitsNode::GetOutput() { Assert(valid_bitset.size() == need_process_rows_); auto filter_ratio = bitset.size() != 0 ? 1 - float(bitset.count()) / bitset.size() : 0; - monitor::internal_core_expr_filter_ratio.Observe(filter_ratio); + milvus::monitor::internal_core_expr_filter_ratio.Observe(filter_ratio); // num_processed_rows_ = need_process_rows_; std::vector col_res; col_res.push_back(std::make_shared(std::move(bitset), @@ -111,7 +111,8 @@ PhyFilterBitsNode::GetOutput() { double scalar_cost = std::chrono::duration(scalar_end - scalar_start) .count(); - monitor::internal_core_search_latency_scalar.Observe(scalar_cost / 1000); + milvus::monitor::internal_core_search_latency_scalar.Observe(scalar_cost / + 1000); return std::make_shared(col_res); } diff --git a/internal/core/src/exec/operator/GroupByNode.cpp b/internal/core/src/exec/operator/GroupByNode.cpp index 50a4cbdfac..50fd3bb954 100644 --- a/internal/core/src/exec/operator/GroupByNode.cpp +++ b/internal/core/src/exec/operator/GroupByNode.cpp @@ -85,7 +85,8 @@ PhyGroupByNode::GetOutput() { double vector_cost = std::chrono::duration(vector_end - vector_start) .count(); - monitor::internal_core_search_latency_groupby.Observe(vector_cost / 1000); + milvus::monitor::internal_core_search_latency_groupby.Observe(vector_cost / + 1000); return input_; } diff --git a/internal/core/src/exec/operator/IterativeFilterNode.cpp b/internal/core/src/exec/operator/IterativeFilterNode.cpp index 9bc2712172..7c36a5c7f6 100644 --- a/internal/core/src/exec/operator/IterativeFilterNode.cpp +++ b/internal/core/src/exec/operator/IterativeFilterNode.cpp @@ -263,8 +263,8 @@ PhyIterativeFilterNode::GetOutput() { double scalar_cost = std::chrono::duration(scalar_end - scalar_start) .count(); - monitor::internal_core_search_latency_iterative_filter.Observe(scalar_cost / - 1000); + milvus::monitor::internal_core_search_latency_iterative_filter.Observe( + scalar_cost / 1000); return input_; } diff --git a/internal/core/src/exec/operator/RandomSampleNode.cpp b/internal/core/src/exec/operator/RandomSampleNode.cpp index 41b07ae431..8449698948 100644 --- a/internal/core/src/exec/operator/RandomSampleNode.cpp +++ b/internal/core/src/exec/operator/RandomSampleNode.cpp @@ -164,8 +164,8 @@ PhyRandomSampleNode::GetOutput() { std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration(end - start).count(); - monitor::internal_core_search_latency_random_sample.Observe(duration / - 1000); + milvus::monitor::internal_core_search_latency_random_sample.Observe( + duration / 1000); is_finished_ = true; return result; } diff --git 
a/internal/core/src/exec/operator/RescoresNode.cpp b/internal/core/src/exec/operator/RescoresNode.cpp index 437f6391ba..ba1b1a8286 100644 --- a/internal/core/src/exec/operator/RescoresNode.cpp +++ b/internal/core/src/exec/operator/RescoresNode.cpp @@ -152,7 +152,8 @@ PhyRescoresNode::GetOutput() { double scalar_cost = std::chrono::duration(scalar_end - scalar_start) .count(); - monitor::internal_core_search_latency_rescore.Observe(scalar_cost / 1000); + milvus::monitor::internal_core_search_latency_rescore.Observe(scalar_cost / + 1000); return input_; }; diff --git a/internal/core/src/exec/operator/VectorSearchNode.cpp b/internal/core/src/exec/operator/VectorSearchNode.cpp index 8a3656f8a3..f08eec39da 100644 --- a/internal/core/src/exec/operator/VectorSearchNode.cpp +++ b/internal/core/src/exec/operator/VectorSearchNode.cpp @@ -99,7 +99,8 @@ PhyVectorSearchNode::GetOutput() { double vector_cost = std::chrono::duration(vector_end - vector_start) .count(); - monitor::internal_core_search_latency_vector.Observe(vector_cost / 1000); + milvus::monitor::internal_core_search_latency_vector.Observe(vector_cost / + 1000); // for now, vector search store result in query_context // this node interface just return bitset return input_; diff --git a/internal/core/src/index/SkipIndex.h b/internal/core/src/index/SkipIndex.h index fc1c4d32d2..304fbf38df 100644 --- a/internal/core/src/index/SkipIndex.h +++ b/internal/core/src/index/SkipIndex.h @@ -79,6 +79,7 @@ class FieldChunkMetricsTranslator column_(column), meta_(cachinglayer::StorageType::MEMORY, milvus::cachinglayer::CellIdMappingMode::IDENTICAL, + milvus::cachinglayer::CellDataType::OTHER, CacheWarmupPolicy::CacheWarmupPolicy_Disable, false) { } diff --git a/internal/core/src/segcore/ChunkedSegmentSealedImpl.cpp b/internal/core/src/segcore/ChunkedSegmentSealedImpl.cpp index 2842ec7c79..6fcbd4890e 100644 --- a/internal/core/src/segcore/ChunkedSegmentSealedImpl.cpp +++ b/internal/core/src/segcore/ChunkedSegmentSealedImpl.cpp @@ -1919,7 +1919,8 @@ ChunkedSegmentSealedImpl::bulk_subscript(FieldId field_id, double get_vector_cost = std::chrono::duration( get_vector_end - get_vector_start) .count(); - monitor::internal_core_get_vector_latency.Observe(get_vector_cost / 1000); + milvus::monitor::internal_core_get_vector_latency.Observe(get_vector_cost / + 1000); return vector; } diff --git a/internal/core/src/segcore/SegmentInterface.cpp b/internal/core/src/segcore/SegmentInterface.cpp index 68be77095f..64fae7801e 100644 --- a/internal/core/src/segcore/SegmentInterface.cpp +++ b/internal/core/src/segcore/SegmentInterface.cpp @@ -150,7 +150,7 @@ SegmentInternalInterface::Retrieve(tracer::TraceContext* trace_ctx, double get_entry_cost = std::chrono::duration( get_target_entry_end - get_target_entry_start) .count(); - monitor::internal_core_retrieve_get_target_entry_latency.Observe( + milvus::monitor::internal_core_retrieve_get_target_entry_latency.Observe( get_entry_cost / 1000); return results; } @@ -274,7 +274,7 @@ SegmentInternalInterface::Retrieve(tracer::TraceContext* trace_ctx, double get_entry_cost = std::chrono::duration( get_target_entry_end - get_target_entry_start) .count(); - monitor::internal_core_retrieve_get_target_entry_latency.Observe( + milvus::monitor::internal_core_retrieve_get_target_entry_latency.Observe( get_entry_cost / 1000); return results; } diff --git a/internal/core/src/segcore/Utils.cpp b/internal/core/src/segcore/Utils.cpp index 87a1bde70e..f762f1dc67 100644 --- a/internal/core/src/segcore/Utils.cpp +++ 
b/internal/core/src/segcore/Utils.cpp @@ -1117,4 +1117,16 @@ getCacheWarmupPolicy(bool is_vector, bool is_index, bool in_load_list) { : manager.getScalarFieldCacheWarmupPolicy(); } } + +milvus::cachinglayer::CellDataType +getCellDataType(bool is_vector, bool is_index) { + if (is_index) { + return is_vector ? milvus::cachinglayer::CellDataType::VECTOR_INDEX + : milvus::cachinglayer::CellDataType::SCALAR_INDEX; + } else { + return is_vector ? milvus::cachinglayer::CellDataType::VECTOR_FIELD + : milvus::cachinglayer::CellDataType::SCALAR_FIELD; + } +} + } // namespace milvus::segcore diff --git a/internal/core/src/segcore/Utils.h b/internal/core/src/segcore/Utils.h index fe8d0fdb0d..d551e88d71 100644 --- a/internal/core/src/segcore/Utils.h +++ b/internal/core/src/segcore/Utils.h @@ -20,6 +20,7 @@ #include "common/type_c.h" #include "common/Types.h" #include "index/Index.h" +#include "cachinglayer/Utils.h" #include "segcore/ConcurrentVector.h" namespace milvus::segcore { @@ -139,4 +140,7 @@ upper_bound(const ConcurrentVector& timestamps, CacheWarmupPolicy getCacheWarmupPolicy(bool is_vector, bool is_index, bool in_load_list = true); +milvus::cachinglayer::CellDataType +getCellDataType(bool is_vector, bool is_index); + } // namespace milvus::segcore diff --git a/internal/core/src/segcore/reduce/Reduce.cpp b/internal/core/src/segcore/reduce/Reduce.cpp index 2c0af354cd..52bd545ea2 100644 --- a/internal/core/src/segcore/reduce/Reduce.cpp +++ b/internal/core/src/segcore/reduce/Reduce.cpp @@ -199,7 +199,7 @@ ReduceHelper::FillEntryData() { std::chrono::duration(get_target_entry_end - get_target_entry_start) .count(); - monitor::internal_core_search_get_target_entry_latency.Observe( + milvus::monitor::internal_core_search_get_target_entry_latency.Observe( get_entry_cost / 1000); } } diff --git a/internal/core/src/segcore/segcore_init_c.cpp b/internal/core/src/segcore/segcore_init_c.cpp index 7fb0909e60..e82dfba9bf 100644 --- a/internal/core/src/segcore/segcore_init_c.cpp +++ b/internal/core/src/segcore/segcore_init_c.cpp @@ -195,8 +195,9 @@ ConfigureTieredStorage(const CacheWarmupPolicy scalarFieldCacheWarmupPolicy, const int64_t disk_low_watermark_bytes, const int64_t disk_high_watermark_bytes, const int64_t disk_max_bytes, - const bool evictionEnabled, + const bool eviction_enabled, const int64_t cache_touch_window_ms, + const bool background_eviction_enabled, const int64_t eviction_interval_ms, const int64_t cache_cell_unaccessed_survival_time, const float overloaded_memory_threshold_percentage, @@ -215,8 +216,9 @@ ConfigureTieredStorage(const CacheWarmupPolicy scalarFieldCacheWarmupPolicy, disk_low_watermark_bytes, disk_high_watermark_bytes, disk_max_bytes}, - evictionEnabled, + eviction_enabled, {cache_touch_window_ms, + background_eviction_enabled, eviction_interval_ms, cache_cell_unaccessed_survival_time, overloaded_memory_threshold_percentage, diff --git a/internal/core/src/segcore/segcore_init_c.h b/internal/core/src/segcore/segcore_init_c.h index b4ced4c1e0..c1230dd90e 100644 --- a/internal/core/src/segcore/segcore_init_c.h +++ b/internal/core/src/segcore/segcore_init_c.h @@ -107,9 +107,10 @@ ConfigureTieredStorage( const int64_t disk_high_watermark_bytes, const int64_t disk_max_bytes, // eviction enabled - const bool evictionEnabled, + const bool eviction_enabled, // eviction configs const int64_t cache_touch_window_ms, + const bool background_eviction_enabled, const int64_t eviction_interval_ms, const int64_t cache_cell_unaccessed_survival_time, const float 
overloaded_memory_threshold_percentage, diff --git a/internal/core/src/segcore/storagev1translator/ChunkTranslator.cpp b/internal/core/src/segcore/storagev1translator/ChunkTranslator.cpp index 5bbe1fffa7..bc5051524c 100644 --- a/internal/core/src/segcore/storagev1translator/ChunkTranslator.cpp +++ b/internal/core/src/segcore/storagev1translator/ChunkTranslator.cpp @@ -84,6 +84,9 @@ ChunkTranslator::ChunkTranslator( meta_(use_mmap ? milvus::cachinglayer::StorageType::DISK : milvus::cachinglayer::StorageType::MEMORY, milvus::cachinglayer::CellIdMappingMode::IDENTICAL, + milvus::segcore::getCellDataType( + IsVectorDataType(field_meta.get_data_type()), + /* is_index */ false), milvus::segcore::getCacheWarmupPolicy( IsVectorDataType(field_meta.get_data_type()), /* is_index */ false, diff --git a/internal/core/src/segcore/storagev1translator/ChunkTranslator.h b/internal/core/src/segcore/storagev1translator/ChunkTranslator.h index 8d96014625..8df8126d87 100644 --- a/internal/core/src/segcore/storagev1translator/ChunkTranslator.h +++ b/internal/core/src/segcore/storagev1translator/ChunkTranslator.h @@ -32,10 +32,12 @@ struct CTMeta : public milvus::cachinglayer::Meta { virt_chunk_order_; // indicates the size of each virtual chunk, i.e. 2^virt_chunk_order_ CTMeta(milvus::cachinglayer::StorageType storage_type, milvus::cachinglayer::CellIdMappingMode cell_id_mapping_mode, + milvus::cachinglayer::CellDataType cell_data_type, CacheWarmupPolicy cache_warmup_policy, bool support_eviction) : milvus::cachinglayer::Meta(storage_type, cell_id_mapping_mode, + cell_data_type, cache_warmup_policy, support_eviction) { } diff --git a/internal/core/src/segcore/storagev1translator/DefaultValueChunkTranslator.cpp b/internal/core/src/segcore/storagev1translator/DefaultValueChunkTranslator.cpp index ab65fa699a..75d48d4a5d 100644 --- a/internal/core/src/segcore/storagev1translator/DefaultValueChunkTranslator.cpp +++ b/internal/core/src/segcore/storagev1translator/DefaultValueChunkTranslator.cpp @@ -30,6 +30,9 @@ DefaultValueChunkTranslator::DefaultValueChunkTranslator( meta_(use_mmap ? 
milvus::cachinglayer::StorageType::DISK : milvus::cachinglayer::StorageType::MEMORY, milvus::cachinglayer::CellIdMappingMode::ALWAYS_ZERO, + milvus::segcore::getCellDataType( + IsVectorDataType(field_meta.get_data_type()), + /* is_index */ false), milvus::segcore::getCacheWarmupPolicy( IsVectorDataType(field_meta.get_data_type()), /* is_index */ false, diff --git a/internal/core/src/segcore/storagev1translator/InterimSealedIndexTranslator.cpp b/internal/core/src/segcore/storagev1translator/InterimSealedIndexTranslator.cpp index f10d4ee095..3ad81c7474 100644 --- a/internal/core/src/segcore/storagev1translator/InterimSealedIndexTranslator.cpp +++ b/internal/core/src/segcore/storagev1translator/InterimSealedIndexTranslator.cpp @@ -24,6 +24,9 @@ InterimSealedIndexTranslator::InterimSealedIndexTranslator( index_key_(fmt::format("seg_{}_ii_{}", segment_id, field_id)), meta_(milvus::cachinglayer::StorageType::MEMORY, milvus::cachinglayer::CellIdMappingMode::ALWAYS_ZERO, + milvus::segcore::getCellDataType( + /* is_vector */ true, + /* is_index */ true), milvus::segcore::getCacheWarmupPolicy( /* is_vector */ true, /* is_index */ true), diff --git a/internal/core/src/segcore/storagev1translator/SealedIndexTranslator.cpp b/internal/core/src/segcore/storagev1translator/SealedIndexTranslator.cpp index 9a3a3e6f0a..4aca3d51d1 100644 --- a/internal/core/src/segcore/storagev1translator/SealedIndexTranslator.cpp +++ b/internal/core/src/segcore/storagev1translator/SealedIndexTranslator.cpp @@ -36,8 +36,11 @@ SealedIndexTranslator::SealedIndexTranslator( ? milvus::cachinglayer::StorageType::DISK : milvus::cachinglayer::StorageType::MEMORY, milvus::cachinglayer::CellIdMappingMode::ALWAYS_ZERO, + milvus::segcore::getCellDataType( + /* is_vector */ IsVectorDataType(load_index_info->field_type), + /* is_index */ true), milvus::segcore::getCacheWarmupPolicy( - IsVectorDataType(load_index_info->field_type), + /* is_vector */ IsVectorDataType(load_index_info->field_type), /* is_index */ true), /* support_eviction */ // if index data supports lazy load internally, we don't need to support eviction for index metadata diff --git a/internal/core/src/segcore/storagev1translator/V1SealedIndexTranslator.cpp b/internal/core/src/segcore/storagev1translator/V1SealedIndexTranslator.cpp index 3232b0839f..f69dc8b463 100644 --- a/internal/core/src/segcore/storagev1translator/V1SealedIndexTranslator.cpp +++ b/internal/core/src/segcore/storagev1translator/V1SealedIndexTranslator.cpp @@ -34,8 +34,11 @@ V1SealedIndexTranslator::V1SealedIndexTranslator( ? 
milvus::cachinglayer::StorageType::DISK : milvus::cachinglayer::StorageType::MEMORY, milvus::cachinglayer::CellIdMappingMode::ALWAYS_ZERO, + milvus::segcore::getCellDataType( + /* is_vector */ IsVectorDataType(load_index_info->field_type), + /* is_index */ true), milvus::segcore::getCacheWarmupPolicy( - IsVectorDataType(load_index_info->field_type), + /* is_vector */ IsVectorDataType(load_index_info->field_type), /* is_index */ true), /* support_eviction */ false) { } diff --git a/internal/core/src/segcore/storagev2translator/GroupCTMeta.h b/internal/core/src/segcore/storagev2translator/GroupCTMeta.h index e238c3f174..a49c7151a4 100644 --- a/internal/core/src/segcore/storagev2translator/GroupCTMeta.h +++ b/internal/core/src/segcore/storagev2translator/GroupCTMeta.h @@ -26,10 +26,12 @@ struct GroupCTMeta : public milvus::cachinglayer::Meta { GroupCTMeta(size_t num_fields, milvus::cachinglayer::StorageType storage_type, milvus::cachinglayer::CellIdMappingMode cell_id_mapping_mode, + milvus::cachinglayer::CellDataType cell_data_type, CacheWarmupPolicy cache_warmup_policy, bool support_eviction) : milvus::cachinglayer::Meta(storage_type, cell_id_mapping_mode, + cell_data_type, cache_warmup_policy, support_eviction), num_fields_(num_fields) { diff --git a/internal/core/src/segcore/storagev2translator/GroupChunkTranslator.cpp b/internal/core/src/segcore/storagev2translator/GroupChunkTranslator.cpp index 596038b242..59384a63e1 100644 --- a/internal/core/src/segcore/storagev2translator/GroupChunkTranslator.cpp +++ b/internal/core/src/segcore/storagev2translator/GroupChunkTranslator.cpp @@ -62,6 +62,17 @@ GroupChunkTranslator::GroupChunkTranslator( use_mmap ? milvus::cachinglayer::StorageType::DISK : milvus::cachinglayer::StorageType::MEMORY, milvus::cachinglayer::CellIdMappingMode::IDENTICAL, + milvus::segcore::getCellDataType( + /* is_vector */ + [&]() { + for (const auto& [fid, field_meta] : field_metas_) { + if (IsVectorDataType(field_meta.get_data_type())) { + return true; + } + } + return false; + }(), + /* is_index */ false), milvus::segcore::getCacheWarmupPolicy( /* is_vector */ [&]() { diff --git a/internal/core/src/storage/FileWriter.cpp b/internal/core/src/storage/FileWriter.cpp index 9ec5cc40d3..dd9317765a 100644 --- a/internal/core/src/storage/FileWriter.cpp +++ b/internal/core/src/storage/FileWriter.cpp @@ -209,14 +209,14 @@ FileWriter::WriteWithDirectIO(const void* data, size_t nbyte) { assert(src == static_cast(data) + nbyte); - monitor::disk_write_total_bytes_direct.Increment(nbyte); + milvus::monitor::disk_write_total_bytes_direct.Increment(nbyte); } void FileWriter::WriteWithBufferedIO(const void* data, size_t nbyte) { PositionedWriteWithCheck(data, nbyte, file_size_); file_size_ += nbyte; - monitor::disk_write_total_bytes_buffered.Increment(nbyte); + milvus::monitor::disk_write_total_bytes_buffered.Increment(nbyte); } void @@ -270,7 +270,7 @@ FileWriter::FlushWithDirectIO() { nearest_aligned_offset - offset_); PositionedWriteWithCheck(aligned_buf_, nearest_aligned_offset, file_size_); file_size_ += offset_; - monitor::disk_write_total_bytes_direct.Increment(offset_); + milvus::monitor::disk_write_total_bytes_direct.Increment(offset_); // truncate the file to the actual size since the file written by the aligned buffer may be larger than the actual size if (ftruncate(fd_, file_size_) != 0) { Cleanup(); diff --git a/internal/core/src/storage/MinioChunkManager.cpp b/internal/core/src/storage/MinioChunkManager.cpp index 923d616f17..9085c621aa 100644 --- 
a/internal/core/src/storage/MinioChunkManager.cpp +++ b/internal/core/src/storage/MinioChunkManager.cpp @@ -484,7 +484,7 @@ MinioChunkManager::ObjectExists(const std::string& bucket_name, auto start = std::chrono::system_clock::now(); auto outcome = client_->HeadObject(request); - monitor::internal_storage_request_latency_stat.Observe( + milvus::monitor::internal_storage_request_latency_stat.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); @@ -492,17 +492,17 @@ MinioChunkManager::ObjectExists(const std::string& bucket_name, if (!outcome.IsSuccess()) { const auto& err = outcome.GetError(); if (!IsNotFound(err.GetErrorType())) { - monitor::internal_storage_op_count_stat_fail.Increment(); + milvus::monitor::internal_storage_op_count_stat_fail.Increment(); ThrowS3Error("ObjectExists", err, "params, bucket={}, object={}", bucket_name, object_name); } - monitor::internal_storage_op_count_stat_suc.Increment(); + milvus::monitor::internal_storage_op_count_stat_suc.Increment(); return false; } - monitor::internal_storage_op_count_stat_suc.Increment(); + milvus::monitor::internal_storage_op_count_stat_suc.Increment(); return true; } @@ -515,12 +515,12 @@ MinioChunkManager::GetObjectSize(const std::string& bucket_name, auto start = std::chrono::system_clock::now(); auto outcome = client_->HeadObject(request); - monitor::internal_storage_request_latency_stat.Observe( + milvus::monitor::internal_storage_request_latency_stat.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); if (!outcome.IsSuccess()) { - monitor::internal_storage_op_count_stat_fail.Increment(); + milvus::monitor::internal_storage_op_count_stat_fail.Increment(); const auto& err = outcome.GetError(); ThrowS3Error("GetObjectSize", err, @@ -528,7 +528,7 @@ MinioChunkManager::GetObjectSize(const std::string& bucket_name, bucket_name, object_name); } - monitor::internal_storage_op_count_stat_suc.Increment(); + milvus::monitor::internal_storage_op_count_stat_suc.Increment(); return outcome.GetResult().GetContentLength(); } @@ -541,7 +541,7 @@ MinioChunkManager::DeleteObject(const std::string& bucket_name, auto start = std::chrono::system_clock::now(); auto outcome = client_->DeleteObject(request); - monitor::internal_storage_request_latency_remove.Observe( + milvus::monitor::internal_storage_request_latency_remove.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); @@ -549,17 +549,17 @@ MinioChunkManager::DeleteObject(const std::string& bucket_name, if (!outcome.IsSuccess()) { const auto& err = outcome.GetError(); if (!IsNotFound(err.GetErrorType())) { - monitor::internal_storage_op_count_remove_fail.Increment(); + milvus::monitor::internal_storage_op_count_remove_fail.Increment(); ThrowS3Error("DeleteObject", err, "params, bucket={}, object={}", bucket_name, object_name); } - monitor::internal_storage_op_count_remove_suc.Increment(); + milvus::monitor::internal_storage_op_count_remove_suc.Increment(); return false; } - monitor::internal_storage_op_count_remove_suc.Increment(); + milvus::monitor::internal_storage_op_count_remove_suc.Increment(); return true; } @@ -580,14 +580,14 @@ MinioChunkManager::PutObjectBuffer(const std::string& bucket_name, auto start = std::chrono::system_clock::now(); auto outcome = client_->PutObject(request); - monitor::internal_storage_request_latency_put.Observe( + milvus::monitor::internal_storage_request_latency_put.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) 
.count()); - monitor::internal_storage_kv_size_put.Observe(size); + milvus::monitor::internal_storage_kv_size_put.Observe(size); if (!outcome.IsSuccess()) { - monitor::internal_storage_op_count_put_fail.Increment(); + milvus::monitor::internal_storage_op_count_put_fail.Increment(); const auto& err = outcome.GetError(); ThrowS3Error("PutObjectBuffer", err, @@ -595,7 +595,7 @@ MinioChunkManager::PutObjectBuffer(const std::string& bucket_name, bucket_name, object_name); } - monitor::internal_storage_op_count_put_suc.Increment(); + milvus::monitor::internal_storage_op_count_put_suc.Increment(); return true; } @@ -661,14 +661,14 @@ MinioChunkManager::GetObjectBuffer(const std::string& bucket_name, }); auto start = std::chrono::system_clock::now(); auto outcome = client_->GetObject(request); - monitor::internal_storage_request_latency_get.Observe( + milvus::monitor::internal_storage_request_latency_get.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_kv_size_get.Observe(size); + milvus::monitor::internal_storage_kv_size_get.Observe(size); if (!outcome.IsSuccess()) { - monitor::internal_storage_op_count_get_fail.Increment(); + milvus::monitor::internal_storage_op_count_get_fail.Increment(); const auto& err = outcome.GetError(); ThrowS3Error("GetObjectBuffer", err, @@ -676,7 +676,7 @@ MinioChunkManager::GetObjectBuffer(const std::string& bucket_name, bucket_name, object_name); } - monitor::internal_storage_op_count_get_suc.Increment(); + milvus::monitor::internal_storage_op_count_get_suc.Increment(); return size; } @@ -692,13 +692,13 @@ MinioChunkManager::ListObjects(const std::string& bucket_name, auto start = std::chrono::system_clock::now(); auto outcome = client_->ListObjects(request); - monitor::internal_storage_request_latency_list.Observe( + milvus::monitor::internal_storage_request_latency_list.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); if (!outcome.IsSuccess()) { - monitor::internal_storage_op_count_list_fail.Increment(); + milvus::monitor::internal_storage_op_count_list_fail.Increment(); const auto& err = outcome.GetError(); ThrowS3Error("ListObjects", err, @@ -706,7 +706,7 @@ MinioChunkManager::ListObjects(const std::string& bucket_name, bucket_name, prefix); } - monitor::internal_storage_op_count_list_suc.Increment(); + milvus::monitor::internal_storage_op_count_list_suc.Increment(); auto objects = outcome.GetResult().GetContents(); for (auto& obj : objects) { objects_vec.emplace_back(obj.GetKey()); diff --git a/internal/core/src/storage/azure/AzureChunkManager.cpp b/internal/core/src/storage/azure/AzureChunkManager.cpp index a06f8b23fc..917c6b3ae7 100644 --- a/internal/core/src/storage/azure/AzureChunkManager.cpp +++ b/internal/core/src/storage/azure/AzureChunkManager.cpp @@ -175,13 +175,13 @@ AzureChunkManager::ObjectExists(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->ObjectExists(bucket_name, object_name); - monitor::internal_storage_request_latency_stat.Observe( + milvus::monitor::internal_storage_request_latency_stat.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_stat_suc.Increment(); + milvus::monitor::internal_storage_op_count_stat_suc.Increment(); } catch (std::exception& err) { - monitor::internal_storage_op_count_stat_fail.Increment(); + milvus::monitor::internal_storage_op_count_stat_fail.Increment(); 
ThrowAzureError("ObjectExists", err, "params, bucket={}, object={}", @@ -198,13 +198,13 @@ AzureChunkManager::GetObjectSize(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->GetObjectSize(bucket_name, object_name); - monitor::internal_storage_request_latency_stat.Observe( + milvus::monitor::internal_storage_request_latency_stat.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_stat_suc.Increment(); + milvus::monitor::internal_storage_op_count_stat_suc.Increment(); } catch (std::exception& err) { - monitor::internal_storage_op_count_stat_fail.Increment(); + milvus::monitor::internal_storage_op_count_stat_fail.Increment(); ThrowAzureError("GetObjectSize", err, "params, bucket={}, object={}", @@ -221,13 +221,13 @@ AzureChunkManager::DeleteObject(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->DeleteObject(bucket_name, object_name); - monitor::internal_storage_request_latency_remove.Observe( + milvus::monitor::internal_storage_request_latency_remove.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_remove_suc.Increment(); + milvus::monitor::internal_storage_op_count_remove_suc.Increment(); } catch (std::exception& err) { - monitor::internal_storage_op_count_remove_fail.Increment(); + milvus::monitor::internal_storage_op_count_remove_fail.Increment(); ThrowAzureError("DeleteObject", err, "params, bucket={}, object={}", @@ -246,14 +246,14 @@ AzureChunkManager::PutObjectBuffer(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->PutObjectBuffer(bucket_name, object_name, buf, size); - monitor::internal_storage_request_latency_put.Observe( + milvus::monitor::internal_storage_request_latency_put.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_put_suc.Increment(); - monitor::internal_storage_kv_size_put.Observe(size); + milvus::monitor::internal_storage_op_count_put_suc.Increment(); + milvus::monitor::internal_storage_kv_size_put.Observe(size); } catch (std::exception& err) { - monitor::internal_storage_op_count_put_fail.Increment(); + milvus::monitor::internal_storage_op_count_put_fail.Increment(); ThrowAzureError("PutObjectBuffer", err, "params, bucket={}, object={}", @@ -272,14 +272,14 @@ AzureChunkManager::GetObjectBuffer(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->GetObjectBuffer(bucket_name, object_name, buf, size); - monitor::internal_storage_request_latency_get.Observe( + milvus::monitor::internal_storage_request_latency_get.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_get_suc.Increment(); - monitor::internal_storage_kv_size_get.Observe(size); + milvus::monitor::internal_storage_op_count_get_suc.Increment(); + milvus::monitor::internal_storage_kv_size_get.Observe(size); } catch (std::exception& err) { - monitor::internal_storage_op_count_get_fail.Increment(); + milvus::monitor::internal_storage_op_count_get_fail.Increment(); ThrowAzureError("GetObjectBuffer", err, "params, bucket={}, object={}", @@ -296,13 +296,13 @@ AzureChunkManager::ListObjects(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->ListObjects(bucket_name, prefix); - 
monitor::internal_storage_request_latency_list.Observe( + milvus::monitor::internal_storage_request_latency_list.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_list_suc.Increment(); + milvus::monitor::internal_storage_op_count_list_suc.Increment(); } catch (std::exception& err) { - monitor::internal_storage_op_count_list_fail.Increment(); + milvus::monitor::internal_storage_op_count_list_fail.Increment(); ThrowAzureError("ListObjects", err, "params, bucket={}, prefix={}", diff --git a/internal/core/src/storage/gcp-native-storage/GcpNativeChunkManager.cpp b/internal/core/src/storage/gcp-native-storage/GcpNativeChunkManager.cpp index 0de8315848..1e6ed89c01 100644 --- a/internal/core/src/storage/gcp-native-storage/GcpNativeChunkManager.cpp +++ b/internal/core/src/storage/gcp-native-storage/GcpNativeChunkManager.cpp @@ -135,13 +135,13 @@ GcpNativeChunkManager::ObjectExists(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->ObjectExists(bucket_name, object_name); - monitor::internal_storage_request_latency_stat.Observe( + milvus::monitor::internal_storage_request_latency_stat.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_stat_suc.Increment(); + milvus::monitor::internal_storage_op_count_stat_suc.Increment(); } catch (std::exception& err) { - monitor::internal_storage_op_count_stat_fail.Increment(); + milvus::monitor::internal_storage_op_count_stat_fail.Increment(); ThrowGcpNativeError("ObjectExists", err, "params, bucket={}, object={}", @@ -158,13 +158,13 @@ GcpNativeChunkManager::GetObjectSize(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->GetObjectSize(bucket_name, object_name); - monitor::internal_storage_request_latency_stat.Observe( + milvus::monitor::internal_storage_request_latency_stat.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_stat_suc.Increment(); + milvus::monitor::internal_storage_op_count_stat_suc.Increment(); } catch (std::exception& err) { - monitor::internal_storage_op_count_stat_fail.Increment(); + milvus::monitor::internal_storage_op_count_stat_fail.Increment(); ThrowGcpNativeError("GetObjectSize", err, "params, bucket={}, object={}", @@ -181,13 +181,13 @@ GcpNativeChunkManager::DeleteObject(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->DeleteObject(bucket_name, object_name); - monitor::internal_storage_request_latency_remove.Observe( + milvus::monitor::internal_storage_request_latency_remove.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_remove_suc.Increment(); + milvus::monitor::internal_storage_op_count_remove_suc.Increment(); } catch (std::exception& err) { - monitor::internal_storage_op_count_remove_fail.Increment(); + milvus::monitor::internal_storage_op_count_remove_fail.Increment(); ThrowGcpNativeError("DeleteObject", err, "params, bucket={}, object={}", @@ -206,14 +206,14 @@ GcpNativeChunkManager::PutObjectBuffer(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->PutObjectBuffer(bucket_name, object_name, buf, size); - monitor::internal_storage_request_latency_put.Observe( + milvus::monitor::internal_storage_request_latency_put.Observe( 
std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_put_suc.Increment(); - monitor::internal_storage_kv_size_put.Observe(size); + milvus::monitor::internal_storage_op_count_put_suc.Increment(); + milvus::monitor::internal_storage_kv_size_put.Observe(size); } catch (std::exception& err) { - monitor::internal_storage_op_count_put_fail.Increment(); + milvus::monitor::internal_storage_op_count_put_fail.Increment(); ThrowGcpNativeError("PutObjectBuffer", err, "params, bucket={}, object={}", @@ -232,14 +232,14 @@ GcpNativeChunkManager::GetObjectBuffer(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->GetObjectBuffer(bucket_name, object_name, buf, size); - monitor::internal_storage_request_latency_get.Observe( + milvus::monitor::internal_storage_request_latency_get.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_get_suc.Increment(); - monitor::internal_storage_kv_size_get.Observe(size); + milvus::monitor::internal_storage_op_count_get_suc.Increment(); + milvus::monitor::internal_storage_kv_size_get.Observe(size); } catch (std::exception& err) { - monitor::internal_storage_op_count_get_fail.Increment(); + milvus::monitor::internal_storage_op_count_get_fail.Increment(); ThrowGcpNativeError("GetObjectBuffer", err, "params, bucket={}, object={}", @@ -256,13 +256,13 @@ GcpNativeChunkManager::ListObjects(const std::string& bucket_name, try { auto start = std::chrono::system_clock::now(); res = client_->ListObjects(bucket_name, prefix); - monitor::internal_storage_request_latency_list.Observe( + milvus::monitor::internal_storage_request_latency_list.Observe( std::chrono::duration_cast( std::chrono::system_clock::now() - start) .count()); - monitor::internal_storage_op_count_list_suc.Increment(); + milvus::monitor::internal_storage_op_count_list_suc.Increment(); } catch (std::exception& err) { - monitor::internal_storage_op_count_list_fail.Increment(); + milvus::monitor::internal_storage_op_count_list_fail.Increment(); ThrowGcpNativeError("ListObjects", err, "params, bucket={}, prefix={}", diff --git a/internal/core/thirdparty/milvus-common/CMakeLists.txt b/internal/core/thirdparty/milvus-common/CMakeLists.txt index 0ba131f1b3..b7d7b7af0e 100644 --- a/internal/core/thirdparty/milvus-common/CMakeLists.txt +++ b/internal/core/thirdparty/milvus-common/CMakeLists.txt @@ -13,7 +13,7 @@ milvus_add_pkg_config("milvus-common") set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES "") -set( MILVUS-COMMON-VERSION 2008bac ) +set( MILVUS-COMMON-VERSION 7204eb6 ) set( GIT_REPOSITORY "https://github.com/zilliztech/milvus-common.git") message(STATUS "milvus-common repo: ${GIT_REPOSITORY}") diff --git a/internal/core/unittest/init_gtest.cpp b/internal/core/unittest/init_gtest.cpp index 8c317381f1..8423b5b69c 100644 --- a/internal/core/unittest/init_gtest.cpp +++ b/internal/core/unittest/init_gtest.cpp @@ -38,7 +38,7 @@ main(int argc, char** argv) { CacheWarmupPolicy::CacheWarmupPolicy_Disable}, {1024 * mb, 1024 * mb, 1024 * mb, 1024 * mb, 1024 * mb, 1024 * mb}, true, - {10, 30}); + {10, true, 30}); return RUN_ALL_TESTS(); } diff --git a/internal/core/unittest/test_utils/cachinglayer_test_utils.h b/internal/core/unittest/test_utils/cachinglayer_test_utils.h index a0785e0c4e..ae09aa2360 100644 --- a/internal/core/unittest/test_utils/cachinglayer_test_utils.h +++ 
b/internal/core/unittest/test_utils/cachinglayer_test_utils.h @@ -40,6 +40,7 @@ class TestChunkTranslator : public Translator { meta_(segcore::storagev1translator::CTMeta( StorageType::MEMORY, CellIdMappingMode::IDENTICAL, + CellDataType::SCALAR_FIELD, CacheWarmupPolicy::CacheWarmupPolicy_Disable, true)) { meta_.num_rows_until_chunk_.reserve(num_cells_ + 1); @@ -118,6 +119,7 @@ class TestGroupChunkTranslator : public Translator { num_fields, StorageType::MEMORY, CellIdMappingMode::IDENTICAL, + CellDataType::OTHER, CacheWarmupPolicy::CacheWarmupPolicy_Disable, true)) { meta_.num_rows_until_chunk_.reserve(num_cells_ + 1); @@ -185,6 +187,7 @@ class TestIndexTranslator : public Translator { meta_(milvus::cachinglayer::Meta( StorageType::MEMORY, CellIdMappingMode::IDENTICAL, + CellDataType::OTHER, CacheWarmupPolicy::CacheWarmupPolicy_Disable, false)) { } diff --git a/internal/util/initcore/init_core.go b/internal/util/initcore/init_core.go index 6f1c0444d7..c646afc937 100644 --- a/internal/util/initcore/init_core.go +++ b/internal/util/initcore/init_core.go @@ -353,6 +353,7 @@ func InitTieredStorage(params *paramtable.ComponentParam) error { evictionEnabled := C.bool(params.QueryNodeCfg.TieredEvictionEnabled.GetAsBool()) cacheTouchWindowMs := C.int64_t(params.QueryNodeCfg.TieredCacheTouchWindowMs.GetAsInt64()) + backgroundEvictionEnabled := C.bool(params.QueryNodeCfg.TieredBackgroundEvictionEnabled.GetAsBool()) evictionIntervalMs := C.int64_t(params.QueryNodeCfg.TieredEvictionIntervalMs.GetAsInt64()) cacheCellUnaccessedSurvivalTime := C.int64_t(params.QueryNodeCfg.CacheCellUnaccessedSurvivalTime.GetAsInt64()) loadingResourceFactor := C.float(params.QueryNodeCfg.TieredLoadingResourceFactor.GetAsFloat()) @@ -367,7 +368,8 @@ func InitTieredStorage(params *paramtable.ComponentParam) error { vectorIndexCacheWarmupPolicy, memoryLowWatermarkBytes, memoryHighWatermarkBytes, memoryMaxBytes, diskLowWatermarkBytes, diskHighWatermarkBytes, diskMaxBytes, - evictionEnabled, cacheTouchWindowMs, evictionIntervalMs, cacheCellUnaccessedSurvivalTime, + evictionEnabled, cacheTouchWindowMs, + backgroundEvictionEnabled, evictionIntervalMs, cacheCellUnaccessedSurvivalTime, overloadedMemoryThresholdPercentage, loadingResourceFactor, maxDiskUsagePercentage, diskPath) tieredEvictableMemoryCacheRatio := params.QueryNodeCfg.TieredEvictableMemoryCacheRatio.GetAsFloat() diff --git a/pkg/util/paramtable/component_param.go b/pkg/util/paramtable/component_param.go index e6af7e5dc4..2543db88e8 100644 --- a/pkg/util/paramtable/component_param.go +++ b/pkg/util/paramtable/component_param.go @@ -2958,9 +2958,10 @@ type queryNodeConfig struct { TieredEvictableMemoryCacheRatio ParamItem `refreshable:"false"` TieredEvictableDiskCacheRatio ParamItem `refreshable:"false"` TieredCacheTouchWindowMs ParamItem `refreshable:"false"` + TieredBackgroundEvictionEnabled ParamItem `refreshable:"false"` TieredEvictionIntervalMs ParamItem `refreshable:"false"` - TieredLoadingResourceFactor ParamItem `refreshable:"false"` CacheCellUnaccessedSurvivalTime ParamItem `refreshable:"false"` + TieredLoadingResourceFactor ParamItem `refreshable:"false"` KnowhereScoreConsistency ParamItem `refreshable:"false"` @@ -3300,6 +3301,17 @@ eviction is necessary and the amount of data to evict from memory/disk. 
} p.TieredCacheTouchWindowMs.Init(base.mgr) + p.TieredBackgroundEvictionEnabled = ParamItem{ + Key: "queryNode.segcore.tieredStorage.backgroundEvictionEnabled", + Version: "2.6.2", + DefaultValue: "false", + Doc: `Enable background eviction for Tiered Storage. Defaults to false. +Background eviction performs periodic eviction in a separate thread. +It takes effect only when both 'evictionEnabled' and 'backgroundEvictionEnabled' are set to 'true'.`, + Export: true, + } + p.TieredBackgroundEvictionEnabled.Init(base.mgr) + p.TieredEvictionIntervalMs = ParamItem{ Key: "queryNode.segcore.tieredStorage.evictionIntervalMs", Version: "2.6.0", @@ -3311,11 +3323,29 @@ eviction is necessary and the amount of data to evict from memory/disk. } return fmt.Sprintf("%d", window) }, - Doc: "Interval in milliseconds to run periodic eviction.", + Doc: "Interval in milliseconds to run periodic eviction. Takes effect only when 'backgroundEvictionEnabled' is true.", Export: false, } p.TieredEvictionIntervalMs.Init(base.mgr) + p.CacheCellUnaccessedSurvivalTime = ParamItem{ + Key: "queryNode.segcore.tieredStorage.cacheTtl", + Version: "2.6.0", + DefaultValue: "0", + Formatter: func(v string) string { + timeout := getAsInt64(v) + if timeout <= 0 { + return "0" + } + return fmt.Sprintf("%d", timeout) + }, + Doc: `Time in seconds after which an unaccessed cache cell will be evicted. Takes effect only when 'backgroundEvictionEnabled' is true. +If a cached data hasn't been accessed again after this time since its last access, it will be evicted. +If set to 0, time based eviction is disabled.`, + Export: true, + } + p.CacheCellUnaccessedSurvivalTime.Init(base.mgr) + p.TieredLoadingResourceFactor = ParamItem{ Key: "queryNode.segcore.tieredStorage.loadingResourceFactor", Version: "2.6.0", @@ -3332,24 +3362,6 @@ eviction is necessary and the amount of data to evict from memory/disk. } p.TieredLoadingResourceFactor.Init(base.mgr) - p.CacheCellUnaccessedSurvivalTime = ParamItem{ - Key: "queryNode.segcore.tieredStorage.cacheTtl", - Version: "2.6.0", - DefaultValue: "0", - Formatter: func(v string) string { - timeout := getAsInt64(v) - if timeout <= 0 { - return "0" - } - return fmt.Sprintf("%d", timeout) - }, - Doc: `Time in seconds after which an unaccessed cache cell will be evicted. -If a cached data hasn't been accessed again after this time since its last access, it will be evicted. -If set to 0, time based eviction is disabled.`, - Export: true, - } - p.CacheCellUnaccessedSurvivalTime.Init(base.mgr) - p.EnableDisk = ParamItem{ Key: "queryNode.enableDisk", Version: "2.2.0",