enhance: Refine geometry cache with offsets (#44432)

issue: #43427

---------

Signed-off-by: Cai Zhang <cai.zhang@zilliz.com>
This commit is contained in:
cai.zhang 2025-09-18 20:24:02 +08:00 committed by GitHub
parent 94b1d66535
commit 5b8288a0ef
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 467 additions and 142 deletions

View File

@ -15,80 +15,93 @@
#include <mutex>
#include <shared_mutex>
#include <string>
#include <string_view>
#include <unordered_map>
#include <vector>
#include "common/EasyAssert.h"
#include "common/Geometry.h"
#include "common/Types.h"
namespace milvus {
namespace exec {
// Custom hash function for segment id + field id pair
// Custom hash functor for (segment id, field id) pairs used as map keys.
struct SegmentFieldHash {
    std::size_t
    operator()(const std::pair<int64_t, int64_t>& p) const {
        // Combine the two element hashes boost::hash_combine-style.  The
        // golden-ratio constant plus the shift mixing spreads bits much
        // better than the previous plain `h(a) ^ (h(b) << 1)`, which
        // collides easily (e.g. many (a, b)/(b, a) style pairs) because
        // std::hash<int64_t> is typically the identity.
        std::size_t seed = std::hash<int64_t>{}(p.first);
        seed ^= std::hash<int64_t>{}(p.second) + 0x9e3779b97f4a7c15ULL +
                (seed << 6) + (seed >> 2);
        return seed;
    }
};
// Helper function to create cache key from segment_id and field_id
// Build the cache-map key "<segment_id>_<field_id>" for a segment/field pair.
inline std::string
MakeCacheKey(int64_t segment_id, FieldId field_id) {
    std::string key = std::to_string(segment_id);
    key += '_';
    key += std::to_string(field_id.get());
    return key;
}
// Simple WKB-based Geometry cache for avoiding repeated WKB->Geometry conversions
// Vector-based Geometry cache that maintains original field data order
class SimpleGeometryCache {
public:
// Get or create Geometry from WKB data
std::shared_ptr<const Geometry>
GetOrCreate(const std::string_view& wkb_data) {
// Use WKB data content as key (could be optimized with hash later)
std::string key(wkb_data);
// Try read-only access first (most common case)
{
std::shared_lock<std::shared_mutex> lock(mutex_);
auto it = cache_.find(key);
if (it != cache_.end()) {
return it->second;
}
}
// Cache miss: create new Geometry with write lock
// Append WKB data during field loading
void
AppendData(const char* wkb_data, size_t size) {
std::lock_guard<std::shared_mutex> lock(mutex_);
// Double-check after acquiring write lock
auto it = cache_.find(key);
if (it != cache_.end()) {
return it->second;
if (size == 0 || wkb_data == nullptr) {
// Handle null/empty geometry - add invalid geometry
geometries_.emplace_back();
} else {
try {
// Create geometry directly in the vector
geometries_.emplace_back(wkb_data, size);
} catch (const std::exception& e) {
PanicInfo(UnexpectedError,
"Failed to construct geometry from WKB data: {}",
e.what());
}
}
}
// Construct new Geometry
try {
auto geometry = std::make_shared<const Geometry>(wkb_data.data(),
wkb_data.size());
cache_.emplace(key, geometry);
return geometry;
} catch (...) {
// Return nullptr on construction failure
// Get shared lock for batch operations (RAII)
std::shared_lock<std::shared_mutex>
AcquireReadLock() const {
return std::shared_lock<std::shared_mutex>(mutex_);
}
// Get Geometry by offset without locking (use with AcquireReadLock)
const Geometry*
GetByOffsetUnsafe(size_t offset) const {
if (offset >= geometries_.size()) {
return nullptr;
}
const auto& geometry = geometries_[offset];
return geometry.IsValid() ? &geometry : nullptr;
}
// Get Geometry by offset (thread-safe read for filtering)
const Geometry*
GetByOffset(size_t offset) const {
std::shared_lock<std::shared_mutex> lock(mutex_);
return GetByOffsetUnsafe(offset);
}
// Get total number of loaded geometries
size_t
Size() const {
std::shared_lock<std::shared_mutex> lock(mutex_);
return geometries_.size();
}
// Clear all cached geometries
void
Clear() {
std::lock_guard<std::shared_mutex> lock(mutex_);
cache_.clear();
geometries_.clear();
}
// Get cache statistics
size_t
Size() const {
// Check if cache is loaded
bool
IsLoaded() const {
std::shared_lock<std::shared_mutex> lock(mutex_);
return cache_.size();
return !geometries_.empty();
}
private:
mutable std::shared_mutex mutex_;
std::unordered_map<std::string, std::shared_ptr<const Geometry>> cache_;
mutable std::shared_mutex mutex_; // For read/write operations
std::vector<Geometry> geometries_; // Direct storage of Geometry objects
};
// Global cache instance per segment+field
@ -105,7 +118,7 @@ class SimpleGeometryCacheManager {
SimpleGeometryCache&
GetCache(int64_t segment_id, FieldId field_id) {
std::lock_guard<std::mutex> lock(mutex_);
auto key = std::make_pair(segment_id, field_id.get());
auto key = MakeCacheKey(segment_id, field_id);
auto it = caches_.find(key);
if (it != caches_.end()) {
return *(it->second);
@ -120,7 +133,7 @@ class SimpleGeometryCacheManager {
void
RemoveCache(int64_t segment_id, FieldId field_id) {
std::lock_guard<std::mutex> lock(mutex_);
auto key = std::make_pair(segment_id, field_id.get());
auto key = MakeCacheKey(segment_id, field_id);
caches_.erase(key);
}
@ -128,9 +141,11 @@ class SimpleGeometryCacheManager {
void
RemoveSegmentCaches(int64_t segment_id) {
std::lock_guard<std::mutex> lock(mutex_);
auto segment_prefix = std::to_string(segment_id) + "_";
auto it = caches_.begin();
while (it != caches_.end()) {
if (it->first.first == segment_id) {
if (it->first.substr(0, segment_prefix.length()) ==
segment_prefix) {
it = caches_.erase(it);
} else {
++it;
@ -141,6 +156,7 @@ class SimpleGeometryCacheManager {
// Get cache statistics for monitoring
struct CacheStats {
size_t total_caches = 0;
size_t loaded_caches = 0;
size_t total_geometries = 0;
};
@ -150,7 +166,10 @@ class SimpleGeometryCacheManager {
CacheStats stats;
stats.total_caches = caches_.size();
for (const auto& [key, cache] : caches_) {
stats.total_geometries += cache->Size();
if (cache->IsLoaded()) {
stats.loaded_caches++;
stats.total_geometries += cache->Size();
}
}
return stats;
}
@ -161,11 +180,30 @@ class SimpleGeometryCacheManager {
operator=(const SimpleGeometryCacheManager&) = delete;
mutable std::mutex mutex_;
std::unordered_map<std::pair<int64_t, int64_t>,
std::unique_ptr<SimpleGeometryCache>,
SegmentFieldHash>
std::unordered_map<std::string, std::unique_ptr<SimpleGeometryCache>>
caches_;
};
} // namespace exec
// Convenient global functions for direct access to geometry cache
// Look up the cached Geometry at `offset` for the given segment/field.
// Returns nullptr when the offset is out of range or the entry is invalid
// (see SimpleGeometryCache::GetByOffset).
inline const Geometry*
GetGeometryByOffset(int64_t segment_id, FieldId field_id, size_t offset) {
    auto& manager = exec::SimpleGeometryCacheManager::Instance();
    return manager.GetCache(segment_id, field_id).GetByOffset(offset);
}
// Drop the geometry cache associated with one segment/field pair.
inline void
RemoveGeometryCache(int64_t segment_id, FieldId field_id) {
    auto& manager = exec::SimpleGeometryCacheManager::Instance();
    manager.RemoveCache(segment_id, field_id);
}
// Drop every geometry cache belonging to the given segment.
inline void
RemoveSegmentGeometryCaches(int64_t segment_id) {
    auto& manager = exec::SimpleGeometryCacheManager::Instance();
    manager.RemoveSegmentCaches(segment_id);
}
} // namespace milvus

View File

@ -335,7 +335,10 @@ class SegmentExpr : public Expr {
// used for processing raw data expr for sealed segments.
// now only used for std::string_view && json
// TODO: support more types
template <typename T, typename FUNC, typename... ValTypes>
template <typename T,
bool NeedSegmentOffsets = false,
typename FUNC,
typename... ValTypes>
int64_t
ProcessChunkForSealedSeg(
FUNC func,
@ -354,13 +357,30 @@ class SegmentExpr : public Expr {
if (!skip_func || !skip_func(skip_index, field_id_, 0)) {
// first is the raw data, second is valid_data
// use valid_data to see if raw data is null
func(views_info.first.data(),
views_info.second.data(),
nullptr,
need_size,
res,
valid_res,
values...);
if constexpr (NeedSegmentOffsets) {
// For GIS functions: construct segment offsets array
std::vector<int32_t> segment_offsets_array(need_size);
for (int64_t j = 0; j < need_size; ++j) {
segment_offsets_array[j] =
static_cast<int32_t>(current_data_chunk_pos_ + j);
}
func(views_info.first.data(),
views_info.second.data(),
nullptr,
segment_offsets_array.data(),
need_size,
res,
valid_res,
values...);
} else {
func(views_info.first.data(),
views_info.second.data(),
nullptr,
need_size,
res,
valid_res,
values...);
}
} else {
ApplyValidData(views_info.second.data(), res, valid_res, need_size);
}
@ -616,7 +636,11 @@ class SegmentExpr : public Expr {
return input->size();
}
template <typename T, typename FUNC, typename... ValTypes>
// Template parameter to control whether segment offsets are needed (for GIS functions)
template <typename T,
bool NeedSegmentOffsets = false,
typename FUNC,
typename... ValTypes>
int64_t
ProcessDataChunksForSingleChunk(
FUNC func,
@ -628,7 +652,7 @@ class SegmentExpr : public Expr {
if constexpr (std::is_same_v<T, std::string_view> ||
std::is_same_v<T, Json>) {
if (segment_->type() == SegmentType::Sealed) {
return ProcessChunkForSealedSeg<T>(
return ProcessChunkForSealedSeg<T, NeedSegmentOffsets>(
func, skip_func, res, valid_res, values...);
}
}
@ -653,15 +677,34 @@ class SegmentExpr : public Expr {
if (valid_data != nullptr) {
valid_data += data_pos;
}
if (!skip_func || !skip_func(skip_index, field_id_, i)) {
const T* data = chunk.data() + data_pos;
func(data,
valid_data,
nullptr,
size,
res + processed_size,
valid_res + processed_size,
values...);
if constexpr (NeedSegmentOffsets) {
// For GIS functions: construct segment offsets array
std::vector<int32_t> segment_offsets_array(size);
for (int64_t j = 0; j < size; ++j) {
segment_offsets_array[j] = static_cast<int32_t>(
size_per_chunk_ * i + data_pos + j);
}
func(data,
valid_data,
nullptr,
segment_offsets_array.data(),
size,
res + processed_size,
valid_res + processed_size,
values...);
} else {
func(data,
valid_data,
nullptr,
size,
res + processed_size,
valid_res + processed_size,
values...);
}
} else {
ApplyValidData(valid_data,
res + processed_size,
@ -679,7 +722,11 @@ class SegmentExpr : public Expr {
return processed_size;
}
template <typename T, typename FUNC, typename... ValTypes>
template <typename T,
bool NeedSegmentOffsets = false,
typename FUNC,
typename... ValTypes>
int64_t
ProcessDataChunksForMultipleChunk(
FUNC func,
@ -706,7 +753,13 @@ class SegmentExpr : public Expr {
size = std::min(size, batch_size_ - processed_size);
if (size == 0)
continue; //do not go empty-loop at the bound of the chunk
std::vector<int32_t> segment_offsets_array(size);
auto start_offset =
segment_->num_rows_until_chunk(field_id_, i) + data_pos;
for (int64_t j = 0; j < size; ++j) {
int64_t offset = start_offset + j;
segment_offsets_array[j] = static_cast<int32_t>(offset);
}
auto& skip_index = segment_->GetSkipIndex();
if (!skip_func || !skip_func(skip_index, field_id_, i)) {
bool is_seal = false;
@ -719,13 +772,26 @@ class SegmentExpr : public Expr {
auto [data_vec, valid_data] =
segment_->get_batch_views<T>(
field_id_, i, data_pos, size);
func(data_vec.data(),
valid_data.data(),
nullptr,
size,
res + processed_size,
valid_res + processed_size,
values...);
if constexpr (NeedSegmentOffsets) {
// For GIS functions: construct segment offsets array
func(data_vec.data(),
valid_data.data(),
nullptr,
segment_offsets_array.data(),
size,
res + processed_size,
valid_res + processed_size,
values...);
} else {
// For regular functions: no segment offsets
func(data_vec.data(),
valid_data.data(),
nullptr,
size,
res + processed_size,
valid_res + processed_size,
values...);
}
is_seal = true;
}
}
@ -736,13 +802,26 @@ class SegmentExpr : public Expr {
if (valid_data != nullptr) {
valid_data += data_pos;
}
func(data,
valid_data,
nullptr,
size,
res + processed_size,
valid_res + processed_size,
values...);
if constexpr (NeedSegmentOffsets) {
// For GIS functions: construct segment offsets array
func(data,
valid_data,
nullptr,
segment_offsets_array.data(),
size,
res + processed_size,
valid_res + processed_size,
values...);
} else {
func(data,
valid_data,
nullptr,
size,
res + processed_size,
valid_res + processed_size,
values...);
}
}
} else {
const bool* valid_data;
@ -782,7 +861,10 @@ class SegmentExpr : public Expr {
return processed_size;
}
template <typename T, typename FUNC, typename... ValTypes>
template <typename T,
bool NeedSegmentOffsets = false,
typename FUNC,
typename... ValTypes>
int64_t
ProcessDataChunks(
FUNC func,
@ -791,10 +873,10 @@ class SegmentExpr : public Expr {
TargetBitmapView valid_res,
ValTypes... values) {
if (segment_->is_chunked()) {
return ProcessDataChunksForMultipleChunk<T>(
return ProcessDataChunksForMultipleChunk<T, NeedSegmentOffsets>(
func, skip_func, res, valid_res, values...);
} else {
return ProcessDataChunksForSingleChunk<T>(
return ProcessDataChunksForSingleChunk<T, NeedSegmentOffsets>(
func, skip_func, res, valid_res, values...);
}
}

View File

@ -15,7 +15,6 @@
#include "common/Geometry.h"
#include "common/Types.h"
#include "pb/plan.pb.h"
#include "pb/schema.pb.h"
#include <cmath>
#include <fmt/core.h>
namespace milvus {
@ -25,36 +24,32 @@ namespace exec {
auto execute_sub_batch = [this](const _DataType* data, \
const bool* valid_data, \
const int32_t* offsets, \
const int32_t* segment_offsets, \
const int size, \
TargetBitmapView res, \
TargetBitmapView valid_res, \
const Geometry& right_source) { \
AssertInfo(segment_offsets != nullptr, \
"segment_offsets should not be nullptr"); \
/* Unified path using simple WKB-content-based cache for both sealed and growing segments. */ \
auto& geometry_cache = \
SimpleGeometryCacheManager::Instance().GetCache( \
this->segment_->get_segment_id(), field_id_); \
auto cache_lock = geometry_cache.AcquireReadLock(); \
for (int i = 0; i < size; ++i) { \
if (valid_data != nullptr && !valid_data[i]) { \
res[i] = valid_res[i] = false; \
continue; \
} \
/* Create string_view from WKB data for cache lookup */ \
std::string_view wkb_view(data[i].data(), data[i].size()); \
auto cached_geometry = geometry_cache.GetOrCreate(wkb_view); \
\
bool result = false; \
if (cached_geometry != nullptr) { \
/* Use cached geometry for operation */ \
result = cached_geometry->method(right_source); \
} else { \
/* Fallback: construct temporary geometry (cache construction failed) */ \
Geometry tmp(data[i].data(), data[i].size()); \
result = tmp.method(right_source); \
} \
res[i] = result; \
auto absolute_offset = segment_offsets[i]; \
auto cached_geometry = \
geometry_cache.GetByOffsetUnsafe(absolute_offset); \
AssertInfo(cached_geometry != nullptr, \
"cached geometry is nullptr"); \
res[i] = cached_geometry->method(right_source); \
} \
}; \
int64_t processed_size = ProcessDataChunks<_DataType>( \
int64_t processed_size = ProcessDataChunks<_DataType, true>( \
execute_sub_batch, std::nullptr_t{}, res, valid_res, right_source); \
AssertInfo(processed_size == real_batch_size, \
"internal error: expr processed rows {} not equal " \
@ -68,20 +63,31 @@ namespace exec {
auto execute_sub_batch = [this](const _DataType* data, \
const bool* valid_data, \
const int32_t* offsets, \
const int32_t* segment_offsets, \
const int size, \
TargetBitmapView res, \
TargetBitmapView valid_res, \
const Geometry& right_source) { \
AssertInfo(segment_offsets != nullptr, \
"segment_offsets should not be nullptr"); \
auto& geometry_cache = \
SimpleGeometryCacheManager::Instance().GetCache( \
this->segment_->get_segment_id(), field_id_); \
auto cache_lock = geometry_cache.AcquireReadLock(); \
for (int i = 0; i < size; ++i) { \
if (valid_data != nullptr && !valid_data[i]) { \
res[i] = valid_res[i] = false; \
continue; \
} \
res[i] = Geometry(data[i].data(), data[i].size()) \
.method(right_source, expr_->distance_); \
auto absolute_offset = segment_offsets[i]; \
auto cached_geometry = \
geometry_cache.GetByOffsetUnsafe(absolute_offset); \
AssertInfo(cached_geometry != nullptr, \
"cached geometry is nullptr"); \
res[i] = cached_geometry->method(right_source, expr_->distance_); \
} \
}; \
int64_t processed_size = ProcessDataChunks<_DataType>( \
int64_t processed_size = ProcessDataChunks<_DataType, true>( \
execute_sub_batch, std::nullptr_t{}, res, valid_res, right_source); \
AssertInfo(processed_size == real_batch_size, \
"internal error: expr processed rows {} not equal " \
@ -89,7 +95,6 @@ namespace exec {
processed_size, \
real_batch_size); \
return res_vec;
void
PhyGISFunctionFilterExpr::Eval(EvalCtx& context, VectorPtr& result) {
AssertInfo(expr_->column_.data_type_ == DataType::GEOMETRY,
@ -352,39 +357,17 @@ PhyGISFunctionFilterExpr::EvalForIndexSegment() {
auto& geometry_cache =
SimpleGeometryCacheManager::Instance().GetCache(
segment_->get_segment_id(), field_id_);
auto data_array = segment_->bulk_subscript(
field_id_, hit_offsets.data(), hit_offsets.size());
auto geometry_array =
static_cast<const milvus::proto::schema::GeometryArray*>(
&data_array->scalars().geometry_data());
const auto& valid_data = data_array->valid_data();
auto cache_lock = geometry_cache.AcquireReadLock();
for (size_t i = 0; i < hit_offsets.size(); ++i) {
const auto pos = hit_offsets[i];
// Skip invalid data
if (!valid_data.empty() && !valid_data[i]) {
auto cached_geometry =
geometry_cache.GetByOffsetUnsafe(pos);
// skip invalid geometry
if (cached_geometry == nullptr) {
continue;
}
const auto& wkb_data = geometry_array->data(i);
// Get or create cached Geometry from simple cache
std::string_view wkb_view(wkb_data.data(), wkb_data.size());
auto cached_geometry = geometry_cache.GetOrCreate(wkb_view);
// Evaluate geometry: use cached if available, otherwise construct temporarily
bool result = false;
if (cached_geometry != nullptr) {
result = evaluate_geometry(*cached_geometry);
} else {
// Fallback: construct temporary geometry (cache construction failed)
Geometry temp_geometry(wkb_data.data(),
wkb_data.size());
result = evaluate_geometry(temp_geometry);
}
bool result = evaluate_geometry(*cached_geometry);
if (result) {
refined.set(pos);

View File

@ -383,6 +383,10 @@ ChunkedSegmentSealedImpl::LoadFieldData(FieldId field_id, FieldDataInfo& data) {
// var_column->Seal();
stats_.mem_size += var_column->DataByteSize();
field_data_size = var_column->DataByteSize();
// Construct GeometryCache for the entire field
LoadGeometryCache(field_id, *var_column);
column = std::move(var_column);
break;
}
@ -1310,9 +1314,8 @@ ChunkedSegmentSealedImpl::ChunkedSegmentSealedImpl(
}
ChunkedSegmentSealedImpl::~ChunkedSegmentSealedImpl() {
// Clean up geometry cache for all fields in this segment
auto& cache_manager = milvus::exec::SimpleGeometryCacheManager::Instance();
cache_manager.RemoveSegmentCaches(get_segment_id());
// Clean up geometry cache for all fields in this segment using global function
milvus::RemoveSegmentGeometryCaches(get_segment_id());
auto cc = storage::MmapManager::GetInstance().GetChunkCache();
if (cc == nullptr) {
@ -2219,4 +2222,48 @@ ChunkedSegmentSealedImpl::RemoveFieldFile(const FieldId field_id) {
}
}
// Populate the per-(segment, field) geometry cache from every chunk of the
// sealed geometry column, preserving the original row order.  Null rows are
// appended as invalid placeholder entries so cache offsets stay aligned with
// segment offsets.
void
ChunkedSegmentSealedImpl::LoadGeometryCache(
    FieldId field_id, const ChunkedVariableColumn<std::string>& var_column) {
    try {
        // Cache instance keyed by this segment's id plus the field id.
        auto& cache =
            milvus::exec::SimpleGeometryCacheManager::Instance().GetCache(
                get_segment_id(), field_id);
        const auto chunk_count = var_column.num_chunks();
        for (int64_t chunk = 0; chunk < chunk_count; ++chunk) {
            // String views over the chunk's WKB payloads plus validity flags.
            auto [views, valids] = var_column.StringViews(chunk);
            const size_t row_count = views.size();
            for (size_t row = 0; row < row_count; ++row) {
                if (!valids.empty() && !valids[row]) {
                    // Null geometry: placeholder keeps offsets aligned.
                    cache.AppendData(nullptr, 0);
                    continue;
                }
                const auto& wkb = views[row];
                cache.AppendData(wkb.data(), wkb.size());
            }
        }
        LOG_INFO(
            "Successfully loaded geometry cache for segment {} field {} with "
            "{} geometries",
            get_segment_id(),
            field_id.get(),
            cache.Size());
    } catch (const std::exception& e) {
        PanicInfo(UnexpectedError,
                  "Failed to load geometry cache for segment {} field {}: {}",
                  get_segment_id(),
                  field_id.get(),
                  e.what());
    }
}
} // namespace milvus::segcore

View File

@ -284,6 +284,11 @@ class ChunkedSegmentSealedImpl : public SegmentSealed {
return insert_record_.timestamps_;
}
// Load Geometry cache for a field
void
LoadGeometryCache(FieldId field_id,
const ChunkedVariableColumn<std::string>& var_column);
private:
template <typename S, typename T = S>
static void

View File

@ -178,6 +178,14 @@ SegmentGrowingImpl::Insert(int64_t reserved_offset,
reserved_offset);
}
// Build geometry cache for GEOMETRY fields
if (field_meta.get_data_type() == DataType::GEOMETRY) {
BuildGeometryCacheForInsert(
field_id,
&insert_record_proto->fields_data(data_offset),
num_rows);
}
// update average row data size
auto field_data_size = GetRawDataSizeOfDataArray(
&insert_record_proto->fields_data(data_offset),
@ -315,6 +323,11 @@ SegmentGrowingImpl::LoadFieldData(const LoadFieldDataInfo& infos) {
index->Reload();
}
// Build geometry cache for GEOMETRY fields
if (field_meta.get_data_type() == DataType::GEOMETRY) {
BuildGeometryCacheForLoad(field_id, field_data);
}
// update the mem size
stats_.mem_size += storage::GetByteSizeOfFieldDatas(field_data);
@ -1008,4 +1021,96 @@ SegmentGrowingImpl::GetJsonData(FieldId field_id, size_t offset) const {
return std::make_pair(std::string_view(src[offset]), true);
}
// Append all geometry rows of an insert batch into the per-(segment, field)
// geometry cache, in row order.  Null rows become invalid placeholder entries
// so cache offsets stay aligned with segment offsets.
void
SegmentGrowingImpl::BuildGeometryCacheForInsert(FieldId field_id,
                                                const DataArray* data_array,
                                                int64_t num_rows) {
    try {
        // Cache instance keyed by this segment's id plus the field id.
        auto& cache =
            milvus::exec::SimpleGeometryCacheManager::Instance().GetCache(
                get_segment_id(), field_id);
        const auto& wkb_rows = data_array->scalars().geometry_data();
        const auto& valid_rows = data_array->valid_data();
        for (int64_t row = 0; row < num_rows; ++row) {
            // No validity array means every row is valid; otherwise a row is
            // valid only when its flag is present and set.
            const bool is_valid =
                valid_rows.empty() ||
                (row < valid_rows.size() && valid_rows[row]);
            if (is_valid) {
                const auto& wkb = wkb_rows.data(row);
                cache.AppendData(wkb.data(), wkb.size());
            } else {
                // Null geometry: placeholder keeps offsets aligned.
                cache.AppendData(nullptr, 0);
            }
        }
        LOG_INFO(
            "Successfully appended {} geometries to cache for growing segment "
            "{} field {}",
            num_rows,
            get_segment_id(),
            field_id.get());
    } catch (const std::exception& e) {
        PanicInfo(UnexpectedError,
                  "Failed to build geometry cache for growing segment {} field "
                  "{} insert: {}",
                  get_segment_id(),
                  field_id.get(),
                  e.what());
    }
}
// Build the per-(segment, field) geometry cache from loaded field data.
// Rows are appended in the same order as the source chunks so that cache
// offsets line up with segment offsets; null rows get invalid placeholders.
void
SegmentGrowingImpl::BuildGeometryCacheForLoad(
    FieldId field_id, const std::vector<FieldDataPtr>& field_data) {
    try {
        // Get geometry cache for this segment+field
        auto& geometry_cache =
            milvus::exec::SimpleGeometryCacheManager::Instance().GetCache(
                get_segment_id(), field_id);
        // Process each field data chunk, accumulating the row total as we go
        // (the previous implementation re-scanned every chunk a second time
        // just to compute the count for logging).
        size_t total_rows = 0;
        for (const auto& data : field_data) {
            const auto num_rows = data->get_num_rows();
            total_rows += static_cast<size_t>(num_rows);
            for (int64_t i = 0; i < num_rows; ++i) {
                if (data->is_valid(i)) {
                    // Valid geometry: append the raw WKB bytes.
                    auto wkb_data =
                        static_cast<const std::string*>(data->RawValue(i));
                    geometry_cache.AppendData(wkb_data->data(),
                                              wkb_data->size());
                } else {
                    // Null/invalid geometry placeholder keeps offsets aligned.
                    geometry_cache.AppendData(nullptr, 0);
                }
            }
        }
        LOG_INFO(
            "Successfully loaded {} geometries to cache for growing segment {} "
            "field {}",
            total_rows,
            get_segment_id(),
            field_id.get());
    } catch (const std::exception& e) {
        PanicInfo(UnexpectedError,
                  "Failed to build geometry cache for growing segment {} field "
                  "{} load: {}",
                  get_segment_id(),
                  field_id.get(),
                  e.what());
    }
}
} // namespace milvus::segcore

View File

@ -85,6 +85,18 @@ class SegmentGrowingImpl : public SegmentGrowing {
void
CreateTextIndex(FieldId field_id) override;
private:
// Build geometry cache for inserted data
void
BuildGeometryCacheForInsert(FieldId field_id,
const DataArray* data_array,
int64_t num_rows);
// Build geometry cache for loaded field data
void
BuildGeometryCacheForLoad(FieldId field_id,
const std::vector<FieldDataPtr>& field_data);
public:
const InsertRecord<>&
get_insert_record() const {

View File

@ -445,6 +445,10 @@ SegmentSealedImpl::LoadFieldData(FieldId field_id, FieldDataInfo& data) {
var_column->Seal();
stats_.mem_size += var_column->MemoryUsageBytes();
field_data_size = var_column->DataByteSize();
// Construct GeometryCache for the entire field
LoadGeometryCache(field_id, *var_column);
column = std::move(var_column);
break;
}
@ -617,6 +621,10 @@ SegmentSealedImpl::MapFieldData(const FieldId field_id, FieldDataInfo& data) {
field_meta,
DEFAULT_MMAP_VRCOL_BLOCK_SIZE);
var_column->Seal(std::move(indices));
// Construct GeometryCache for the entire field (mmap mode)
LoadGeometryCache(field_id, *var_column);
column = std::move(var_column);
break;
}
@ -1280,9 +1288,8 @@ SegmentSealedImpl::SegmentSealedImpl(SchemaPtr schema,
}
SegmentSealedImpl::~SegmentSealedImpl() {
// Clean up geometry cache for all fields in this segment
auto& cache_manager = milvus::exec::SimpleGeometryCacheManager::Instance();
cache_manager.RemoveSegmentCaches(get_segment_id());
// Clean up geometry cache for all fields in this segment using global function
milvus::RemoveSegmentGeometryCaches(get_segment_id());
auto cc = storage::MmapManager::GetInstance().GetChunkCache();
if (cc == nullptr) {
@ -2255,4 +2262,45 @@ SegmentSealedImpl::GetJsonData(FieldId field_id, size_t offset) const {
return std::make_pair(std::move(column->RawAt(offset)), is_valid);
}
// Populate the per-(segment, field) geometry cache from the sealed segment's
// single-chunk geometry column, preserving row order.  Null rows become
// invalid placeholder entries so cache offsets stay aligned with segment
// offsets.
void
SegmentSealedImpl::LoadGeometryCache(
    FieldId field_id,
    const SingleChunkVariableColumn<std::string>& var_column) {
    try {
        // Cache instance keyed by this segment's id plus the field id.
        auto& cache =
            milvus::exec::SimpleGeometryCacheManager::Instance().GetCache(
                get_segment_id(), field_id);
        // All rows live in one chunk for this column type.
        auto [views, valids] = var_column.StringViews();
        const size_t row_count = views.size();
        for (size_t row = 0; row < row_count; ++row) {
            if (!valids.empty() && !valids[row]) {
                // Null geometry: placeholder keeps offsets aligned.
                cache.AppendData(nullptr, 0);
                continue;
            }
            const auto& wkb = views[row];
            cache.AppendData(wkb.data(), wkb.size());
        }
        LOG_INFO(
            "Successfully loaded geometry cache for segment {} field {} with "
            "{} geometries",
            get_segment_id(),
            field_id.get(),
            cache.Size());
    } catch (const std::exception& e) {
        PanicInfo(UnexpectedError,
                  "Failed to load geometry cache for segment {} field {}: {}",
                  get_segment_id(),
                  field_id.get(),
                  e.what());
    }
}
} // namespace milvus::segcore

View File

@ -339,6 +339,11 @@ class SegmentSealedImpl : public SegmentSealed {
deleted_record_.set_sealed_row_count(row_count);
}
// Load Geometry cache for a field
void
LoadGeometryCache(FieldId field_id,
const SingleChunkVariableColumn<std::string>& var_column);
void
mask_with_timestamps(BitsetTypeView& bitset_chunk,
Timestamp timestamp,