Cherry-pick from master PRs: #45061 #45488 #45803 #46017 #44991 #45132 #45723 #45726 #45798 #45897 #45918 #44998

This feature integrates the Storage V2 (Loon) FFI interface as a unified storage layer for segment loading and index building in Milvus. It enables manifest-based data access, replacing the traditional binlog-based approach with a more efficient columnar storage format.

Key changes:

### Segment Self-Managed Loading Architecture
- Move segment loading orchestration from the Go layer to C++ segcore
- Add NewSegmentWithLoadInfo() API for passing load info during segment creation
- Implement SetLoadInfo() and Load() methods in SegmentInterface
- Support parallel loading of indexed and non-indexed fields
- Enable both sealed and growing segments to self-manage loading

### Storage V2 FFI Integration
- Integrate the milvus-storage library's FFI interface for packed columnar data
- Add manifest path support throughout the data path (SegmentInfo, LoadInfo)
- Implement ManifestReader for generating manifests from binlogs
- Support zero-copy data exchange using the Arrow C Data Interface
- Add ToCStorageConfig() for Go-to-C storage config conversion

### Manifest-Based Index Building
- Extend FileManagerContext to carry loon_ffi_properties
- Implement GetFieldDatasFromManifest() using the Arrow C Stream interface
- Support manifest-based reading in DiskFileManagerImpl and MemFileManagerImpl
- Add fallback to traditional segment insert files when the manifest is unavailable

### Compaction Pipeline Updates
- Include the manifest path in all compaction task builders (clustering, L0, mix)
- Update BulkPackWriterV2 to return the manifest path
- Propagate manifest metadata through the compaction pipeline

### Configuration & Protocol
- Add common.storageV2.useLoonFFI config option (default: false; see the sketch after this description)
- Add manifest_path field to SegmentLoadInfo and related proto messages
- Add manifest field to compaction segment messages

### Bug Fixes
- Fix mmap settings not applied during segment load (key typo fix)
- Populate index info after segment loading to prevent redundant load tasks
- Fix memory corruption by removing premature transaction handle destruction

Related issues: #44956, #45060, #39173

## Individual Cherry-Picked Commits
1. **e1c923b5cc** - fix: apply mmap settings correctly during segment load (#46017)
2. **63b912370b** - enhance: use milvus-storage internal C++ Reader API for Loon FFI (#45897)
3. **bfc192faa5** - enhance: Resolve issues integrating loon FFI (#45918)
4. **fb18564631** - enhance: support manifest-based index building with Loon FFI reader (#45726)
5. **b9ec2392b9** - enhance: integrate StorageV2 FFI interface for manifest-based segment loading (#45798)
6. **66db3c32e6** - enhance: integrate Storage V2 FFI interface for unified storage access (#45723)
7. **ae789273ac** - fix: populate index info after segment loading to prevent redundant load tasks (#45803)
8. **49688b0be2** - enhance: Move segment loading logic from Go layer to segcore for self-managed loading (#45488)
9. **5b2df88bac** - enhance: [StorageV2] Integrate FFI interface for packed reader (#45132)
10. **91ff5706ac** - enhance: [StorageV2] add manifest path support for FFI integration (#44991)
11. **2192bb4a85** - enhance: add NewSegmentWithLoadInfo API to support segment self-managed loading (#45061)
12. **4296b01da0** - enhance: update delta log serialization APIs to integrate storage V2 (#44998)

## Technical Details

### Architecture Changes
- **Before**: Go layer orchestrated segment loading, making multiple CGO calls
- **After**: Segments autonomously manage loading in the C++ layer behind a single entry point

### Storage Access Pattern
- **Before**: Read individual binlog files through the Go storage layer
- **After**: Read a manifest file that references packed columnar data via FFI

### Benefits
- Reduced cross-language call overhead
- Better resource management at the C++ level
- Improved I/O performance through batched streaming reads
- Cleaner separation of concerns between Go and C++ layers
- Foundation for proactive schema evolution handling

---------

Signed-off-by: Ted Xu <ted.xu@zilliz.com>
Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
Co-authored-by: Ted Xu <ted.xu@zilliz.com>
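A minimal sketch of the gating logic implied by the configuration and fallback items above, assuming `GetManifestPath()` is the generated getter for the new `manifest_path` field on `SegmentLoadInfo`, that the caller has already resolved the `common.storageV2.useLoonFFI` switch, and that the helper name is hypothetical:

```go
package segments

import "github.com/milvus-io/milvus/pkg/v2/proto/querypb"

// useManifestPath reports whether a segment load can take the manifest-based
// Loon FFI path. Both the feature switch and a manifest must be present;
// otherwise the load falls back to the traditional binlog-based insert files.
func useManifestPath(loadInfo *querypb.SegmentLoadInfo, useLoonFFI bool) bool {
	return useLoonFFI && loadInfo.GetManifestPath() != ""
}
```

The actual decision point may live on the C++ side of the FFI boundary; the sketch only shows the two inputs the description names (the feature switch and the manifest path).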
120 lines
4.2 KiB
Go
// Licensed to the LF AI & Data foundation under one
|
||
// or more contributor license agreements. See the NOTICE file
|
||
// distributed with this work for additional information
|
||
// regarding copyright ownership. The ASF licenses this file
|
||
// to you under the Apache License, Version 2.0 (the
|
||
// "License"); you may not use this file except in compliance
|
||
// with the License. You may obtain a copy of the License at
|
||
//
|
||
// http://www.apache.org/licenses/LICENSE-2.0
|
||
//
|
||
// Unless required by applicable law or agreed to in writing, software
|
||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
// See the License for the specific language governing permissions and
|
||
// limitations under the License.
|
||
|
||
package segments
|
||
|
||
import (
|
||
"context"
|
||
|
||
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
|
||
pkoracle "github.com/milvus-io/milvus/internal/querynodev2/pkoracle"
|
||
"github.com/milvus-io/milvus/internal/storage"
|
||
"github.com/milvus-io/milvus/internal/util/segcore"
|
||
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
|
||
"github.com/milvus-io/milvus/pkg/v2/proto/querypb"
|
||
"github.com/milvus-io/milvus/pkg/v2/proto/segcorepb"
|
||
"github.com/milvus-io/milvus/pkg/v2/util/metautil"
|
||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||
)
|
||
|
||
// ResourceUsage is used to estimate the resource usage of a sealed segment.
|
||
type ResourceUsage struct {
|
||
MemorySize uint64
|
||
DiskSize uint64
|
||
MmapFieldCount int
|
||
FieldGpuMemorySize []uint64
|
||
}
|
||
|
||
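// canHostSegment is an illustrative sketch (not part of the original file)
// showing how ResourceUsage feeds an admission decision: compare the segment's
// estimated footprint against the node's free capacity. The freeMemory and
// freeDisk inputs are assumed to come from whatever resource monitor the
// caller uses.
func canHostSegment(seg Segment, freeMemory, freeDisk uint64) bool {
	usage := seg.ResourceUsageEstimate()
	return usage.MemorySize <= freeMemory && usage.DiskSize <= freeDisk
}
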
// Segment is the interface of a segment implementation.
// Some methods cannot apply to all segment types, such as LoadInfo and ResourceUsageEstimate.
// Adding more interfaces to represent the different segment types would be a better implementation.
type Segment interface {
	// ResourceUsageEstimate() ResourceUsage

	// Properties
	ID() int64
	DatabaseName() string
	ResourceGroup() string
	Collection() int64
	Partition() int64
	Shard() metautil.Channel
	Version() int64
	CASVersion(int64, int64) bool
	StartPosition() *msgpb.MsgPosition
	Type() SegmentType
	Level() datapb.SegmentLevel
	IsSorted() bool
	LoadInfo() *querypb.SegmentLoadInfo
	// PinIfNotReleased pins the segment to prevent it from being released
	PinIfNotReleased() error
	// Unpin unpins the segment to allow it to be released
	Unpin()

	// Stats related
	// InsertCount returns the number of inserted rows, not affected by deletion
	InsertCount() int64
	// RowNum returns the number of rows; it's slow, so DO NOT call it in a loop
	RowNum() int64
	MemSize() int64
	// ResourceUsageEstimate returns the estimated resource usage of the segment
	ResourceUsageEstimate() ResourceUsage

	// Index related
	GetIndexByID(indexID int64) *IndexedFieldInfo
	GetIndex(fieldID int64) []*IndexedFieldInfo
	ExistIndex(fieldID int64) bool
	Indexes() []*IndexedFieldInfo
	HasRawData(fieldID int64) bool
	DropIndex(ctx context.Context, indexID int64) error

	// Modification related
	Insert(ctx context.Context, rowIDs []int64, timestamps []typeutil.Timestamp, record *segcorepb.InsertRecord) error
	Delete(ctx context.Context, primaryKeys storage.PrimaryKeys, timestamps []typeutil.Timestamp) error
	LoadDeltaData(ctx context.Context, deltaData *storage.DeltaData) error
	LastDeltaTimestamp() uint64
	Load(ctx context.Context) error
	FinishLoad() error
	Release(ctx context.Context, opts ...releaseOption)

	// Bloom filter related
	SetBloomFilter(bf *pkoracle.BloomFilterSet)
	BloomFilterExist() bool
	UpdateBloomFilter(pks []storage.PrimaryKey)
	MayPkExist(lc *storage.LocationsCache) bool
	BatchPkExist(lc *storage.BatchLocationsCache) []bool

	// Get min/max
	GetMinPk() *storage.PrimaryKey
	GetMaxPk() *storage.PrimaryKey

	// BM25 stats
	UpdateBM25Stats(stats map[int64]*storage.BM25Stats)
	GetBM25Stats() map[int64]*storage.BM25Stats

	// Read operations
	Search(ctx context.Context, searchReq *segcore.SearchRequest) (*segcore.SearchResult, error)
	Retrieve(ctx context.Context, plan *segcore.RetrievePlan) (*segcorepb.RetrieveResults, error)
	RetrieveByOffsets(ctx context.Context, plan *segcore.RetrievePlanWithOffsets) (*segcorepb.RetrieveResults, error)
	IsLazyLoad() bool
	ResetIndexesLazyLoad(lazyState bool)

	// lazy load related
	NeedUpdatedVersion() int64
	RemoveUnusedFieldFiles() error

	GetFieldJSONIndexStats() map[int64]*querypb.JsonStatsInfo
}
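
// loadAndFinish is an illustrative sketch (not part of the original file)
// of the self-managed loading lifecycle exposed by the interface above:
// pin the segment, let it load its own data through the single Load() entry
// point, then finalize with FinishLoad(). The error-handling policy here is
// an assumption; callers may choose to Release or retry instead.
func loadAndFinish(ctx context.Context, seg Segment) error {
	// Pin first so the segment cannot be released while loading is in flight.
	if err := seg.PinIfNotReleased(); err != nil {
		return err
	}
	defer seg.Unpin()

	// Load delegates orchestration (indexed and non-indexed fields, mmap, etc.)
	// to the segment itself instead of the caller issuing multiple CGO calls.
	if err := seg.Load(ctx); err != nil {
		return err
	}

	// FinishLoad lets the segment finalize its bookkeeping after a successful load.
	return seg.FinishLoad()
}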