Mirror of https://gitee.com/milvus-io/milvus.git, synced 2025-12-06 09:08:43 +08:00
Cherry-pick from master PRs: #45061 #45488 #45803 #46017 #44991 #45132 #45723 #45726 #45798 #45897 #45918 #44998

This feature integrates the Storage V2 (Loon) FFI interface as a unified storage layer for segment loading and index building in Milvus. It enables manifest-based data access, replacing the traditional binlog-based approach with a more efficient columnar storage format.

Key changes:

### Segment Self-Managed Loading Architecture
- Move segment loading orchestration from the Go layer to C++ segcore
- Add NewSegmentWithLoadInfo() API for passing load info during segment creation
- Implement SetLoadInfo() and Load() methods in SegmentInterface
- Support parallel loading of indexed and non-indexed fields
- Enable both sealed and growing segments to self-manage loading

### Storage V2 FFI Integration
- Integrate the milvus-storage library's FFI interface for packed columnar data
- Add manifest path support throughout the data path (SegmentInfo, LoadInfo)
- Implement ManifestReader for generating manifests from binlogs
- Support zero-copy data exchange using the Arrow C Data Interface
- Add ToCStorageConfig() for Go-to-C storage config conversion

### Manifest-Based Index Building
- Extend FileManagerContext to carry loon_ffi_properties
- Implement GetFieldDatasFromManifest() using the Arrow C Stream interface
- Support manifest-based reading in DiskFileManagerImpl and MemFileManagerImpl
- Add fallback to traditional segment insert files when the manifest is unavailable

### Compaction Pipeline Updates
- Include the manifest path in all compaction task builders (clustering, L0, mix)
- Update BulkPackWriterV2 to return the manifest path
- Propagate manifest metadata through the compaction pipeline

### Configuration & Protocol
- Add the common.storageV2.useLoonFFI config option (default: false); see the writer sketches after this description
- Add a manifest_path field to SegmentLoadInfo and related proto messages
- Add a manifest field to compaction segment messages

### Bug Fixes
- Fix mmap settings not applied during segment load (key typo fix)
- Populate index info after segment loading to prevent redundant load tasks
- Fix memory corruption by removing premature transaction handle destruction

Related issues: #44956, #45060, #39173

## Individual Cherry-Picked Commits

1. **e1c923b5cc** - fix: apply mmap settings correctly during segment load (#46017)
2. **63b912370b** - enhance: use milvus-storage internal C++ Reader API for Loon FFI (#45897)
3. **bfc192faa5** - enhance: Resolve issues integrating loon FFI (#45918)
4. **fb18564631** - enhance: support manifest-based index building with Loon FFI reader (#45726)
5. **b9ec2392b9** - enhance: integrate StorageV2 FFI interface for manifest-based segment loading (#45798)
6. **66db3c32e6** - enhance: integrate Storage V2 FFI interface for unified storage access (#45723)
7. **ae789273ac** - fix: populate index info after segment loading to prevent redundant load tasks (#45803)
8. **49688b0be2** - enhance: Move segment loading logic from Go layer to segcore for self-managed loading (#45488)
9. **5b2df88bac** - enhance: [StorageV2] Integrate FFI interface for packed reader (#45132)
10. **91ff5706ac** - enhance: [StorageV2] add manifest path support for FFI integration (#44991)
11. **2192bb4a85** - enhance: add NewSegmentWithLoadInfo API to support segment self-managed loading (#45061)
12. **4296b01da0** - enhance: update delta log serialization APIs to integrate storage V2 (#44998)

## Technical Details

### Architecture Changes
- **Before**: Go layer orchestrated segment loading, making multiple CGO calls
- **After**: Segments autonomously manage loading in the C++ layer with a single entry point

### Storage Access Pattern
- **Before**: Read individual binlog files through the Go storage layer
- **After**: Read a manifest file that references packed columnar data via FFI

### Benefits
- Reduced cross-language call overhead
- Better resource management at the C++ level
- Improved I/O performance through batched streaming reads
- Cleaner separation of concerns between the Go and C++ layers
- Foundation for proactive schema evolution handling

---------

Signed-off-by: Ted Xu <ted.xu@zilliz.com>
Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
Co-authored-by: Ted Xu <ted.xu@zilliz.com>
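The Go file below (package `storage`) backs part of the description above with two writer flavors behind a single `BinlogRecordWriter` interface: `PackedBinlogRecordWriter` writes one insert-log path per column group, while `PackedManifestRecordWriter` writes packed data under a single base path and is the flavor used on the manifest/Loon FFI path. The following is only a minimal selection sketch, not part of the change itself: `newPackedWriterForSegment` and its `useLoonFFI` parameter are hypothetical (assumed to mirror the `common.storageV2.useLoonFFI` option); the constructors and types it calls are the ones defined in the file.

```go
package storage

import (
	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
	"github.com/milvus-io/milvus/internal/allocator"
	"github.com/milvus-io/milvus/internal/storagecommon"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
)

// newPackedWriterForSegment is a hypothetical helper that picks the writer
// flavor: the manifest-based writer when the Loon FFI path is enabled,
// otherwise the traditional packed binlog writer. Both satisfy
// BinlogRecordWriter (see the file below).
func newPackedWriterForSegment(
	useLoonFFI bool, // assumed to mirror common.storageV2.useLoonFFI
	collectionID, partitionID, segmentID UniqueID,
	schema *schemapb.CollectionSchema,
	blobsWriter ChunkedBlobsWriter,
	alloc allocator.Interface,
	maxRowNum, bufferSize, multiPartUploadSize int64,
	columnGroups []storagecommon.ColumnGroup,
	storageConfig *indexpb.StorageConfig,
	pluginCtx *indexcgopb.StoragePluginContext,
) (BinlogRecordWriter, error) {
	if useLoonFFI {
		// Writes packed columnar data under a single base path; the written
		// manifest is later exposed through GetLogs().
		return newPackedManifestRecordWriter(collectionID, partitionID, segmentID, schema,
			blobsWriter, alloc, maxRowNum, bufferSize, multiPartUploadSize, columnGroups,
			storageConfig, pluginCtx)
	}
	// Writes one insert-log path per column group.
	return newPackedBinlogRecordWriter(collectionID, partitionID, segmentID, schema,
		blobsWriter, alloc, maxRowNum, bufferSize, multiPartUploadSize, columnGroups,
		storageConfig, pluginCtx)
}
```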
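Either writer is then driven through the same interface: records go in via `Write`, `Close()` flushes the underlying packed writer and finalizes the binlog and stats metadata, and `GetLogs()` hands back the per-column-group binlogs, stats logs, and the written manifest that the compaction and load paths described above propagate. A hedged usage sketch in the same package; `flushRecords` and its `records` argument are illustrative only:

```go
package storage

import "github.com/milvus-io/milvus/pkg/v2/proto/datapb"

// flushRecords is a hypothetical helper: it drains the given records into any
// BinlogRecordWriter, closes it, and returns the column-group binlogs plus the
// manifest produced by the writer (see GetLogs in the file below).
func flushRecords(w BinlogRecordWriter, records []Record) (map[FieldID]*datapb.FieldBinlog, string, error) {
	for _, r := range records {
		if err := w.Write(r); err != nil {
			return nil, "", err
		}
	}
	// Close flushes the packed writer, finalizes binlog metadata,
	// and writes the PK/BM25 stats logs.
	if err := w.Close(); err != nil {
		return nil, "", err
	}
	fieldBinlogs, _, _, manifest := w.GetLogs()
	return fieldBinlogs, manifest, nil
}
```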
457 lines
14 KiB
Go
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"fmt"
	"path"

	"github.com/apache/arrow/go/v17/arrow"
	"github.com/apache/arrow/go/v17/arrow/array"

	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
	"github.com/milvus-io/milvus/internal/allocator"
	"github.com/milvus-io/milvus/internal/storagecommon"
	"github.com/milvus-io/milvus/pkg/v2/common"
	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
	"github.com/milvus-io/milvus/pkg/v2/util/merr"
	"github.com/milvus-io/milvus/pkg/v2/util/metautil"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

type BinlogRecordWriter interface {
	RecordWriter
	GetLogs() (
		fieldBinlogs map[FieldID]*datapb.FieldBinlog,
		statsLog *datapb.FieldBinlog,
		bm25StatsLog map[FieldID]*datapb.FieldBinlog,
		manifest string,
	)
	GetRowNum() int64
	FlushChunk() error
	GetBufferUncompressed() uint64
	Schema() *schemapb.CollectionSchema
}

type packedBinlogRecordWriterBase struct {
	// attributes
	collectionID         UniqueID
	partitionID          UniqueID
	segmentID            UniqueID
	schema               *schemapb.CollectionSchema
	BlobsWriter          ChunkedBlobsWriter
	allocator            allocator.Interface
	maxRowNum            int64
	arrowSchema          *arrow.Schema
	bufferSize           int64
	multiPartUploadSize  int64
	columnGroups         []storagecommon.ColumnGroup
	storageConfig        *indexpb.StorageConfig
	storagePluginContext *indexcgopb.StoragePluginContext

	pkCollector         *PkStatsCollector
	bm25Collector       *Bm25StatsCollector
	tsFrom              typeutil.Timestamp
	tsTo                typeutil.Timestamp
	rowNum              int64
	writtenUncompressed uint64

	// results
	fieldBinlogs map[FieldID]*datapb.FieldBinlog
	statsLog     *datapb.FieldBinlog
	bm25StatsLog map[FieldID]*datapb.FieldBinlog
	manifest     string
}

func (pw *packedBinlogRecordWriterBase) getColumnStatsFromRecord(r Record, allFields []*schemapb.FieldSchema) map[int64]storagecommon.ColumnStats {
	result := make(map[int64]storagecommon.ColumnStats)
	for _, field := range allFields {
		if arr := r.Column(field.FieldID); arr != nil {
			result[field.FieldID] = storagecommon.ColumnStats{
				AvgSize: int64(arr.Data().SizeInBytes()) / int64(arr.Len()),
			}
		}
	}
	return result
}

func (pw *packedBinlogRecordWriterBase) GetWrittenUncompressed() uint64 {
	return pw.writtenUncompressed
}

func (pw *packedBinlogRecordWriterBase) writeStats() error {
	// Write PK stats
	pkStatsMap, err := pw.pkCollector.Digest(
		pw.collectionID,
		pw.partitionID,
		pw.segmentID,
		pw.storageConfig.GetRootPath(),
		pw.rowNum,
		pw.allocator,
		pw.BlobsWriter,
	)
	if err != nil {
		return err
	}
	// Extract single PK stats from map
	for _, statsLog := range pkStatsMap {
		pw.statsLog = statsLog
		break
	}

	// Write BM25 stats
	bm25StatsLog, err := pw.bm25Collector.Digest(
		pw.collectionID,
		pw.partitionID,
		pw.segmentID,
		pw.storageConfig.GetRootPath(),
		pw.rowNum,
		pw.allocator,
		pw.BlobsWriter,
	)
	if err != nil {
		return err
	}
	pw.bm25StatsLog = bm25StatsLog

	return nil
}

func (pw *packedBinlogRecordWriterBase) GetLogs() (
	fieldBinlogs map[FieldID]*datapb.FieldBinlog,
	statsLog *datapb.FieldBinlog,
	bm25StatsLog map[FieldID]*datapb.FieldBinlog,
	manifest string,
) {
	return pw.fieldBinlogs, pw.statsLog, pw.bm25StatsLog, pw.manifest
}

func (pw *packedBinlogRecordWriterBase) GetRowNum() int64 {
	return pw.rowNum
}

func (pw *packedBinlogRecordWriterBase) FlushChunk() error {
	return nil // do nothing
}

func (pw *packedBinlogRecordWriterBase) Schema() *schemapb.CollectionSchema {
	return pw.schema
}

func (pw *packedBinlogRecordWriterBase) GetBufferUncompressed() uint64 {
	return uint64(pw.multiPartUploadSize)
}

var _ BinlogRecordWriter = (*PackedBinlogRecordWriter)(nil)

type PackedBinlogRecordWriter struct {
	packedBinlogRecordWriterBase
	writer *packedRecordWriter
}

func (pw *PackedBinlogRecordWriter) Write(r Record) error {
	if err := pw.initWriters(r); err != nil {
		return err
	}

	// Track timestamps
	tsArray := r.Column(common.TimeStampField).(*array.Int64)
	rows := r.Len()
	for i := 0; i < rows; i++ {
		ts := typeutil.Timestamp(tsArray.Value(i))
		if ts < pw.tsFrom {
			pw.tsFrom = ts
		}
		if ts > pw.tsTo {
			pw.tsTo = ts
		}
	}

	// Collect statistics
	if err := pw.pkCollector.Collect(r); err != nil {
		return err
	}
	if err := pw.bm25Collector.Collect(r); err != nil {
		return err
	}

	err := pw.writer.Write(r)
	if err != nil {
		return merr.WrapErrServiceInternal(fmt.Sprintf("write record batch error: %s", err.Error()))
	}
	pw.writtenUncompressed = pw.writer.GetWrittenUncompressed()
	return nil
}

func (pw *PackedBinlogRecordWriter) initWriters(r Record) error {
	if pw.writer == nil {
		if len(pw.columnGroups) == 0 {
			allFields := typeutil.GetAllFieldSchemas(pw.schema)
			pw.columnGroups = storagecommon.SplitColumns(allFields, pw.getColumnStatsFromRecord(r, allFields), storagecommon.DefaultPolicies()...)
		}
		logIdStart, _, err := pw.allocator.Alloc(uint32(len(pw.columnGroups)))
		if err != nil {
			return err
		}
		paths := []string{}
		for _, columnGroup := range pw.columnGroups {
			path := metautil.BuildInsertLogPath(pw.storageConfig.GetRootPath(), pw.collectionID, pw.partitionID, pw.segmentID, columnGroup.GroupID, logIdStart)
			paths = append(paths, path)
			logIdStart++
		}
		pw.writer, err = NewPackedRecordWriter(pw.storageConfig.GetBucketName(), paths, pw.schema, pw.bufferSize, pw.multiPartUploadSize, pw.columnGroups, pw.storageConfig, pw.storagePluginContext)
		if err != nil {
			return merr.WrapErrServiceInternal(fmt.Sprintf("can not new packed record writer %s", err.Error()))
		}
	}
	return nil
}

func (pw *PackedBinlogRecordWriter) finalizeBinlogs() {
	if pw.writer == nil {
		return
	}
	pw.rowNum = pw.writer.GetWrittenRowNum()
	if pw.fieldBinlogs == nil {
		pw.fieldBinlogs = make(map[FieldID]*datapb.FieldBinlog, len(pw.columnGroups))
	}
	for _, columnGroup := range pw.columnGroups {
		columnGroupID := columnGroup.GroupID
		if _, exists := pw.fieldBinlogs[columnGroupID]; !exists {
			pw.fieldBinlogs[columnGroupID] = &datapb.FieldBinlog{
				FieldID:     columnGroupID,
				ChildFields: columnGroup.Fields,
			}
		}
		pw.fieldBinlogs[columnGroupID].Binlogs = append(pw.fieldBinlogs[columnGroupID].Binlogs, &datapb.Binlog{
			LogSize:       int64(pw.writer.GetColumnGroupWrittenCompressed(columnGroupID)),
			MemorySize:    int64(pw.writer.GetColumnGroupWrittenUncompressed(columnGroupID)),
			LogPath:       pw.writer.GetWrittenPaths(columnGroupID),
			EntriesNum:    pw.writer.GetWrittenRowNum(),
			TimestampFrom: pw.tsFrom,
			TimestampTo:   pw.tsTo,
		})
	}
	pw.manifest = pw.writer.GetWrittenManifest()
}

func (pw *PackedBinlogRecordWriter) Close() error {
	if pw.writer != nil {
		if err := pw.writer.Close(); err != nil {
			return err
		}
	}
	pw.finalizeBinlogs()
	if err := pw.writeStats(); err != nil {
		return err
	}
	return nil
}

func newPackedBinlogRecordWriter(collectionID, partitionID, segmentID UniqueID, schema *schemapb.CollectionSchema,
	blobsWriter ChunkedBlobsWriter, allocator allocator.Interface, maxRowNum int64, bufferSize, multiPartUploadSize int64, columnGroups []storagecommon.ColumnGroup,
	storageConfig *indexpb.StorageConfig,
	storagePluginContext *indexcgopb.StoragePluginContext,
) (*PackedBinlogRecordWriter, error) {
	arrowSchema, err := ConvertToArrowSchema(schema, true)
	if err != nil {
		return nil, merr.WrapErrParameterInvalid("convert collection schema [%s] to arrow schema error: %s", schema.Name, err.Error())
	}

	writer := &PackedBinlogRecordWriter{
		packedBinlogRecordWriterBase: packedBinlogRecordWriterBase{
			collectionID:         collectionID,
			partitionID:          partitionID,
			segmentID:            segmentID,
			schema:               schema,
			arrowSchema:          arrowSchema,
			BlobsWriter:          blobsWriter,
			allocator:            allocator,
			maxRowNum:            maxRowNum,
			bufferSize:           bufferSize,
			multiPartUploadSize:  multiPartUploadSize,
			columnGroups:         columnGroups,
			storageConfig:        storageConfig,
			storagePluginContext: storagePluginContext,
			tsFrom:               typeutil.MaxTimestamp,
			tsTo:                 0,
		},
	}

	// Create stats collectors
	writer.pkCollector, err = NewPkStatsCollector(
		collectionID,
		schema,
		maxRowNum,
	)
	if err != nil {
		return nil, err
	}

	writer.bm25Collector = NewBm25StatsCollector(schema)

	return writer, nil
}

var _ BinlogRecordWriter = (*PackedManifestRecordWriter)(nil)

type PackedManifestRecordWriter struct {
	packedBinlogRecordWriterBase
	// writer and stats generated at runtime
	writer *packedRecordManifestWriter
}

func (pw *PackedManifestRecordWriter) Write(r Record) error {
	if err := pw.initWriters(r); err != nil {
		return err
	}

	// Track timestamps
	tsArray := r.Column(common.TimeStampField).(*array.Int64)
	rows := r.Len()
	for i := 0; i < rows; i++ {
		ts := typeutil.Timestamp(tsArray.Value(i))
		if ts < pw.tsFrom {
			pw.tsFrom = ts
		}
		if ts > pw.tsTo {
			pw.tsTo = ts
		}
	}

	// Collect statistics
	if err := pw.pkCollector.Collect(r); err != nil {
		return err
	}
	if err := pw.bm25Collector.Collect(r); err != nil {
		return err
	}

	err := pw.writer.Write(r)
	if err != nil {
		return merr.WrapErrServiceInternal(fmt.Sprintf("write record batch error: %s", err.Error()))
	}
	pw.writtenUncompressed = pw.writer.GetWrittenUncompressed()
	return nil
}

func (pw *PackedManifestRecordWriter) initWriters(r Record) error {
	if pw.writer == nil {
		if len(pw.columnGroups) == 0 {
			allFields := typeutil.GetAllFieldSchemas(pw.schema)
			pw.columnGroups = storagecommon.SplitColumns(allFields, pw.getColumnStatsFromRecord(r, allFields), storagecommon.DefaultPolicies()...)
		}

		var err error
		k := metautil.JoinIDPath(pw.collectionID, pw.partitionID, pw.segmentID)
		basePath := path.Join(pw.storageConfig.GetRootPath(), common.SegmentInsertLogPath, k)
		pw.writer, err = NewPackedRecordManifestWriter(pw.storageConfig.GetBucketName(), basePath, pw.schema, pw.bufferSize, pw.multiPartUploadSize, pw.columnGroups, pw.storageConfig, pw.storagePluginContext)
		if err != nil {
			return merr.WrapErrServiceInternal(fmt.Sprintf("can not new packed record writer %s", err.Error()))
		}
	}
	return nil
}

func (pw *PackedManifestRecordWriter) finalizeBinlogs() {
	if pw.writer == nil {
		return
	}
	pw.rowNum = pw.writer.GetWrittenRowNum()
	if pw.fieldBinlogs == nil {
		pw.fieldBinlogs = make(map[FieldID]*datapb.FieldBinlog, len(pw.columnGroups))
	}
	for _, columnGroup := range pw.columnGroups {
		columnGroupID := columnGroup.GroupID
		if _, exists := pw.fieldBinlogs[columnGroupID]; !exists {
			pw.fieldBinlogs[columnGroupID] = &datapb.FieldBinlog{
				FieldID:     columnGroupID,
				ChildFields: columnGroup.Fields,
			}
		}
		pw.fieldBinlogs[columnGroupID].Binlogs = append(pw.fieldBinlogs[columnGroupID].Binlogs, &datapb.Binlog{
			LogSize:       int64(pw.writer.GetColumnGroupWrittenCompressed(columnGroupID)),
			MemorySize:    int64(pw.writer.GetColumnGroupWrittenUncompressed(columnGroupID)),
			LogPath:       pw.writer.GetWrittenPaths(columnGroupID),
			EntriesNum:    pw.writer.GetWrittenRowNum(),
			TimestampFrom: pw.tsFrom,
			TimestampTo:   pw.tsTo,
		})
	}
	pw.manifest = pw.writer.GetWrittenManifest()
}

func (pw *PackedManifestRecordWriter) Close() error {
	if pw.writer != nil {
		if err := pw.writer.Close(); err != nil {
			return err
		}
	}
	pw.finalizeBinlogs()
	if err := pw.writeStats(); err != nil {
		return err
	}
	return nil
}

func newPackedManifestRecordWriter(collectionID, partitionID, segmentID UniqueID, schema *schemapb.CollectionSchema,
	blobsWriter ChunkedBlobsWriter, allocator allocator.Interface, maxRowNum int64, bufferSize, multiPartUploadSize int64, columnGroups []storagecommon.ColumnGroup,
	storageConfig *indexpb.StorageConfig,
	storagePluginContext *indexcgopb.StoragePluginContext,
) (*PackedManifestRecordWriter, error) {
	arrowSchema, err := ConvertToArrowSchema(schema, true)
	if err != nil {
		return nil, merr.WrapErrParameterInvalid("convert collection schema [%s] to arrow schema error: %s", schema.Name, err.Error())
	}

	writer := &PackedManifestRecordWriter{
		packedBinlogRecordWriterBase: packedBinlogRecordWriterBase{
			collectionID:         collectionID,
			partitionID:          partitionID,
			segmentID:            segmentID,
			schema:               schema,
			arrowSchema:          arrowSchema,
			BlobsWriter:          blobsWriter,
			allocator:            allocator,
			maxRowNum:            maxRowNum,
			bufferSize:           bufferSize,
			multiPartUploadSize:  multiPartUploadSize,
			columnGroups:         columnGroups,
			storageConfig:        storageConfig,
			storagePluginContext: storagePluginContext,
			tsFrom:               typeutil.MaxTimestamp,
			tsTo:                 0,
		},
	}

	// Create stats collectors
	writer.pkCollector, err = NewPkStatsCollector(
		collectionID,
		schema,
		maxRowNum,
	)
	if err != nil {
		return nil, err
	}

	writer.bm25Collector = NewBm25StatsCollector(schema)

	return writer, nil
}