Cherry-pick from master PRs: #45061 #45488 #45803 #46017 #44991 #45132 #45723 #45726 #45798 #45897 #45918 #44998

This feature integrates the Storage V2 (Loon) FFI interface as a unified storage layer for segment loading and index building in Milvus. It enables manifest-based data access, replacing the traditional binlog-based approach with a more efficient columnar storage format.

Key changes:

### Segment Self-Managed Loading Architecture
- Move segment loading orchestration from the Go layer to C++ segcore
- Add the NewSegmentWithLoadInfo() API for passing load info during segment creation (see the loading sketch after the commit list below)
- Implement SetLoadInfo() and Load() methods in SegmentInterface
- Support parallel loading of indexed and non-indexed fields
- Enable both sealed and growing segments to self-manage loading

### Storage V2 FFI Integration
- Integrate the milvus-storage library's FFI interface for packed columnar data
- Add manifest path support throughout the data path (SegmentInfo, LoadInfo)
- Implement ManifestReader for generating manifests from binlogs
- Support zero-copy data exchange using the Arrow C Data Interface
- Add ToCStorageConfig() for Go-to-C storage config conversion

### Manifest-Based Index Building
- Extend FileManagerContext to carry loon_ffi_properties
- Implement GetFieldDatasFromManifest() using the Arrow C Stream interface
- Support manifest-based reading in DiskFileManagerImpl and MemFileManagerImpl
- Add a fallback to traditional segment insert files when no manifest is available

### Compaction Pipeline Updates
- Include the manifest path in all compaction task builders (clustering, L0, mix)
- Update BulkPackWriterV2 to return the manifest path
- Propagate manifest metadata through the compaction pipeline

### Configuration & Protocol
- Add the common.storageV2.useLoonFFI config option (default: false; see the writer excerpt after the commit list below)
- Add a manifest_path field to SegmentLoadInfo and related proto messages
- Add a manifest field to compaction segment messages

### Bug Fixes
- Fix mmap settings not being applied during segment load (key typo fix)
- Populate index info after segment loading to prevent redundant load tasks
- Fix memory corruption by removing a premature transaction handle destruction

Related issues: #44956, #45060, #39173

## Individual Cherry-Picked Commits

1. **e1c923b5cc** - fix: apply mmap settings correctly during segment load (#46017)
2. **63b912370b** - enhance: use milvus-storage internal C++ Reader API for Loon FFI (#45897)
3. **bfc192faa5** - enhance: Resolve issues integrating loon FFI (#45918)
4. **fb18564631** - enhance: support manifest-based index building with Loon FFI reader (#45726)
5. **b9ec2392b9** - enhance: integrate StorageV2 FFI interface for manifest-based segment loading (#45798)
6. **66db3c32e6** - enhance: integrate Storage V2 FFI interface for unified storage access (#45723)
7. **ae789273ac** - fix: populate index info after segment loading to prevent redundant load tasks (#45803)
8. **49688b0be2** - enhance: Move segment loading logic from Go layer to segcore for self-managed loading (#45488)
9. **5b2df88bac** - enhance: [StorageV2] Integrate FFI interface for packed reader (#45132)
10. **91ff5706ac** - enhance: [StorageV2] add manifest path support for FFI integration (#44991)
11. **2192bb4a85** - enhance: add NewSegmentWithLoadInfo API to support segment self-managed loading (#45061)
12. **4296b01da0** - enhance: update delta log serialization APIs to integrate storage V2 (#44998)
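The self-managed loading flow above replaces per-field CGO orchestration with a single hand-off. Below is a minimal sketch of how the Go side might drive it: `NewSegmentWithLoadInfo` and `Load` are named in this change, but the `segments` package path, the exact Go signatures, `SegmentTypeSealed`, `Release`, and the `querypb.SegmentLoadInfo` wiring are all assumptions for illustration, not the actual Milvus API.

```go
// Sketch only: single-entry-point segment loading as described above.
// NewSegmentWithLoadInfo and Load come from this change; everything else
// (package path, signatures, Release) is assumed for illustration.
func loadSealedSegment(ctx context.Context, coll *segments.Collection,
	info *querypb.SegmentLoadInfo, // now also carries manifest_path
) (segments.Segment, error) {
	// Hand the complete load info to segcore at creation time
	// instead of orchestrating per-field loads from Go.
	seg, err := segments.NewSegmentWithLoadInfo(ctx, coll, segments.SegmentTypeSealed, info)
	if err != nil {
		return nil, err
	}
	// The segment then loads indexed and non-indexed fields itself,
	// in parallel, inside the C++ layer.
	if err := seg.Load(ctx); err != nil {
		seg.Release(ctx)
		return nil, err
	}
	return seg, nil
}
```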
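The `common.storageV2.useLoonFFI` switch reaches compaction as `compactionParams.UseLoonFFI` and is handed straight to the writer. This excerpt is taken from `mergeSortMultipleSegments` in the file below:

```go
// From this file: the compaction writer receives the storage config and
// the Loon FFI switch as options; the default (false) presumably keeps
// output on the traditional binlog path.
writer, err := NewMultiSegmentWriter(ctx, binlogIO, compAlloc, plan.GetMaxSize(),
	plan.GetSchema(), compactionParams, maxRows, partitionID, collectionID,
	plan.GetChannel(), 4096,
	storage.WithStorageConfig(compactionParams.StorageConfig),
	storage.WithUseLoonFFI(compactionParams.UseLoonFFI))
```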
## Technical Details

### Architecture Changes
- **Before**: the Go layer orchestrated segment loading, making multiple CGO calls
- **After**: segments autonomously manage loading in the C++ layer behind a single entry point

### Storage Access Pattern
- **Before**: read individual binlog files through the Go storage layer
- **After**: read a manifest file that references packed columnar data via FFI (see the reader-selection excerpt below)

### Benefits
- Reduced cross-language call overhead
- Better resource management at the C++ level
- Improved I/O performance through batched streaming reads
- Cleaner separation of concerns between the Go and C++ layers
- Foundation for proactive schema evolution handling

---------

Signed-off-by: Ted Xu <ted.xu@zilliz.com>
Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
Co-authored-by: Ted Xu <ted.xu@zilliz.com>
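The before/after storage access pattern is visible verbatim in this file: each input segment gets a manifest-based reader when `GetManifest()` is non-empty, and falls back to the binlog reader otherwise. Condensed from the per-segment loop in `mergeSortMultipleSegments`:

```go
// Condensed from this file: choose the manifest-based reader (Storage V2)
// when a manifest path is present, else read the individual binlog files.
var reader storage.RecordReader
if s.GetManifest() != "" {
	reader, err = storage.NewManifestRecordReader(ctx, s.GetManifest(), plan.GetSchema(),
		storage.WithCollectionID(collectionID),
		storage.WithDownloader(binlogIO.Download),
		storage.WithVersion(s.StorageVersion),
		storage.WithStorageConfig(compactionParams.StorageConfig))
} else {
	reader, err = storage.NewBinlogRecordReader(ctx, s.GetFieldBinlogs(), plan.GetSchema(),
		storage.WithCollectionID(collectionID),
		storage.WithDownloader(binlogIO.Download),
		storage.WithVersion(s.StorageVersion),
		storage.WithStorageConfig(compactionParams.StorageConfig))
}
```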
164 lines · 5.4 KiB · Go
package compactor

import (
	"context"
	"fmt"
	"time"

	"github.com/apache/arrow/go/v17/arrow/array"
	"go.opentelemetry.io/otel"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
	"github.com/milvus-io/milvus/internal/allocator"
	"github.com/milvus-io/milvus/internal/compaction"
	"github.com/milvus-io/milvus/internal/flushcommon/io"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/pkg/v2/common"
	"github.com/milvus-io/milvus/pkg/v2/log"
	"github.com/milvus-io/milvus/pkg/v2/metrics"
	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
	"github.com/milvus-io/milvus/pkg/v2/util/timerecord"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

func mergeSortMultipleSegments(ctx context.Context,
	plan *datapb.CompactionPlan,
	collectionID, partitionID, maxRows int64,
	binlogIO io.BinlogIO,
	binlogs []*datapb.CompactionSegmentBinlogs,
	tr *timerecord.TimeRecorder,
	currentTime time.Time,
	collectionTtl int64,
	compactionParams compaction.Params,
	sortByFields []int64,
) ([]*datapb.CompactionSegment, error) {
	_ = tr.RecordSpan()

	ctx, span := otel.Tracer(typeutil.DataNodeRole).Start(ctx, "mergeSortMultipleSegments")
	defer span.End()

	log := log.With(zap.Int64("planID", plan.GetPlanID()))

	segIDAlloc := allocator.NewLocalAllocator(plan.GetPreAllocatedSegmentIDs().GetBegin(), plan.GetPreAllocatedSegmentIDs().GetEnd())
	logIDAlloc := allocator.NewLocalAllocator(plan.GetPreAllocatedLogIDs().GetBegin(), plan.GetPreAllocatedLogIDs().GetEnd())
	compAlloc := NewCompactionAllocator(segIDAlloc, logIDAlloc)
	writer, err := NewMultiSegmentWriter(ctx, binlogIO, compAlloc, plan.GetMaxSize(), plan.GetSchema(), compactionParams, maxRows, partitionID, collectionID, plan.GetChannel(), 4096,
		storage.WithStorageConfig(compactionParams.StorageConfig),
		storage.WithUseLoonFFI(compactionParams.UseLoonFFI))
	if err != nil {
		return nil, err
	}

	pkField, err := typeutil.GetPrimaryFieldSchema(plan.GetSchema())
	if err != nil {
		log.Warn("failed to get pk field from schema")
		return nil, err
	}

	// Build one record reader and one delete/TTL filter per input segment.
	// Segments that carry a manifest path use the Storage V2 (Loon) manifest
	// reader; the rest fall back to reading individual binlog files.
	segmentReaders := make([]storage.RecordReader, len(binlogs))
	segmentFilters := make([]compaction.EntityFilter, len(binlogs))
	for i, s := range binlogs {
		var reader storage.RecordReader
		if s.GetManifest() != "" {
			reader, err = storage.NewManifestRecordReader(ctx,
				s.GetManifest(),
				plan.GetSchema(),
				storage.WithCollectionID(collectionID),
				storage.WithDownloader(binlogIO.Download),
				storage.WithVersion(s.StorageVersion),
				storage.WithStorageConfig(compactionParams.StorageConfig),
			)
		} else {
			reader, err = storage.NewBinlogRecordReader(ctx,
				s.GetFieldBinlogs(),
				plan.GetSchema(),
				storage.WithCollectionID(collectionID),
				storage.WithDownloader(binlogIO.Download),
				storage.WithVersion(s.StorageVersion),
				storage.WithStorageConfig(compactionParams.StorageConfig),
			)
		}
		if err != nil {
			return nil, err
		}
		segmentReaders[i] = reader
		deltalogPaths := make([]string, 0)
		for _, d := range s.GetDeltalogs() {
			for _, l := range d.GetBinlogs() {
				deltalogPaths = append(deltalogPaths, l.GetLogPath())
			}
		}
		delta, err := compaction.ComposeDeleteFromDeltalogs(ctx, binlogIO, deltalogPaths)
		if err != nil {
			return nil, err
		}
		segmentFilters[i] = compaction.NewEntityFilter(delta, collectionTtl, currentTime)
	}

	defer func() {
		for _, r := range segmentReaders {
			r.Close()
		}
	}()

	// The predicate keeps only rows that survive each segment's
	// delete/expiration filter, keyed by primary key and timestamp.
	var predicate func(r storage.Record, ri, i int) bool
	switch pkField.DataType {
	case schemapb.DataType_Int64:
		predicate = func(r storage.Record, ri, i int) bool {
			pk := r.Column(pkField.FieldID).(*array.Int64).Value(i)
			ts := r.Column(common.TimeStampField).(*array.Int64).Value(i)
			return !segmentFilters[ri].Filtered(pk, uint64(ts))
		}
	case schemapb.DataType_VarChar:
		predicate = func(r storage.Record, ri, i int) bool {
			pk := r.Column(pkField.FieldID).(*array.String).Value(i)
			ts := r.Column(common.TimeStampField).(*array.Int64).Value(i)
			return !segmentFilters[ri].Filtered(pk, uint64(ts))
		}
	default:
		log.Warn("compaction only support int64 and varchar pk field")
	}

	// Merge-sort all segment streams into the multi-segment writer.
	if _, err = storage.MergeSort(compactionParams.BinLogMaxSize, plan.GetSchema(), segmentReaders, writer, predicate, sortByFields); err != nil {
		writer.Close()
		return nil, err
	}

	if err := writer.Close(); err != nil {
		log.Warn("compact wrong, failed to finish writer", zap.Error(err))
		return nil, err
	}

	res := writer.GetCompactionSegments()
	for _, seg := range res {
		seg.IsSorted = true
	}

	var (
		deletedRowCount            int
		expiredRowCount            int
		missingDeleteCount         int
		deltalogDeleteEntriesCount int
	)

	for _, filter := range segmentFilters {
		deletedRowCount += filter.GetDeletedCount()
		expiredRowCount += filter.GetExpiredCount()
		missingDeleteCount += filter.GetMissingDeleteCount()
		deltalogDeleteEntriesCount += filter.GetDeltalogDeleteCount()
	}

	totalElapse := tr.RecordSpan()
	log.Info("compact mergeSortMultipleSegments end",
		zap.Int("deleted row count", deletedRowCount),
		zap.Int("expired entities", expiredRowCount),
		zap.Int("missing deletes", missingDeleteCount),
		zap.Duration("total elapse", totalElapse))

	metrics.DataNodeCompactionDeleteCount.WithLabelValues(fmt.Sprint(collectionID)).Add(float64(deltalogDeleteEntriesCount))
	metrics.DataNodeCompactionMissingDeleteCount.WithLabelValues(fmt.Sprint(collectionID)).Add(float64(missingDeleteCount))

	return res, nil
}