mirror of https://gitee.com/milvus-io/milvus.git · synced 2025-12-06 17:18:35 +08:00
Related #44956

This commit integrates the Storage V2 FFI (Foreign Function Interface) throughout the Milvus codebase, enabling unified storage access through the Loon FFI layer. This is a significant step towards standardizing storage operations across different storage versions.

## 1. Configuration Support

- **configs/milvus.yaml**: Added the `useLoonFFI` configuration flag under the `common.storage.file.splitByAvgSize` section
  - Allows a runtime toggle between traditional binlog readers and the new FFI-based manifest readers
  - Default: `false` (maintains backward compatibility)

## 2. Core FFI Infrastructure

### Enhanced Utilities (internal/core/src/storage/loon_ffi/util.cpp/h)

- **ToCStorageConfig()**: Converts Go's `StorageConfig` to C's `CStorageConfig` struct for FFI calls
- **GetManifest()**: Parses manifest JSON and retrieves the latest column groups using FFI
  - Accepts a manifest path with `base_path` and `ver` fields
  - Calls the `get_latest_column_groups()` FFI function
  - Returns column group information as a string
  - Comprehensive error handling for JSON parsing and FFI errors

## 3. Dependency Updates

- **internal/core/thirdparty/milvus-storage/CMakeLists.txt**:
  - Updated the milvus-storage version from `0883026` to `302143c`
  - Ensures compatibility with the latest FFI interfaces

## 4. Data Coordinator Changes

All compaction task builders now include the manifest path in segment binlogs:

- **compaction_task_clustering.go**: Added `Manifest: segInfo.GetManifestPath()` to segment binlogs
- **compaction_task_l0.go**: Added the manifest path to both L0 segment selection and compaction plan building
- **compaction_task_mix.go**: Added the manifest path to mixed compaction segment binlogs
- **meta.go**: Updated the metadata completion logic:
  - `completeClusterCompactionMutation()`: Set `ManifestPath` in new segment info
  - `completeMixCompactionMutation()`: Preserve the manifest path in compacted segments
  - `completeSortCompactionMutation()`: Include the manifest path in sorted segments

## 5. Data Node Compactor Enhancements

All compactors were updated to support dual-mode reading (binlog vs. manifest).

## 6. Flush & Sync Manager Updates

### Pack Writer V2 (pack_writer_v2.go)

- **BulkPackWriterV2.Write()**: Extended the return signature to include `manifest string`
- Implementation (see the manifest-path sketch below):
  - Generate the manifest path: `path.Join(pack.segmentID, "manifest.json")`
  - Write packed data using the FFI-based writer
  - Return the manifest path along with binlogs, deltas, and stats

### Task Handling (task.go)

- Updated all sync task result handling to accommodate the new manifest return value
- Ensured backward compatibility for callers not using the manifest

## 7. Go Storage Layer Integration

### New Interfaces and Implementations

- **record_reader.go**: Interface for unified record reading across storage versions
- **record_writer.go**: Interface for unified record writing across storage versions
- **binlog_record_writer.go**: Concrete implementation for traditional binlog-based writing

### Enhanced Schema Support (schema.go, schema_test.go)

- Schema conversion utilities to support FFI-based storage operations
- Ensures proper Arrow schema mapping for V2 storage

### Serialization Updates

- **serde.go, serde_events.go, serde_events_v2.go**: Updated to work with the new reader/writer interfaces
- Test files updated to validate dual-mode serialization
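Referring back to the Pack Writer V2 change above: the manifest-path rule reduces to joining `"manifest.json"` under the segment's ID. A minimal sketch of that rule; the helper name and the int64-to-string conversion are assumptions for illustration, not the actual implementation:

```go
package sketch

import (
	"path"
	"strconv"
)

// manifestPathFor sketches the manifest-path generation described for
// BulkPackWriterV2.Write(): "manifest.json" placed under the segment ID.
// The helper name and the FormatInt conversion are illustrative assumptions.
func manifestPathFor(segmentID int64) string {
	return path.Join(strconv.FormatInt(segmentID, 10), "manifest.json")
}
```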
## 8. Storage V2 Packed Format

### FFI Common (storagev2/packed/ffi_common.go)

- Common FFI utilities and type conversions for the packed storage format

### Packed Writer FFI (storagev2/packed/packed_writer_ffi.go)

- FFI-based implementation of the packed writer
- Integrates with the Loon storage layer for efficient columnar writes

### Packed Reader FFI (storagev2/packed/packed_reader_ffi.go)

- Already existed; now complemented by the writer implementation

## 9. Protocol Buffer Updates

### data_coord.proto & datapb/data_coord.pb.go

- Added a `manifest` field to compaction segment messages
- Enables passing manifest metadata through the compaction pipeline

### worker.proto & workerpb/worker.pb.go

- Added a compaction parameter for the `useLoonFFI` flag
- Allows workers to receive the FFI configuration from the coordinator

## 10. Parameter Configuration

### component_param.go

- Added the `UseLoonFFI` parameter to the compaction configuration
- Reads from the `common.storage.file.useLoonFFI` config path
- Default: `false` for safe rollout

## 11. Test Updates

- **clustering_compactor_storage_v2_test.go**: Updated signatures to handle the manifest return value
- **mix_compactor_storage_v2_test.go**: Updated test helpers for manifest support
- **namespace_compactor_test.go**: Adjusted writer calls to expect the manifest return value
- **pack_writer_v2_test.go**: Validated manifest generation in pack writing

This integration follows a **dual-mode approach** (see the dispatch sketch below):

1. **Legacy Path**: Traditional binlog-based reading/writing (when `useLoonFFI=false` or no manifest is present)
2. **FFI Path**: Manifest-based reading/writing through the Loon FFI (when `useLoonFFI=true` and a manifest exists)

---------

Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
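The file shown below implements the read side of this dual-mode approach in the compactor. As a minimal standalone sketch of the dispatch, using the reader constructors that appear in the file; the `chooseReader` helper name is hypothetical, the constructors are assumed to accept only their required arguments here, and note that the compactor below branches on manifest presence alone:

```go
package sketch

import (
	"context"

	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
)

// chooseReader sketches the dual-mode dispatch: manifest-based reading through
// the Loon FFI when enabled and a manifest exists, legacy binlog reading
// otherwise. The helper name is hypothetical; the constructors are real.
func chooseReader(ctx context.Context, useLoonFFI bool,
	s *datapb.CompactionSegmentBinlogs, schema *schemapb.CollectionSchema,
) (storage.RecordReader, error) {
	if useLoonFFI && s.GetManifest() != "" {
		// FFI path: read records via the segment's manifest.
		return storage.NewManifestRecordReader(ctx, s.GetManifest(), schema)
	}
	// Legacy path: read records from field binlogs.
	return storage.NewBinlogRecordReader(ctx, s.GetFieldBinlogs(), schema)
}
```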
164 lines · 5.4 KiB · Go
package compactor

import (
	"context"
	"fmt"
	"time"

	"github.com/apache/arrow/go/v17/arrow/array"
	"go.opentelemetry.io/otel"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
	"github.com/milvus-io/milvus/internal/allocator"
	"github.com/milvus-io/milvus/internal/compaction"
	"github.com/milvus-io/milvus/internal/flushcommon/io"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/pkg/v2/common"
	"github.com/milvus-io/milvus/pkg/v2/log"
	"github.com/milvus-io/milvus/pkg/v2/metrics"
	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
	"github.com/milvus-io/milvus/pkg/v2/util/timerecord"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)
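
// mergeSortMultipleSegments merge-sorts the records of multiple segments into
// new sorted segments, dropping deleted and expired entities along the way.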
func mergeSortMultipleSegments(ctx context.Context,
	plan *datapb.CompactionPlan,
	collectionID, partitionID, maxRows int64,
	binlogIO io.BinlogIO,
	binlogs []*datapb.CompactionSegmentBinlogs,
	tr *timerecord.TimeRecorder,
	currentTime time.Time,
	collectionTtl int64,
	compactionParams compaction.Params,
	sortByFields []int64,
) ([]*datapb.CompactionSegment, error) {
	_ = tr.RecordSpan()

	ctx, span := otel.Tracer(typeutil.DataNodeRole).Start(ctx, "mergeSortMultipleSegments")
	defer span.End()

	log := log.With(zap.Int64("planID", plan.GetPlanID()))

	segIDAlloc := allocator.NewLocalAllocator(plan.GetPreAllocatedSegmentIDs().GetBegin(), plan.GetPreAllocatedSegmentIDs().GetEnd())
	logIDAlloc := allocator.NewLocalAllocator(plan.GetPreAllocatedLogIDs().GetBegin(), plan.GetPreAllocatedLogIDs().GetEnd())
	compAlloc := NewCompactionAllocator(segIDAlloc, logIDAlloc)
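	// The output writer receives the storage config plus the UseLoonFFI flag,
	// so segment output can go through the FFI-based packed writer when enabled.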
	writer, err := NewMultiSegmentWriter(ctx, binlogIO, compAlloc, plan.GetMaxSize(), plan.GetSchema(), compactionParams, maxRows, partitionID, collectionID, plan.GetChannel(), 4096,
		storage.WithStorageConfig(compactionParams.StorageConfig),
		storage.WithUseLoonFFI(compactionParams.UseLoonFFI))
	if err != nil {
		return nil, err
	}

	pkField, err := typeutil.GetPrimaryFieldSchema(plan.GetSchema())
	if err != nil {
		log.Warn("failed to get pk field from schema")
		return nil, err
	}
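
	// Build one record reader and one entity filter per input segment. Reader
	// selection is dual-mode: manifest-based (Loon FFI) when the segment
	// carries a manifest, legacy binlog-based otherwise.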
	segmentReaders := make([]storage.RecordReader, len(binlogs))
	segmentFilters := make([]compaction.EntityFilter, len(binlogs))
	for i, s := range binlogs {
		var reader storage.RecordReader
		if s.GetManifest() != "" {
			reader, err = storage.NewManifestRecordReader(ctx,
				s.GetManifest(),
				plan.GetSchema(),
				storage.WithCollectionID(collectionID),
				storage.WithDownloader(binlogIO.Download),
				storage.WithVersion(s.StorageVersion),
				storage.WithStorageConfig(compactionParams.StorageConfig),
			)
		} else {
			reader, err = storage.NewBinlogRecordReader(ctx,
				s.GetFieldBinlogs(),
				plan.GetSchema(),
				storage.WithCollectionID(collectionID),
				storage.WithDownloader(binlogIO.Download),
				storage.WithVersion(s.StorageVersion),
				storage.WithStorageConfig(compactionParams.StorageConfig),
			)
		}
		if err != nil {
			return nil, err
		}
		segmentReaders[i] = reader
		deltalogPaths := make([]string, 0)
		for _, d := range s.GetDeltalogs() {
			for _, l := range d.GetBinlogs() {
				deltalogPaths = append(deltalogPaths, l.GetLogPath())
			}
		}
		delta, err := compaction.ComposeDeleteFromDeltalogs(ctx, binlogIO, deltalogPaths)
		if err != nil {
			return nil, err
		}
		segmentFilters[i] = compaction.NewEntityFilter(delta, collectionTtl, currentTime)
	}

	defer func() {
		for _, r := range segmentReaders {
			r.Close()
		}
	}()
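
	// The merge predicate keeps a row only if its segment's filter does not
	// mark it deleted or expired; it is specialized by primary-key type.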
	var predicate func(r storage.Record, ri, i int) bool
	switch pkField.DataType {
	case schemapb.DataType_Int64:
		predicate = func(r storage.Record, ri, i int) bool {
			pk := r.Column(pkField.FieldID).(*array.Int64).Value(i)
			ts := r.Column(common.TimeStampField).(*array.Int64).Value(i)
			return !segmentFilters[ri].Filtered(pk, uint64(ts))
		}
	case schemapb.DataType_VarChar:
		predicate = func(r storage.Record, ri, i int) bool {
			pk := r.Column(pkField.FieldID).(*array.String).Value(i)
			ts := r.Column(common.TimeStampField).(*array.Int64).Value(i)
			return !segmentFilters[ri].Filtered(pk, uint64(ts))
		}
	default:
		log.Warn("compaction only support int64 and varchar pk field")
	}
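
	// Merge-sort all segment streams into the writer, applying the predicate
	// to drop filtered rows and sorting by the given fields.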
	if _, err = storage.MergeSort(compactionParams.BinLogMaxSize, plan.GetSchema(), segmentReaders, writer, predicate, sortByFields); err != nil {
		writer.Close()
		return nil, err
	}

	if err := writer.Close(); err != nil {
		log.Warn("compact wrong, failed to finish writer", zap.Error(err))
		return nil, err
	}

	res := writer.GetCompactionSegments()
	for _, seg := range res {
		seg.IsSorted = true
	}

	var (
		deletedRowCount            int
		expiredRowCount            int
		missingDeleteCount         int
		deltalogDeleteEntriesCount int
	)

	for _, filter := range segmentFilters {
		deletedRowCount += filter.GetDeletedCount()
		expiredRowCount += filter.GetExpiredCount()
		missingDeleteCount += filter.GetMissingDeleteCount()
		deltalogDeleteEntriesCount += filter.GetDeltalogDeleteCount()
	}

	totalElapse := tr.RecordSpan()
	log.Info("compact mergeSortMultipleSegments end",
		zap.Int("deleted row count", deletedRowCount),
		zap.Int("expired entities", expiredRowCount),
		zap.Int("missing deletes", missingDeleteCount),
		zap.Duration("total elapse", totalElapse))

	metrics.DataNodeCompactionDeleteCount.WithLabelValues(fmt.Sprint(collectionID)).Add(float64(deltalogDeleteEntriesCount))
	metrics.DataNodeCompactionMissingDeleteCount.WithLabelValues(fmt.Sprint(collectionID)).Add(float64(missingDeleteCount))

	return res, nil
}