mirror of https://gitee.com/milvus-io/milvus.git, synced 2025-12-07 09:38:39 +08:00
Cherry-pick from master pr: #45061 #45488 #45803 #46017 #44991 #45132 #45723 #45726 #45798 #45897 #45918 #44998

This feature integrates the Storage V2 (Loon) FFI interface as a unified storage layer for segment loading and index building in Milvus. It enables manifest-based data access, replacing the traditional binlog-based approach with a more efficient columnar storage format.

Key changes:

### Segment Self-Managed Loading Architecture
- Move segment loading orchestration from Go layer to C++ segcore
- Add NewSegmentWithLoadInfo() API for passing load info during segment creation
- Implement SetLoadInfo() and Load() methods in SegmentInterface
- Support parallel loading of indexed and non-indexed fields
- Enable both sealed and growing segments to self-manage loading

### Storage V2 FFI Integration
- Integrate milvus-storage library's FFI interface for packed columnar data
- Add manifest path support throughout the data path (SegmentInfo, LoadInfo)
- Implement ManifestReader for generating manifests from binlogs
- Support zero-copy data exchange using Arrow C Data Interface
- Add ToCStorageConfig() for Go-to-C storage config conversion

### Manifest-Based Index Building
- Extend FileManagerContext to carry loon_ffi_properties
- Implement GetFieldDatasFromManifest() using Arrow C Stream interface
- Support manifest-based reading in DiskFileManagerImpl and MemFileManagerImpl
- Add fallback to traditional segment insert files when manifest unavailable

### Compaction Pipeline Updates
- Include manifest path in all compaction task builders (clustering, L0, mix)
- Update BulkPackWriterV2 to return manifest path
- Propagate manifest metadata through compaction pipeline

### Configuration & Protocol
- Add common.storageV2.useLoonFFI config option (default: false)
- Add manifest_path field to SegmentLoadInfo and related proto messages
- Add manifest field to compaction segment messages

### Bug Fixes
- Fix mmap settings not applied during segment load (key typo fix)
- Populate index info after segment loading to prevent redundant load tasks
- Fix memory corruption by removing premature transaction handle destruction

Related issues: #44956, #45060, #39173

## Individual Cherry-Picked Commits
1. **e1c923b5cc** - fix: apply mmap settings correctly during segment load (#46017)
2. **63b912370b** - enhance: use milvus-storage internal C++ Reader API for Loon FFI (#45897)
3. **bfc192faa5** - enhance: Resolve issues integrating loon FFI (#45918)
4. **fb18564631** - enhance: support manifest-based index building with Loon FFI reader (#45726)
5. **b9ec2392b9** - enhance: integrate StorageV2 FFI interface for manifest-based segment loading (#45798)
6. **66db3c32e6** - enhance: integrate Storage V2 FFI interface for unified storage access (#45723)
7. **ae789273ac** - fix: populate index info after segment loading to prevent redundant load tasks (#45803)
8. **49688b0be2** - enhance: Move segment loading logic from Go layer to segcore for self-managed loading (#45488)
9. **5b2df88bac** - enhance: [StorageV2] Integrate FFI interface for packed reader (#45132)
10. **91ff5706ac** - enhance: [StorageV2] add manifest path support for FFI integration (#44991)
11. **2192bb4a85** - enhance: add NewSegmentWithLoadInfo API to support segment self-managed loading (#45061)
12. **4296b01da0** - enhance: update delta log serialization APIs to integrate storage V2 (#44998)

## Technical Details

### Architecture Changes
- **Before**: Go layer orchestrated segment loading, making multiple CGO calls
- **After**: Segments autonomously manage loading in C++ layer with single entry point

### Storage Access Pattern
- **Before**: Read individual binlog files through Go storage layer
- **After**: Read manifest file that references packed columnar data via FFI

### Benefits
- Reduced cross-language call overhead
- Better resource management at C++ level
- Improved I/O performance through batched streaming reads
- Cleaner separation of concerns between Go and C++ layers
- Foundation for proactive schema evolution handling

---------

Signed-off-by: Ted Xu <ted.xu@zilliz.com>
Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
Co-authored-by: Ted Xu <ted.xu@zilliz.com>
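Below is a minimal usage sketch of the manifest-based packed writer defined in this file (illustrative only, not part of the change): it assumes the caller already has a collection schema, a column-group layout, and a batch of `Record`s; the helper name and buffer sizes are placeholders.

```go
package storage

import (
	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
	"github.com/milvus-io/milvus/internal/storagecommon"
)

// writePackedWithManifest drives packedRecordManifestWriter end to end and
// returns the manifest path produced when the writer is closed.
func writePackedWithManifest(
	bucketName, basePath string,
	schema *schemapb.CollectionSchema,
	columnGroups []storagecommon.ColumnGroup,
	records []Record, // pre-built records; how they are produced is out of scope here
) (string, error) {
	w, err := NewPackedRecordManifestWriter(
		bucketName, basePath, schema,
		16<<20, // bufferSize: placeholder value
		8<<20,  // multiPartUploadSize: placeholder value
		columnGroups,
		nil, // storageConfig: nil falls back to the common config
		nil, // storagePluginContext
	)
	if err != nil {
		return "", err
	}
	for _, r := range records {
		if err := w.Write(r); err != nil {
			_ = w.Close()
			return "", err
		}
	}
	if err := w.Close(); err != nil {
		return "", err
	}
	// The manifest path is only populated after a successful Close.
	return w.GetWrittenManifest(), nil
}
```

The manifest string returned here is what downstream components (e.g. compaction results and segment load info) carry in place of per-binlog paths.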
371 lines
12 KiB
Go
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"fmt"
	"path"
	"strconv"
	"time"

	"github.com/apache/arrow/go/v17/arrow"
	"github.com/apache/arrow/go/v17/arrow/array"
	"github.com/samber/lo"

	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
	"github.com/milvus-io/milvus/internal/storagecommon"
	"github.com/milvus-io/milvus/internal/storagev2/packed"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
	"github.com/milvus-io/milvus/pkg/v2/util/merr"
	"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

var _ RecordWriter = (*packedRecordWriter)(nil)

// packedRecordWriter writes records through the packed writer into one file
// per column group at pre-assigned paths, tracking written sizes per group.
type packedRecordWriter struct {
	writer                  *packed.PackedWriter
	bufferSize              int64
	columnGroups            []storagecommon.ColumnGroup
	bucketName              string
	pathsMap                map[typeutil.UniqueID]string
	schema                  *schemapb.CollectionSchema
	arrowSchema             *arrow.Schema
	rowNum                  int64
	writtenUncompressed     uint64
	columnGroupUncompressed map[typeutil.UniqueID]uint64
	columnGroupCompressed   map[typeutil.UniqueID]uint64
	outputManifest          string
	storageConfig           *indexpb.StorageConfig
}

func (pw *packedRecordWriter) Write(r Record) error {
	var rec arrow.Record
	sar, ok := r.(*simpleArrowRecord)
	if !ok {
		// Get all fields including struct sub-fields
		allFields := typeutil.GetAllFieldSchemas(pw.schema)
		arrays := make([]arrow.Array, len(allFields))
		for i, field := range allFields {
			arrays[i] = r.Column(field.FieldID)
		}
		rec = array.NewRecord(pw.arrowSchema, arrays, int64(r.Len()))
	} else {
		rec = sar.r
	}
	pw.rowNum += int64(r.Len())
	for col, arr := range rec.Columns() {
		// size := arr.Data().SizeInBytes()
		size := calculateActualDataSize(arr)
		pw.writtenUncompressed += size
		for _, columnGroup := range pw.columnGroups {
			if lo.Contains(columnGroup.Columns, col) {
				pw.columnGroupUncompressed[columnGroup.GroupID] += size
				break
			}
		}
	}
	defer rec.Release()
	return pw.writer.WriteRecordBatch(rec)
}

func (pw *packedRecordWriter) GetWrittenUncompressed() uint64 {
	return pw.writtenUncompressed
}

func (pw *packedRecordWriter) GetColumnGroupWrittenUncompressed(columnGroup typeutil.UniqueID) uint64 {
	if size, ok := pw.columnGroupUncompressed[columnGroup]; ok {
		return size
	}
	return 0
}

func (pw *packedRecordWriter) GetColumnGroupWrittenCompressed(columnGroup typeutil.UniqueID) uint64 {
	if size, ok := pw.columnGroupCompressed[columnGroup]; ok {
		return size
	}
	return 0
}

func (pw *packedRecordWriter) GetWrittenPaths(columnGroup typeutil.UniqueID) string {
	if path, ok := pw.pathsMap[columnGroup]; ok {
		return path
	}
	return ""
}

func (pw *packedRecordWriter) GetWrittenManifest() string {
	return pw.outputManifest
}

func (pw *packedRecordWriter) GetWrittenRowNum() int64 {
	return pw.rowNum
}

func (pw *packedRecordWriter) Close() error {
	if pw.writer != nil {
		err := pw.writer.Close()
		if err != nil {
			return err
		}
		// collect the compressed (on-disk) size of each column group file
		// after the underlying writer has been flushed
		for id, fpath := range pw.pathsMap {
			truePath := path.Join(pw.bucketName, fpath)
			size, err := packed.GetFileSize(truePath, pw.storageConfig)
			if err != nil {
				return err
			}
			pw.columnGroupCompressed[id] = uint64(size)
		}
	}
	return nil
}

// NewPackedRecordWriter creates a packedRecordWriter that writes one packed
// file per column group at the given paths.
func NewPackedRecordWriter(
	bucketName string,
	paths []string,
	schema *schemapb.CollectionSchema,
	bufferSize int64,
	multiPartUploadSize int64,
	columnGroups []storagecommon.ColumnGroup,
	storageConfig *indexpb.StorageConfig,
	storagePluginContext *indexcgopb.StoragePluginContext,
) (*packedRecordWriter, error) {
	// Validate PK field exists before proceeding
	_, err := typeutil.GetPrimaryFieldSchema(schema)
	if err != nil {
		return nil, err
	}

	arrowSchema, err := ConvertToArrowSchema(schema, false)
	if err != nil {
		return nil, merr.WrapErrServiceInternal(
			fmt.Sprintf("cannot convert collection schema %s to arrow schema: %s", schema.Name, err.Error()))
	}
	// if the storage config is not passed, fall back to the common config
	storageType := paramtable.Get().CommonCfg.StorageType.GetValue()
	if storageConfig != nil {
		storageType = storageConfig.GetStorageType()
	}
	// compose the true paths before creating the packed writer;
	// the returned writtenPaths shall remain untouched
	truePaths := lo.Map(paths, func(p string, _ int) string {
		if storageType == "local" {
			return p
		}
		return path.Join(bucketName, p)
	})
	writer, err := packed.NewPackedWriter(truePaths, arrowSchema, bufferSize, multiPartUploadSize, columnGroups, storageConfig, storagePluginContext)
	if err != nil {
		return nil, merr.WrapErrServiceInternal(
			fmt.Sprintf("cannot create packed record writer: %s", err.Error()))
	}
	columnGroupUncompressed := make(map[typeutil.UniqueID]uint64)
	columnGroupCompressed := make(map[typeutil.UniqueID]uint64)
	pathsMap := make(map[typeutil.UniqueID]string)
	if len(paths) != len(columnGroups) {
		return nil, merr.WrapErrParameterInvalid(len(paths), len(columnGroups),
			"paths length is not equal to column groups length for packed record writer")
	}
	for i, columnGroup := range columnGroups {
		columnGroupUncompressed[columnGroup.GroupID] = 0
		columnGroupCompressed[columnGroup.GroupID] = 0
		pathsMap[columnGroup.GroupID] = paths[i]
	}
	return &packedRecordWriter{
		writer:                  writer,
		schema:                  schema,
		arrowSchema:             arrowSchema,
		bufferSize:              bufferSize,
		bucketName:              bucketName,
		pathsMap:                pathsMap,
		columnGroups:            columnGroups,
		columnGroupUncompressed: columnGroupUncompressed,
		columnGroupCompressed:   columnGroupCompressed,
		storageConfig:           storageConfig,
	}, nil
}

// packedRecordManifestWriter writes records through the FFI-backed packed
// writer under a base path and exposes the resulting manifest on Close.
type packedRecordManifestWriter struct {
	writer                  *packed.FFIPackedWriter
	bufferSize              int64
	columnGroups            []storagecommon.ColumnGroup
	bucketName              string
	pathsMap                map[typeutil.UniqueID]string
	schema                  *schemapb.CollectionSchema
	arrowSchema             *arrow.Schema
	rowNum                  int64
	writtenUncompressed     uint64
	columnGroupUncompressed map[typeutil.UniqueID]uint64
	columnGroupCompressed   map[typeutil.UniqueID]uint64
	outputManifest          string
	storageConfig           *indexpb.StorageConfig
}

func (pw *packedRecordManifestWriter) Write(r Record) error {
	var rec arrow.Record
	sar, ok := r.(*simpleArrowRecord)
	if !ok {
		// Get all fields including struct sub-fields
		allFields := typeutil.GetAllFieldSchemas(pw.schema)
		arrays := make([]arrow.Array, len(allFields))
		for i, field := range allFields {
			arrays[i] = r.Column(field.FieldID)
		}
		rec = array.NewRecord(pw.arrowSchema, arrays, int64(r.Len()))
	} else {
		rec = sar.r
	}
	pw.rowNum += int64(r.Len())
	for col, arr := range rec.Columns() {
		// size := arr.Data().SizeInBytes()
		size := calculateActualDataSize(arr)
		pw.writtenUncompressed += size
		for _, columnGroup := range pw.columnGroups {
			if lo.Contains(columnGroup.Columns, col) {
				pw.columnGroupUncompressed[columnGroup.GroupID] += size
				break
			}
		}
	}
	defer rec.Release()
	return pw.writer.WriteRecordBatch(rec)
}

func (pw *packedRecordManifestWriter) GetWrittenUncompressed() uint64 {
	return pw.writtenUncompressed
}

func (pw *packedRecordManifestWriter) GetColumnGroupWrittenUncompressed(columnGroup typeutil.UniqueID) uint64 {
	if size, ok := pw.columnGroupUncompressed[columnGroup]; ok {
		return size
	}
	return 0
}

func (pw *packedRecordManifestWriter) GetColumnGroupWrittenCompressed(columnGroup typeutil.UniqueID) uint64 {
	if size, ok := pw.columnGroupCompressed[columnGroup]; ok {
		return size
	}
	return 0
}

func (pw *packedRecordManifestWriter) GetWrittenPaths(columnGroup typeutil.UniqueID) string {
	if path, ok := pw.pathsMap[columnGroup]; ok {
		return path
	}
	return ""
}

func (pw *packedRecordManifestWriter) GetWrittenManifest() string {
	return pw.outputManifest
}

func (pw *packedRecordManifestWriter) GetWrittenRowNum() int64 {
	return pw.rowNum
}

func (pw *packedRecordManifestWriter) Close() error {
	if pw.writer != nil {
		manifest, err := pw.writer.Close()
		if err != nil {
			return err
		}
		pw.outputManifest = manifest
		// per-column-group compressed sizes are not tracked for the FFI
		// writer; record zero for each group
		for id := range pw.pathsMap {
			pw.columnGroupCompressed[id] = uint64(0)
		}
	}
	return nil
}

// NewPackedRecordManifestWriter creates a packedRecordManifestWriter that
// writes packed data under basePath via the FFI writer and reports the
// generated manifest after Close.
func NewPackedRecordManifestWriter(
	bucketName string,
	basePath string,
	schema *schemapb.CollectionSchema,
	bufferSize int64,
	multiPartUploadSize int64,
	columnGroups []storagecommon.ColumnGroup,
	storageConfig *indexpb.StorageConfig,
	storagePluginContext *indexcgopb.StoragePluginContext,
) (*packedRecordManifestWriter, error) {
	// Validate PK field exists before proceeding
	_, err := typeutil.GetPrimaryFieldSchema(schema)
	if err != nil {
		return nil, err
	}

	arrowSchema, err := ConvertToArrowSchema(schema, true)
	if err != nil {
		return nil, merr.WrapErrServiceInternal(
			fmt.Sprintf("cannot convert collection schema %s to arrow schema: %s", schema.Name, err.Error()))
	}
	// if the storage config is not passed, fall back to the common config
	storageType := paramtable.Get().CommonCfg.StorageType.GetValue()
	if storageConfig != nil {
		storageType = storageConfig.GetStorageType()
	}
	ffiBasePath := basePath
	if storageType != "local" {
		ffiBasePath = path.Join(bucketName, basePath)
	}
	writer, err := packed.NewFFIPackedWriter(ffiBasePath, arrowSchema, columnGroups, storageConfig, storagePluginContext)
	if err != nil {
		return nil, merr.WrapErrServiceInternal(
			fmt.Sprintf("cannot create packed record manifest writer: %s", err.Error()))
	}
	columnGroupUncompressed := make(map[typeutil.UniqueID]uint64)
	columnGroupCompressed := make(map[typeutil.UniqueID]uint64)

	// provide mock paths: the FFI writer manages the actual file layout,
	// so generate a unique placeholder path per column group
	pathsMap := make(map[typeutil.UniqueID]string)
	start := time.Now().UnixNano()
	for _, columnGroup := range columnGroups {
		columnGroupUncompressed[columnGroup.GroupID] = 0
		columnGroupCompressed[columnGroup.GroupID] = 0
		start++
		pathsMap[columnGroup.GroupID] = path.Join(basePath, strconv.FormatInt(columnGroup.GroupID, 10), strconv.FormatInt(start, 10))
	}

	return &packedRecordManifestWriter{
		writer:                  writer,
		schema:                  schema,
		arrowSchema:             arrowSchema,
		bufferSize:              bufferSize,
		bucketName:              bucketName,
		pathsMap:                pathsMap,
		columnGroups:            columnGroups,
		columnGroupUncompressed: columnGroupUncompressed,
		columnGroupCompressed:   columnGroupCompressed,
		storageConfig:           storageConfig,
	}, nil
}

// Deprecated: to be removed.
func NewPackedSerializeWriter(bucketName string, paths []string, schema *schemapb.CollectionSchema, bufferSize int64,
	multiPartUploadSize int64, columnGroups []storagecommon.ColumnGroup, batchSize int,
) (*SerializeWriterImpl[*Value], error) {
	packedRecordWriter, err := NewPackedRecordWriter(bucketName, paths, schema, bufferSize, multiPartUploadSize, columnGroups, nil, nil)
	if err != nil {
		return nil, merr.WrapErrServiceInternal(
			fmt.Sprintf("cannot create packed record writer: %s", err.Error()))
	}
	return NewSerializeRecordWriter(packedRecordWriter, func(v []*Value) (Record, error) {
		return ValueSerializer(v, schema)
	}, batchSize), nil
}