Mirror of https://gitee.com/milvus-io/milvus.git (synced 2025-12-06 17:18:35 +08:00)
Cherry-pick from master. PRs: #45061 #45488 #45803 #46017 #44991 #45132 #45723 #45726 #45798 #45897 #45918 #44998

This feature integrates the Storage V2 (Loon) FFI interface as a unified storage layer for segment loading and index building in Milvus. It enables manifest-based data access, replacing the traditional binlog-based approach with a more efficient columnar storage format.

Key changes:

### Segment Self-Managed Loading Architecture
- Move segment loading orchestration from the Go layer to C++ segcore
- Add a NewSegmentWithLoadInfo() API for passing load info during segment creation
- Implement SetLoadInfo() and Load() methods in SegmentInterface
- Support parallel loading of indexed and non-indexed fields
- Enable both sealed and growing segments to self-manage loading

### Storage V2 FFI Integration
- Integrate the milvus-storage library's FFI interface for packed columnar data
- Add manifest path support throughout the data path (SegmentInfo, LoadInfo)
- Implement ManifestReader for generating manifests from binlogs
- Support zero-copy data exchange using the Arrow C Data Interface
- Add ToCStorageConfig() for Go-to-C storage config conversion

### Manifest-Based Index Building
- Extend FileManagerContext to carry loon_ffi_properties
- Implement GetFieldDatasFromManifest() using the Arrow C Stream interface
- Support manifest-based reading in DiskFileManagerImpl and MemFileManagerImpl
- Add a fallback to traditional segment insert files when the manifest is unavailable

### Compaction Pipeline Updates
- Include the manifest path in all compaction task builders (clustering, L0, mix)
- Update BulkPackWriterV2 to return the manifest path
- Propagate manifest metadata through the compaction pipeline

### Configuration & Protocol
- Add the common.storageV2.useLoonFFI config option (default: false)
- Add a manifest_path field to SegmentLoadInfo and related proto messages
- Add a manifest field to compaction segment messages

### Bug Fixes
- Fix mmap settings not applied during segment load (key typo fix)
- Populate index info after segment loading to prevent redundant load tasks
- Fix memory corruption by removing a premature transaction handle destruction

Related issues: #44956, #45060, #39173

## Individual Cherry-Picked Commits

1. **e1c923b5cc** - fix: apply mmap settings correctly during segment load (#46017)
2. **63b912370b** - enhance: use milvus-storage internal C++ Reader API for Loon FFI (#45897)
3. **bfc192faa5** - enhance: Resolve issues integrating loon FFI (#45918)
4. **fb18564631** - enhance: support manifest-based index building with Loon FFI reader (#45726)
5. **b9ec2392b9** - enhance: integrate StorageV2 FFI interface for manifest-based segment loading (#45798)
6. **66db3c32e6** - enhance: integrate Storage V2 FFI interface for unified storage access (#45723)
7. **ae789273ac** - fix: populate index info after segment loading to prevent redundant load tasks (#45803)
8. **49688b0be2** - enhance: Move segment loading logic from Go layer to segcore for self-managed loading (#45488)
9. **5b2df88bac** - enhance: [StorageV2] Integrate FFI interface for packed reader (#45132)
10. **91ff5706ac** - enhance: [StorageV2] add manifest path support for FFI integration (#44991)
11. **2192bb4a85** - enhance: add NewSegmentWithLoadInfo API to support segment self-managed loading (#45061)
12. **4296b01da0** - enhance: update delta log serialization APIs to integrate storage V2 (#44998)

## Technical Details

### Architecture Changes
- **Before**: The Go layer orchestrated segment loading, making multiple CGO calls
- **After**: Segments autonomously manage loading in the C++ layer through a single entry point (see the sketch below)

### Storage Access Pattern
- **Before**: Read individual binlog files through the Go storage layer
- **After**: Read a manifest file that references packed columnar data via FFI

### Benefits
- Reduced cross-language call overhead
- Better resource management at the C++ level
- Improved I/O performance through batched streaming reads
- Cleaner separation of concerns between the Go and C++ layers
- Foundation for proactive schema evolution handling

---------

Signed-off-by: Ted Xu <ted.xu@zilliz.com>
Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
Co-authored-by: Ted Xu <ted.xu@zilliz.com>
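
To make the architecture change concrete, here is a minimal, self-contained Go sketch contrasting the two loading flows. All identifiers in it (`Binlog`, `LoadInfo`, `readBinlog`, `cgoLoadFieldData`, `newSegmentWithLoadInfo`) are illustrative stand-ins assumed for this description, not the actual Milvus/segcore types; only the `NewSegmentWithLoadInfo()` entry point is named by this PR.

```go
package loader

// Illustrative stand-ins for Milvus internals; not the real types.
type Binlog struct{ Path string }

type FieldInfo struct {
	ID      int64
	Binlogs []Binlog
}

type LoadInfo struct {
	ManifestPath string // Storage V2: one manifest referencing packed columnar data
	Fields       []FieldInfo
}

type Segment interface{ Load() error }

type stubSegment struct{ info *LoadInfo }

func (s *stubSegment) Load() error {
	// In the real flow this happens in C++: segcore reads the manifest via the
	// Storage V2 FFI reader and loads indexed and non-indexed fields in parallel.
	return nil
}

// Stubs standing in for the cross-language boundary.
func readBinlog(b Binlog) ([]byte, error)                     { return nil, nil }
func cgoLoadFieldData(s Segment, field int64, d []byte) error { return nil }
func newSegmentWithLoadInfo(info *LoadInfo) (Segment, error)  { return &stubSegment{info: info}, nil }

// Before: the Go layer orchestrates loading, issuing one CGO call per binlog.
func loadSegmentV1(seg Segment, info *LoadInfo) error {
	for _, field := range info.Fields {
		for _, binlog := range field.Binlogs {
			data, err := readBinlog(binlog) // Go-side object storage read
			if err != nil {
				return err
			}
			if err := cgoLoadFieldData(seg, field.ID, data); err != nil {
				return err
			}
		}
	}
	return nil
}

// After: the load info (including the manifest path) crosses CGO once, and
// the segment self-manages loading inside segcore.
func loadSegmentV2(info *LoadInfo) (Segment, error) {
	seg, err := newSegmentWithLoadInfo(info)
	if err != nil {
		return nil, err
	}
	return seg, seg.Load()
}
```
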
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"testing"

	"github.com/apache/arrow/go/v17/arrow/array"
	"github.com/stretchr/testify/assert"

	"github.com/milvus-io/milvus/pkg/v2/common"
)
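
// TestSort feeds two small binlog readers (RowIDs 10-12 and 20-22) into Sort
// and asserts that records reach the writer in strictly increasing RowID
// order, both with a pass-through predicate and with a filtering one.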
func TestSort(t *testing.T) {
	const batchSize = 64 * 1024 * 1024

	getReaders := func() []RecordReader {
		blobs, err := generateTestDataWithSeed(10, 3)
		assert.NoError(t, err)
		reader10 := newIterativeCompositeBinlogRecordReader(generateTestSchema(), nil, MakeBlobsReader(blobs))
		blobs, err = generateTestDataWithSeed(20, 3)
		assert.NoError(t, err)
		reader20 := newIterativeCompositeBinlogRecordReader(generateTestSchema(), nil, MakeBlobsReader(blobs))
		rr := []RecordReader{reader20, reader10}
		return rr
	}

	lastPK := int64(-1)
	rw := &MockRecordWriter{
		writefn: func(r Record) error {
			pk := r.Column(common.RowIDField).(*array.Int64).Value(0)
			assert.Greater(t, pk, lastPK)
			lastPK = pk
			return nil
		},

		closefn: func() error {
			lastPK = int64(-1)
			return nil
		},
	}

	t.Run("sort", func(t *testing.T) {
		gotNumRows, err := Sort(batchSize, generateTestSchema(), getReaders(), rw, func(r Record, ri, i int) bool {
			return true
		}, []int64{common.RowIDField})
		assert.NoError(t, err)
		assert.Equal(t, 6, gotNumRows)
		err = rw.Close()
		assert.NoError(t, err)
	})

	t.Run("sort with predicate", func(t *testing.T) {
		gotNumRows, err := Sort(batchSize, generateTestSchema(), getReaders(), rw, func(r Record, ri, i int) bool {
			pk := r.Column(common.RowIDField).(*array.Int64).Value(i)
			return pk >= 20
		}, []int64{common.RowIDField})
		assert.NoError(t, err)
		assert.Equal(t, 3, gotNumRows)
		err = rw.Close()
		assert.NoError(t, err)
	})
}
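
// TestMergeSort merge-sorts two overlapping pre-sorted readers (RowIDs
// 1000-5999 and 4000-8999) and checks both global RowID ordering and the
// number of surviving rows, with and without a delete predicate.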
func TestMergeSort(t *testing.T) {
	getReaders := func() []RecordReader {
		blobs, err := generateTestDataWithSeed(1000, 5000)
		assert.NoError(t, err)
		reader10 := newIterativeCompositeBinlogRecordReader(generateTestSchema(), nil, MakeBlobsReader(blobs))
		blobs, err = generateTestDataWithSeed(4000, 5000)
		assert.NoError(t, err)
		reader20 := newIterativeCompositeBinlogRecordReader(generateTestSchema(), nil, MakeBlobsReader(blobs))
		rr := []RecordReader{reader20, reader10}
		return rr
	}

	lastPK := int64(-1)
	rw := &MockRecordWriter{
		writefn: func(r Record) error {
			pk := r.Column(common.RowIDField).(*array.Int64).Value(0)
			assert.Greater(t, pk, lastPK)
			lastPK = pk
			return nil
		},

		closefn: func() error {
			lastPK = int64(-1)
			return nil
		},
	}

	const batchSize = 64 * 1024 * 1024

	t.Run("merge sort", func(t *testing.T) {
		gotNumRows, err := MergeSort(batchSize, generateTestSchema(), getReaders(), rw, func(r Record, ri, i int) bool {
			return true
		}, []int64{common.RowIDField})
		assert.NoError(t, err)
		assert.Equal(t, 10000, gotNumRows)
		err = rw.Close()
		assert.NoError(t, err)
	})

	t.Run("merge sort with predicate", func(t *testing.T) {
		gotNumRows, err := MergeSort(batchSize, generateTestSchema(), getReaders(), rw, func(r Record, ri, i int) bool {
			pk := r.Column(common.RowIDField).(*array.Int64).Value(i)
			// The deleted ranges [2000, 3050) and [5000, 7000) exercise both a
			// whole record (1024 rows) being deleted and a deletion ending at
			// the last rows of a record. Kept rows: [1000, 2000) and [3050, 5000)
			// from the first reader, [4000, 5000) and [7000, 9000) from the
			// second, i.e. 1000 + 1950 + 1000 + 2000 = 5950.
			return pk < 2000 || (pk >= 3050 && pk < 5000) || pk >= 7000
		}, []int64{common.RowIDField})
		assert.NoError(t, err)
		assert.Equal(t, 5950, gotNumRows)
		err = rw.Close()
		assert.NoError(t, err)
	})
}

// Benchmark sort
func BenchmarkSort(b *testing.B) {
	batch := 500000
	blobs, err := generateTestDataWithSeed(batch, batch)
	assert.NoError(b, err)
	reader10 := newIterativeCompositeBinlogRecordReader(generateTestSchema(), nil, MakeBlobsReader(blobs))
	blobs, err = generateTestDataWithSeed(batch*2+1, batch)
	assert.NoError(b, err)
	reader20 := newIterativeCompositeBinlogRecordReader(generateTestSchema(), nil, MakeBlobsReader(blobs))
	rr := []RecordReader{reader20, reader10}

	rw := &MockRecordWriter{
		writefn: func(r Record) error {
			return nil
		},

		closefn: func() error {
			return nil
		},
	}

	const batchSize = 64 * 1024 * 1024
	b.ResetTimer()

	b.Run("sort", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			Sort(batchSize, generateTestSchema(), rr, rw, func(r Record, ri, i int) bool {
				return true
			}, []int64{common.RowIDField})
		}
	})
}
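
// TestSortByMoreThanOneField sorts by the (RowID, Timestamp) field pair and
// asserts that records reach the writer in lexicographic order on that pair.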
func TestSortByMoreThanOneField(t *testing.T) {
	const batchSize = 10000
	sortByFieldIDs := []int64{common.RowIDField, common.TimeStampField}

	blobs, err := generateTestDataWithSeed(10, batchSize)
	assert.NoError(t, err)
	reader10 := newIterativeCompositeBinlogRecordReader(generateTestSchema(), nil, MakeBlobsReader(blobs))
	blobs, err = generateTestDataWithSeed(20, batchSize)
	assert.NoError(t, err)
	reader20 := newIterativeCompositeBinlogRecordReader(generateTestSchema(), nil, MakeBlobsReader(blobs))
	rr := []RecordReader{reader20, reader10}

	lastPK := int64(-1)
	lastTS := int64(-1)
	rw := &MockRecordWriter{
		writefn: func(r Record) error {
			pk := r.Column(common.RowIDField).(*array.Int64).Value(0)
			ts := r.Column(common.TimeStampField).(*array.Int64).Value(0)
			assert.True(t, pk > lastPK || (pk == lastPK && ts > lastTS))
			lastPK = pk
			lastTS = ts // track the timestamp too, so the tie-break assertion is meaningful
			return nil
		},

		closefn: func() error {
			lastPK = int64(-1)
			lastTS = int64(-1)
			return nil
		},
	}
	gotNumRows, err := Sort(batchSize, generateTestSchema(), rr, rw, func(r Record, ri, i int) bool {
		return true
	}, sortByFieldIDs)
	assert.NoError(t, err)
	assert.Equal(t, batchSize*2, gotNumRows)
	assert.NoError(t, rw.Close())
}