Ted Xu 196006b4ce
enhance: update delta log serialization APIs to integrate storage V2 (#44998)
See #39173

In this PR:

- Adjusted the delta log serialization APIs.
- Refactored the stats collector to improve the collection and digest of
primary key and BM25 statistics.
- Introduced new tests for the delta log reader/writer and stats
collectors to ensure functionality and correctness.

---------

Signed-off-by: Ted Xu <ted.xu@zilliz.com>
2025-10-22 15:58:12 +08:00

410 lines
12 KiB
Go

// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package syncmgr
import (
"context"
"fmt"
"path"
"github.com/apache/arrow/go/v17/arrow"
"github.com/apache/arrow/go/v17/arrow/array"
"github.com/apache/arrow/go/v17/arrow/memory"
"github.com/samber/lo"
"go.uber.org/zap"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/allocator"
"github.com/milvus-io/milvus/internal/flushcommon/metacache"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/pkg/v2/common"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/util/metautil"
"github.com/milvus-io/milvus/pkg/v2/util/retry"
)
// PackWriter persists the contents of a SyncPack and reports the binlog
// metadata describing what was written.
type PackWriter interface {
	// Write uploads the pack's insert data, deletes, PK stats and BM25 stats,
	// returning a binlog descriptor for each artifact and the total number of
	// bytes written.
	Write(ctx context.Context, pack *SyncPack) (
		inserts []*datapb.Binlog, deletes *datapb.Binlog, stats *datapb.Binlog, bm25Stats *datapb.Binlog,
		size int64, err error)
}
// BulkPackWriter writes a SyncPack's artifacts (insert binlogs, statslogs,
// delta logs and BM25 stats logs) to object storage through a ChunkManager.
type BulkPackWriter struct {
	metaCache      metacache.MetaCache        // segment metadata cache; updated with rolled/merged stats
	schema         *schemapb.CollectionSchema // collection schema used for serialization
	chunkManager   storage.ChunkManager       // backing object storage
	allocator      allocator.Interface        // source of log IDs (batch-allocated up front)
	writeRetryOpts []retry.Option             // retry policy applied to chunk-manager writes

	// prefetched log ids
	ids []int64
	// running total of bytes written by this writer, reported from Write
	sizeWritten int64
}
// NewBulkPackWriter constructs a BulkPackWriter that serializes packs using
// the given schema and uploads them via chunkManager, drawing log IDs from
// allocator and retrying writes according to writeRetryOpts.
func NewBulkPackWriter(metaCache metacache.MetaCache,
	schema *schemapb.CollectionSchema,
	chunkManager storage.ChunkManager,
	allocator allocator.Interface, writeRetryOpts ...retry.Option,
) *BulkPackWriter {
	w := new(BulkPackWriter)
	w.metaCache = metaCache
	w.schema = schema
	w.chunkManager = chunkManager
	w.allocator = allocator
	w.writeRetryOpts = writeRetryOpts
	return w
}
// Write uploads everything the pack carries: insert binlogs, PK statslogs,
// delta logs and BM25 stats logs, in that order. Log IDs are batch-allocated
// first so each subsequent upload just pops one from the prefetched pool.
// On failure the partially populated results are returned with the error.
func (bw *BulkPackWriter) Write(ctx context.Context, pack *SyncPack) (
	inserts map[int64]*datapb.FieldBinlog,
	deltas *datapb.FieldBinlog,
	stats map[int64]*datapb.FieldBinlog,
	bm25Stats map[int64]*datapb.FieldBinlog,
	size int64,
	err error,
) {
	if err = bw.prefetchIDs(pack); err != nil {
		log.Warn("failed allocate ids for sync task", zap.Error(err))
		return
	}
	inserts, err = bw.writeInserts(ctx, pack)
	if err != nil {
		log.Error("failed to write insert data", zap.Error(err))
		return
	}
	stats, err = bw.writeStats(ctx, pack)
	if err != nil {
		log.Error("failed to process stats blob", zap.Error(err))
		return
	}
	deltas, err = bw.writeDelta(ctx, pack)
	if err != nil {
		log.Error("failed to process delta blob", zap.Error(err))
		return
	}
	bm25Stats, err = bw.writeBM25Stasts(ctx, pack)
	if err != nil {
		log.Error("failed to process bm25 stats blob", zap.Error(err))
		return
	}
	size = bw.sizeWritten
	return
}
// prefetchIDs pre-allocates log IDs for every blob the current task will
// upload: two per insert field (binlog + statslog), one for the delta log,
// one per BM25 field, plus reservations for merged logs on flush.
// NOTE(review): the merged stats/bm25 reservations appear never to be
// consumed, since merged logs are addressed via the CompoundStatsType
// sentinel rather than nextID() — harmless over-allocation, worth confirming.
func (bw *BulkPackWriter) prefetchIDs(pack *SyncPack) error {
	needed := 0
	if len(pack.insertData) > 0 {
		// binlogs and statslogs
		needed += 2 * len(pack.insertData[0].Data)
	}
	if pack.isFlush {
		needed++ // merged stats log
	}
	if pack.deltaData != nil {
		needed++
	}
	if pack.bm25Stats != nil {
		needed += len(pack.bm25Stats)
		if pack.isFlush {
			needed++ // merged bm25 stats
		}
	}
	if needed == 0 {
		return nil
	}
	start, _, err := bw.allocator.Alloc(uint32(needed))
	if err != nil {
		return err
	}
	bw.ids = lo.RangeFrom(start, needed)
	return nil
}
// nextID pops and returns the next prefetched log ID. It panics when the
// pool is exhausted, which would indicate a counting bug in prefetchIDs.
func (bw *BulkPackWriter) nextID() int64 {
	if len(bw.ids) == 0 {
		panic("pre-fetched ids exhausted")
	}
	id := bw.ids[0]
	bw.ids = bw.ids[1:]
	return id
}
// writeLog uploads one serialized blob to <rootPath>/<root>/<p>, retrying per
// the writer's retry options, and returns the binlog descriptor for the
// uploaded object. The uploaded byte count is accumulated into sizeWritten.
func (bw *BulkPackWriter) writeLog(ctx context.Context, blob *storage.Blob,
	root, p string, pack *SyncPack,
) (*datapb.Binlog, error) {
	key := path.Join(bw.chunkManager.RootPath(), root, p)
	upload := func() error {
		return bw.chunkManager.Write(ctx, key, blob.Value)
	}
	if err := retry.Do(ctx, upload, bw.writeRetryOpts...); err != nil {
		return nil, err
	}
	written := int64(len(blob.GetValue()))
	bw.sizeWritten += written
	return &datapb.Binlog{
		EntriesNum:    blob.RowNum,
		TimestampFrom: pack.tsFrom,
		TimestampTo:   pack.tsTo,
		LogPath:       key,
		LogSize:       written,
		MemorySize:    blob.MemorySize,
	}, nil
}
// writeInserts serializes the pack's insert data into per-field binlog blobs
// and uploads each one, returning one FieldBinlog per field ID. An empty map
// is returned when the pack carries no insert data.
func (bw *BulkPackWriter) writeInserts(ctx context.Context, pack *SyncPack) (map[int64]*datapb.FieldBinlog, error) {
	result := make(map[int64]*datapb.FieldBinlog)
	if len(pack.insertData) == 0 {
		return result, nil
	}
	serializer, err := NewStorageSerializer(bw.metaCache, bw.schema)
	if err != nil {
		return nil, err
	}
	blobs, err := serializer.serializeBinlog(ctx, pack)
	if err != nil {
		return nil, err
	}
	for fieldID, blob := range blobs {
		// Each field's binlog gets its own freshly allocated log ID.
		logPath := metautil.JoinIDPath(pack.collectionID, pack.partitionID, pack.segmentID, fieldID, bw.nextID())
		binlog, err := bw.writeLog(ctx, blob, common.SegmentInsertLogPath, logPath, pack)
		if err != nil {
			return nil, err
		}
		result[fieldID] = &datapb.FieldBinlog{
			FieldID: fieldID,
			Binlogs: []*datapb.Binlog{binlog},
		}
	}
	return result, nil
}
// writeStats serializes and uploads the primary-key statslog(s) for the pack:
// always the per-batch statslog, and additionally the merged (compound)
// statslog when flushing a non-L0 segment. The batch PK stats are rolled into
// the segment's cached stats before upload. The result is keyed by the PK
// field ID.
func (bw *BulkPackWriter) writeStats(ctx context.Context, pack *SyncPack) (map[int64]*datapb.FieldBinlog, error) {
	if len(pack.insertData) == 0 {
		// TODO: we should not skip here, if the flush operation don't carry any insert data,
		// the merge stats operation will be skipped, which is a bad case.
		return make(map[int64]*datapb.FieldBinlog), nil
	}
	serializer, err := NewStorageSerializer(bw.metaCache, bw.schema)
	if err != nil {
		return nil, err
	}
	singlePKStats, batchStatsBlob, err := serializer.serializeStatslog(pack)
	if err != nil {
		return nil, err
	}
	// Roll the freshly collected PK stats into the segment's cached stats.
	actions := []metacache.SegmentAction{metacache.RollStats(singlePKStats)}
	bw.metaCache.UpdateSegments(metacache.MergeSegmentAction(actions...), metacache.WithSegmentIDs(pack.segmentID))

	pkFieldID := serializer.pkField.GetFieldID()
	binlogs := make([]*datapb.Binlog, 0)

	// Per-batch statslog, addressed by a freshly allocated log ID.
	k := metautil.JoinIDPath(pack.collectionID, pack.partitionID, pack.segmentID, pkFieldID, bw.nextID())
	batchBinlog, err := bw.writeLog(ctx, batchStatsBlob, common.SegmentStatslogPath, k, pack)
	if err != nil {
		return nil, err
	}
	binlogs = append(binlogs, batchBinlog)

	if pack.isFlush && pack.level != datapb.SegmentLevel_L0 {
		mergedStatsBlob, err := serializer.serializeMergedPkStats(pack)
		if err != nil {
			return nil, err
		}
		// The merged statslog uses the CompoundStatsType sentinel as its log
		// ID instead of an allocated one.
		mergedKey := metautil.JoinIDPath(pack.collectionID, pack.partitionID, pack.segmentID, pkFieldID, int64(storage.CompoundStatsType))
		mergedBinlog, err := bw.writeLog(ctx, mergedStatsBlob, common.SegmentStatslogPath, mergedKey, pack)
		if err != nil {
			return nil, err
		}
		binlogs = append(binlogs, mergedBinlog)
	}
	logs := make(map[int64]*datapb.FieldBinlog)
	logs[pkFieldID] = &datapb.FieldBinlog{
		FieldID: pkFieldID,
		Binlogs: binlogs,
	}
	return logs, nil
}
// writeBM25Stasts serializes and uploads the BM25 statistics logs for the
// pack: one binlog per BM25 field, and on flush of a non-L0 segment with a
// BM25 function, an additional merged (compound) stats log per field. The
// batch BM25 stats are merged into the segment's cached stats.
// NOTE(review): the method name carries a typo ("Stasts"); it is kept because
// the method is unexported and may have package-local callers outside this
// view.
func (bw *BulkPackWriter) writeBM25Stasts(ctx context.Context, pack *SyncPack) (map[int64]*datapb.FieldBinlog, error) {
	if len(pack.bm25Stats) == 0 {
		// TODO: we should not skip here, if the flush operation don't carry any
		// BM25 stats, the merge stats operation will be skipped, which is a bad case.
		return make(map[int64]*datapb.FieldBinlog), nil
	}
	serializer, err := NewStorageSerializer(bw.metaCache, bw.schema)
	if err != nil {
		return nil, err
	}
	bm25Blobs, err := serializer.serializeBM25Stats(pack)
	if err != nil {
		return nil, err
	}
	logs := make(map[int64]*datapb.FieldBinlog)
	for fieldID, blob := range bm25Blobs {
		k := metautil.JoinIDPath(pack.collectionID, pack.partitionID, pack.segmentID, fieldID, bw.nextID())
		binlog, err := bw.writeLog(ctx, blob, common.SegmentBm25LogPath, k, pack)
		if err != nil {
			return nil, err
		}
		logs[fieldID] = &datapb.FieldBinlog{
			FieldID: fieldID,
			Binlogs: []*datapb.Binlog{binlog},
		}
	}
	// Merge the batch BM25 stats into the segment's cached stats.
	actions := []metacache.SegmentAction{metacache.MergeBm25Stats(pack.bm25Stats)}
	bw.metaCache.UpdateSegments(metacache.MergeSegmentAction(actions...), metacache.WithSegmentIDs(pack.segmentID))

	// Flattened from three nested ifs: merged stats are written only on flush,
	// for non-L0 segments, when the schema declares a BM25 function.
	if pack.isFlush && pack.level != datapb.SegmentLevel_L0 && hasBM25Function(bw.schema) {
		mergedBM25Blob, err := serializer.serializeMergedBM25Stats(pack)
		if err != nil {
			return nil, err
		}
		for fieldID, blob := range mergedBM25Blob {
			// Merged logs use the CompoundStatsType sentinel as their log ID.
			k := metautil.JoinIDPath(pack.collectionID, pack.partitionID, pack.segmentID, fieldID, int64(storage.CompoundStatsType))
			binlog, err := bw.writeLog(ctx, blob, common.SegmentBm25LogPath, k, pack)
			if err != nil {
				return nil, err
			}
			fieldBinlog, ok := logs[fieldID]
			if !ok {
				fieldBinlog = &datapb.FieldBinlog{
					FieldID: fieldID,
				}
				logs[fieldID] = fieldBinlog
			}
			fieldBinlog.Binlogs = append(fieldBinlog.Binlogs, binlog)
		}
	}
	return logs, nil
}
// writeDelta serializes the pack's delete records ((pk, timestamp) pairs)
// into a delta log via the storage-V2 deltalog writer and uploads it through
// the chunk manager. The resulting FieldBinlog is attributed to the
// primary-key field; an empty FieldBinlog is returned when the pack carries
// no deletes.
func (bw *BulkPackWriter) writeDelta(ctx context.Context, pack *SyncPack) (*datapb.FieldBinlog, error) {
	if pack.deltaData == nil {
		return &datapb.FieldBinlog{}, nil
	}
	// Locate the primary-key field in the collection schema.
	pkField := func() *schemapb.FieldSchema {
		for _, field := range bw.schema.Fields {
			if field.IsPrimaryKey {
				return field
			}
		}
		return nil
	}()
	if pkField == nil {
		return nil, fmt.Errorf("primary key field not found")
	}
	logID := bw.nextID()
	k := metautil.JoinIDPath(pack.collectionID, pack.partitionID, pack.segmentID, logID)
	// NOTE(review): this local shadows the imported "path" package for the
	// rest of the function; path.Join is unusable below this line.
	path := path.Join(bw.chunkManager.RootPath(), common.SegmentDeltaLogPath, k)
	writer, err := storage.NewDeltalogWriter(
		ctx, pack.collectionID, pack.partitionID, pack.segmentID, logID, pkField.DataType, path,
		storage.WithUploader(func(ctx context.Context, kvs map[string][]byte) error {
			// Get the only blob in the map
			if len(kvs) != 1 {
				return fmt.Errorf("expected 1 blob, got %d", len(kvs))
			}
			// Exactly one blob is expected; its map key is ignored and the
			// blob is uploaded to the precomputed path instead.
			for _, blob := range kvs {
				return bw.chunkManager.Write(ctx, path, blob)
			}
			return nil
		}),
	)
	if err != nil {
		return nil, err
	}
	// Map the PK's Milvus type to its Arrow counterpart; only int64 and
	// varchar primary keys are supported.
	pkType := func() arrow.DataType {
		switch pkField.DataType {
		case schemapb.DataType_Int64:
			return arrow.PrimitiveTypes.Int64
		case schemapb.DataType_VarChar:
			return arrow.BinaryTypes.String
		default:
			return nil
		}
	}()
	if pkType == nil {
		return nil, fmt.Errorf("unexpected pk type %v", pkField.DataType)
	}
	pkBuilder := array.NewBuilder(memory.DefaultAllocator, pkType)
	tsBuilder := array.NewBuilder(memory.DefaultAllocator, arrow.PrimitiveTypes.Int64)
	defer pkBuilder.Release()
	defer tsBuilder.Release()
	// Append one (pk, ts) row per delete record.
	for i := int64(0); i < pack.deltaData.RowCount; i++ {
		switch pkField.DataType {
		case schemapb.DataType_Int64:
			pkBuilder.(*array.Int64Builder).Append(pack.deltaData.Pks[i].GetValue().(int64))
		case schemapb.DataType_VarChar:
			pkBuilder.(*array.StringBuilder).Append(pack.deltaData.Pks[i].GetValue().(string))
		default:
			return nil, fmt.Errorf("unexpected pk type %v", pkField.DataType)
		}
		tsBuilder.(*array.Int64Builder).Append(int64(pack.deltaData.Tss[i]))
	}
	// NOTE(review): pkArray/tsArray (and the record) are never Release()d in
	// this function; presumably the record/writer takes ownership of the
	// Arrow buffers — confirm, otherwise this leaks.
	pkArray := pkBuilder.NewArray()
	tsArray := tsBuilder.NewArray()
	// The two columns are registered under the RowIDField/TimeStampField
	// indices of the field-to-column map consumed by the deltalog format.
	record := storage.NewSimpleArrowRecord(array.NewRecord(arrow.NewSchema([]arrow.Field{
		{Name: "pk", Type: pkType},
		{Name: "ts", Type: arrow.PrimitiveTypes.Int64},
	}, nil), []arrow.Array{pkArray, tsArray}, pack.deltaData.RowCount), map[storage.FieldID]int{
		common.RowIDField:     0,
		common.TimeStampField: 1,
	})
	err = writer.Write(record)
	if err != nil {
		return nil, err
	}
	// Closing the writer finalizes the log and triggers the uploader above.
	err = writer.Close()
	if err != nil {
		return nil, err
	}
	deltalog := &datapb.Binlog{
		EntriesNum:    pack.deltaData.RowCount,
		TimestampFrom: pack.tsFrom,
		TimestampTo:   pack.tsTo,
		LogPath:       path,
		// LogSize is an estimate (memory size / 4), not the uploaded byte
		// count, and this estimate is what gets added to sizeWritten below.
		LogSize:    pack.deltaData.Size() / 4, // Not used
		MemorySize: pack.deltaData.Size(),
	}
	bw.sizeWritten += deltalog.LogSize
	return &datapb.FieldBinlog{
		FieldID: pkField.GetFieldID(),
		Binlogs: []*datapb.Binlog{deltalog},
	}, nil
}