milvus/internal/metastore/model/collection.go
Zhen Ye 7c575a18b0
enhance: support AckSyncUp for broadcaster, and enable it in truncate api (#46313)
issue: #43897
also for issue: #46166

Add an ack_sync_up flag to the broadcast message header, indicating
whether the broadcast operation needs to be synced up between the
streaming node and the coordinator.
If ack_sync_up is false, the broadcast operation is acked as soon as
the recovery storage sees the message on the current vchannel, so the
fast-ack path can be applied to speed up the broadcast operation.
If ack_sync_up is true, the broadcast operation is acked only after the
checkpoint of the current vchannel reaches the message; the fast-ack
path cannot be applied, because the ack must be synced up with the
streaming node.
For example, if the truncate-collection operation wants its ack-once
callback to be invoked only after all segments are flushed on the
current vchannel, it should set ack_sync_up to true.

TODO: the current implementation does not guarantee the ack-sync-up
semantic; it only guarantees that the FastAck operation is not applied.
The full ack-sync-up semantic is deferred to 3.0. This is used only by
the truncate API for now.
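
Minimal self-contained sketch of the intended semantic. The types and
names below are illustrative only, not the milvus broadcaster API:

    package main

    import "fmt"

    // broadcastHeader models the ack_sync_up flag carried by a broadcast
    // message header (hypothetical type for illustration).
    type broadcastHeader struct {
        AckSyncUp bool // true: ack only after the vchannel checkpoint passes the message
    }

    // canFastAck reports whether the fast-ack path may be applied: it is
    // allowed only when the ack does not need to be synced up with the
    // streaming node.
    func canFastAck(h broadcastHeader) bool {
        return !h.AckSyncUp
    }

    func main() {
        truncate := broadcastHeader{AckSyncUp: true}  // truncate waits for segment flush
        regular := broadcastHeader{AckSyncUp: false}  // default broadcasts keep fast ack
        fmt.Println("truncate fast-ack allowed:", canFastAck(truncate)) // false
        fmt.Println("regular fast-ack allowed:", canFastAck(regular))   // true
    }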

---------

Signed-off-by: chyezh <chyezh@outlook.com>
2025-12-17 16:55:17 +08:00

344 lines
12 KiB
Go

// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"github.com/samber/lo"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/pkg/v2/common"
pb "github.com/milvus-io/milvus/pkg/v2/proto/etcdpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
)
// TODO: This Collection model is a dirty implementation that is easy to break; we should drop it in the future.
type Collection struct {
TenantID string
DBID int64
CollectionID int64
Partitions []*Partition
Name string
DBName string
Description string
AutoID bool
Fields []*Field
StructArrayFields []*StructArrayField
Functions []*Function
VirtualChannelNames []string
PhysicalChannelNames []string
ShardsNum int32
StartPositions []*commonpb.KeyDataPair
CreateTime uint64
ConsistencyLevel commonpb.ConsistencyLevel
Aliases []string // TODO: deprecate this.
Properties []*commonpb.KeyValuePair
State pb.CollectionState
EnableDynamicField bool
UpdateTimestamp uint64
SchemaVersion int32
ShardInfos map[string]*ShardInfo
}
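// ShardInfo records the per-vchannel shard state that is persisted together with the collection.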
type ShardInfo struct {
PChannelName string // the pchannel name of the shard; it is the same as the physical channel name.
VChannelName string // the vchannel name of the shard; it is the same as the virtual channel name.
LastTruncateTimeTick uint64 // the last truncate time tick of the shard; 0 if the shard has never been truncated.
}
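// Available reports whether the collection is in the CollectionCreated state.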
func (c *Collection) Available() bool {
return c.State == pb.CollectionState_CollectionCreated
}
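// ShallowClone returns a copy of the collection that shares the underlying slices and maps with the original.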
func (c *Collection) ShallowClone() *Collection {
return &Collection{
TenantID: c.TenantID,
DBID: c.DBID,
CollectionID: c.CollectionID,
Name: c.Name,
DBName: c.DBName,
Description: c.Description,
AutoID: c.AutoID,
Fields: c.Fields,
StructArrayFields: c.StructArrayFields,
Partitions: c.Partitions,
VirtualChannelNames: c.VirtualChannelNames,
PhysicalChannelNames: c.PhysicalChannelNames,
ShardsNum: c.ShardsNum,
ConsistencyLevel: c.ConsistencyLevel,
CreateTime: c.CreateTime,
StartPositions: c.StartPositions,
Aliases: c.Aliases,
Properties: c.Properties,
State: c.State,
EnableDynamicField: c.EnableDynamicField,
Functions: c.Functions,
UpdateTimestamp: c.UpdateTimestamp,
SchemaVersion: c.SchemaVersion,
ShardInfos: c.ShardInfos,
}
}
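// Clone returns a deep copy of the collection, including fields, partitions, channel lists, properties and shard infos.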
func (c *Collection) Clone() *Collection {
shardInfos := make(map[string]*ShardInfo, len(c.ShardInfos))
for channelName, shardInfo := range c.ShardInfos {
shardInfos[channelName] = &ShardInfo{
VChannelName: channelName,
PChannelName: shardInfo.PChannelName,
LastTruncateTimeTick: shardInfo.LastTruncateTimeTick,
}
}
return &Collection{
TenantID: c.TenantID,
DBID: c.DBID,
CollectionID: c.CollectionID,
Name: c.Name,
DBName: c.DBName,
Description: c.Description,
AutoID: c.AutoID,
Fields: CloneFields(c.Fields),
StructArrayFields: CloneStructArrayFields(c.StructArrayFields),
Partitions: ClonePartitions(c.Partitions),
VirtualChannelNames: common.CloneStringList(c.VirtualChannelNames),
PhysicalChannelNames: common.CloneStringList(c.PhysicalChannelNames),
ShardsNum: c.ShardsNum,
ConsistencyLevel: c.ConsistencyLevel,
CreateTime: c.CreateTime,
StartPositions: common.CloneKeyDataPairs(c.StartPositions),
Aliases: common.CloneStringList(c.Aliases),
Properties: common.CloneKeyValuePairs(c.Properties),
State: c.State,
EnableDynamicField: c.EnableDynamicField,
Functions: CloneFunctions(c.Functions),
UpdateTimestamp: c.UpdateTimestamp,
SchemaVersion: c.SchemaVersion,
ShardInfos: shardInfos,
}
}
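// GetPartitionNum returns the number of partitions; if filterUnavailable is true, only available partitions are counted.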
func (c *Collection) GetPartitionNum(filterUnavailable bool) int {
if !filterUnavailable {
return len(c.Partitions)
}
return lo.CountBy(c.Partitions, func(p *Partition) bool { return p.Available() })
}
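// Equal reports whether two collections match on tenant, database, partitions, schema and core attributes; channels, state and timestamps are not compared.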
func (c *Collection) Equal(other Collection) bool {
return c.TenantID == other.TenantID &&
c.DBID == other.DBID &&
CheckPartitionsEqual(c.Partitions, other.Partitions) &&
c.Name == other.Name &&
c.Description == other.Description &&
c.AutoID == other.AutoID &&
CheckFieldsEqual(c.Fields, other.Fields) &&
CheckStructArrayFieldsEqual(c.StructArrayFields, other.StructArrayFields) &&
c.ShardsNum == other.ShardsNum &&
c.ConsistencyLevel == other.ConsistencyLevel &&
checkParamsEqual(c.Properties, other.Properties) &&
c.EnableDynamicField == other.EnableDynamicField
}
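// ApplyUpdates applies the fields listed in the alter-collection update mask from the message body onto the in-memory collection.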
func (c *Collection) ApplyUpdates(header *message.AlterCollectionMessageHeader, body *message.AlterCollectionMessageBody) {
updateMask := header.UpdateMask
updates := body.Updates
for _, field := range updateMask.GetPaths() {
switch field {
case message.FieldMaskDB:
c.DBID = updates.DbId
c.DBName = updates.DbName
case message.FieldMaskCollectionName:
c.Name = updates.CollectionName
case message.FieldMaskCollectionDescription:
c.Description = updates.Description
case message.FieldMaskCollectionConsistencyLevel:
c.ConsistencyLevel = updates.ConsistencyLevel
case message.FieldMaskCollectionProperties:
c.Properties = updates.Properties
case message.FieldMaskCollectionSchema:
c.AutoID = updates.Schema.AutoID
c.Fields = UnmarshalFieldModels(updates.Schema.Fields)
c.EnableDynamicField = updates.Schema.EnableDynamicField
c.Functions = UnmarshalFunctionModels(updates.Schema.Functions)
c.StructArrayFields = UnmarshalStructArrayFieldModels(updates.Schema.StructArrayFields)
c.SchemaVersion = updates.Schema.Version
}
}
}
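// UnmarshalCollectionModel converts a persisted pb.CollectionInfo into the in-memory Collection model,
// rebuilding partitions from the deprecated inline fields and deriving shard infos from the channel lists.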
func UnmarshalCollectionModel(coll *pb.CollectionInfo) *Collection {
if coll == nil {
return nil
}
// backward compatibility with deprecated fields
partitions := make([]*Partition, len(coll.PartitionIDs))
for idx := range coll.PartitionIDs {
partitions[idx] = &Partition{
PartitionID: coll.PartitionIDs[idx],
PartitionName: coll.PartitionNames[idx],
PartitionCreatedTimestamp: coll.PartitionCreatedTimestamps[idx],
}
}
shardInfos := make(map[string]*ShardInfo, len(coll.VirtualChannelNames))
for idx, channelName := range coll.VirtualChannelNames {
if len(coll.ShardInfos) == 0 {
shardInfos[channelName] = &ShardInfo{
VChannelName: channelName,
PChannelName: coll.PhysicalChannelNames[idx],
LastTruncateTimeTick: 0,
}
} else {
shardInfos[channelName] = &ShardInfo{
VChannelName: channelName,
PChannelName: coll.PhysicalChannelNames[idx],
LastTruncateTimeTick: coll.ShardInfos[idx].LastTruncateTimeTick,
}
}
}
return &Collection{
CollectionID: coll.ID,
DBID: coll.DbId,
Name: coll.Schema.Name,
DBName: coll.Schema.DbName,
Description: coll.Schema.Description,
AutoID: coll.Schema.AutoID,
Fields: UnmarshalFieldModels(coll.GetSchema().GetFields()),
StructArrayFields: UnmarshalStructArrayFieldModels(coll.GetSchema().GetStructArrayFields()),
Partitions: partitions,
VirtualChannelNames: coll.VirtualChannelNames,
PhysicalChannelNames: coll.PhysicalChannelNames,
ShardsNum: coll.ShardsNum,
ConsistencyLevel: coll.ConsistencyLevel,
CreateTime: coll.CreateTime,
StartPositions: coll.StartPositions,
State: coll.State,
Properties: coll.Properties,
EnableDynamicField: coll.Schema.EnableDynamicField,
UpdateTimestamp: coll.UpdateTimestamp,
SchemaVersion: coll.Schema.Version,
ShardInfos: shardInfos,
}
}
// MarshalCollectionModel marshals only collection-level information.
// Partitions, aliases and fields won't be marshaled; they should be written to their new paths.
func MarshalCollectionModel(coll *Collection) *pb.CollectionInfo {
return marshalCollectionModelWithConfig(coll, newDefaultConfig())
}
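// config controls which optional parts (fields, partitions, struct-array fields) are included when marshaling a collection.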
type config struct {
withFields bool
withPartitions bool
withStructArrayFields bool
}
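// Option mutates the marshaling config.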
type Option func(c *config)
func newDefaultConfig() *config {
return &config{withFields: false, withPartitions: false, withStructArrayFields: false}
}
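// WithFields includes the field schemas in the marshaled collection.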
func WithFields() Option {
return func(c *config) {
c.withFields = true
}
}
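// WithPartitions includes partition IDs, names and created timestamps in the marshaled collection.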
func WithPartitions() Option {
return func(c *config) {
c.withPartitions = true
}
}
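// WithStructArrayFields includes the struct-array field schemas in the marshaled collection.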
func WithStructArrayFields() Option {
return func(c *config) {
c.withStructArrayFields = true
}
}
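// marshalCollectionModelWithConfig marshals the collection into pb.CollectionInfo according to the given config;
// shard infos are emitted in virtual-channel order, defaulting to a zero truncate time tick when absent.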
func marshalCollectionModelWithConfig(coll *Collection, c *config) *pb.CollectionInfo {
if coll == nil {
return nil
}
collSchema := &schemapb.CollectionSchema{
Name: coll.Name,
Description: coll.Description,
AutoID: coll.AutoID,
EnableDynamicField: coll.EnableDynamicField,
DbName: coll.DBName,
Version: coll.SchemaVersion,
}
if c.withFields {
fields := MarshalFieldModels(coll.Fields)
collSchema.Fields = fields
}
if c.withStructArrayFields {
structArrayFields := MarshalStructArrayFieldModels(coll.StructArrayFields)
collSchema.StructArrayFields = structArrayFields
}
shardInfos := make([]*pb.CollectionShardInfo, len(coll.ShardInfos))
for idx, channelName := range coll.VirtualChannelNames {
if shard, ok := coll.ShardInfos[channelName]; ok {
shardInfos[idx] = &pb.CollectionShardInfo{
LastTruncateTimeTick: shard.LastTruncateTimeTick,
}
} else {
shardInfos[idx] = &pb.CollectionShardInfo{
LastTruncateTimeTick: 0,
}
}
}
collectionPb := &pb.CollectionInfo{
ID: coll.CollectionID,
DbId: coll.DBID,
Schema: collSchema,
CreateTime: coll.CreateTime,
VirtualChannelNames: coll.VirtualChannelNames,
PhysicalChannelNames: coll.PhysicalChannelNames,
ShardsNum: coll.ShardsNum,
ConsistencyLevel: coll.ConsistencyLevel,
StartPositions: coll.StartPositions,
State: coll.State,
Properties: coll.Properties,
UpdateTimestamp: coll.UpdateTimestamp,
ShardInfos: shardInfos,
}
if c.withPartitions {
for _, partition := range coll.Partitions {
collectionPb.PartitionNames = append(collectionPb.PartitionNames, partition.PartitionName)
collectionPb.PartitionIDs = append(collectionPb.PartitionIDs, partition.PartitionID)
collectionPb.PartitionCreatedTimestamps = append(collectionPb.PartitionCreatedTimestamps, partition.PartitionCreatedTimestamp)
}
}
return collectionPb
}
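// MarshalCollectionModelWithOption marshals the collection with the requested optional parts included, e.g.:
//
//	info := MarshalCollectionModelWithOption(coll, WithFields(), WithPartitions())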
func MarshalCollectionModelWithOption(coll *Collection, opts ...Option) *pb.CollectionInfo {
c := newDefaultConfig()
for _, opt := range opts {
opt(c)
}
return marshalCollectionModelWithConfig(coll, c)
}