enhance: [2.5] Add granular flush targets support for FlushAll operation (#44431)

issue: #44156
pr: #44234
Enhance FlushAll to support targeting specific collections within databases
instead of flushing only at the database level.

Changes include:

- Add FlushAllTarget message in data_coord.proto for granular targeting
- Support collection-specific flush operations within databases
- Maintain backward compatibility with deprecated db_name field

This enhancement allows users to flush specific collections without
affecting other collections in the same database, providing more precise
control over data persistence operations.
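
For illustration, a minimal sketch of the two request shapes this change supports; the database name and collection IDs are placeholders, and the types are the datapb messages touched by this PR:

package main

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
)

func main() {
	// Granular: flush only collections 100 and 101 inside "test-db".
	targeted := &datapb.FlushAllRequest{
		FlushTargets: []*datapb.FlushAllTarget{
			{DbName: "test-db", CollectionIds: []int64{100, 101}},
		},
	}
	// Deprecated path, kept for backward compatibility: flush every collection
	// in one database; leave both fields empty to flush all databases.
	legacy := &datapb.FlushAllRequest{DbName: "test-db"}
	fmt.Println(targeted.GetFlushTargets(), legacy.GetDbName())
}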

---------

Signed-off-by: Wei Liu <wei.liu@zilliz.com>


@@ -6,7 +6,7 @@ require (
github.com/blang/semver/v4 v4.0.0
github.com/cockroachdb/errors v1.9.1
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc
github.com/milvus-io/milvus/pkg/v2 v2.5.7
github.com/quasilyte/go-ruleguard/dsl v0.3.22
github.com/samber/lo v1.27.0


@@ -318,8 +318,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18 h1:BUMCAa4vS7apwQYVArHy2GTHdX3hUPAXh/ExyovJlZY=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc h1:WMkuIc+PJDma8JZjhwC4V91GDP7lLO1XPUU23PoXNQ0=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus/pkg/v2 v2.5.7 h1:b45jq1s1v03AekFucs2/dkkXohB57gEx7gspJuAkfbY=
github.com/milvus-io/milvus/pkg/v2 v2.5.7/go.mod h1:pImw1IGNS7k/5yvlZV2tZi5vZu1VQRlQij+r39d+XnI=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=

go.mod

@@ -22,7 +22,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/klauspost/compress v1.18.0
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc
github.com/minio/minio-go/v7 v7.0.73
github.com/panjf2000/ants/v2 v2.11.3 // indirect
github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81

go.sum

@@ -648,8 +648,8 @@ github.com/milvus-io/cgosymbolizer v0.0.0-20240722103217-b7dee0e50119 h1:9VXijWu
github.com/milvus-io/cgosymbolizer v0.0.0-20240722103217-b7dee0e50119/go.mod h1:DvXTE/K/RtHehxU8/GtDs4vFtfw64jJ3PaCnFri8CRg=
github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b h1:TfeY0NxYxZzUfIfYe5qYDBzt4ZYRqzUjTR6CvUzjat8=
github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b/go.mod h1:iwW+9cWfIzzDseEBCCeDSN5SD16Tidvy8cwQ7ZY8Qj4=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18 h1:BUMCAa4vS7apwQYVArHy2GTHdX3hUPAXh/ExyovJlZY=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc h1:WMkuIc+PJDma8JZjhwC4V91GDP7lLO1XPUU23PoXNQ0=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/pulsar-client-go v0.12.1 h1:O2JZp1tsYiO7C0MQ4hrUY/aJXnn2Gry6hpm7UodghmE=
github.com/milvus-io/pulsar-client-go v0.12.1/go.mod h1:dkutuH4oS2pXiGm+Ti7fQZ4MRjrMPZ8IJeEGAWMeckk=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=


@@ -38,7 +38,7 @@ type Broker interface {
DescribeCollectionInternal(ctx context.Context, collectionID int64) (*milvuspb.DescribeCollectionResponse, error)
ShowPartitionsInternal(ctx context.Context, collectionID int64) ([]int64, error)
ShowCollections(ctx context.Context, dbName string) (*milvuspb.ShowCollectionsResponse, error)
ShowCollectionIDs(ctx context.Context) (*rootcoordpb.ShowCollectionIDsResponse, error)
ShowCollectionIDs(ctx context.Context, dbNames ...string) (*rootcoordpb.ShowCollectionIDsResponse, error)
ListDatabases(ctx context.Context) (*milvuspb.ListDatabasesResponse, error)
HasCollection(ctx context.Context, collectionID int64) (bool, error)
}
@@ -118,7 +118,7 @@ func (b *coordinatorBroker) ShowCollections(ctx context.Context, dbName string)
return resp, nil
}
func (b *coordinatorBroker) ShowCollectionIDs(ctx context.Context) (*rootcoordpb.ShowCollectionIDsResponse, error) {
func (b *coordinatorBroker) ShowCollectionIDs(ctx context.Context, dbNames ...string) (*rootcoordpb.ShowCollectionIDsResponse, error) {
ctx, cancel := context.WithTimeout(ctx, paramtable.Get().QueryCoordCfg.BrokerTimeout.GetAsDuration(time.Millisecond))
defer cancel()
resp, err := b.rootCoord.ShowCollectionIDs(ctx, &rootcoordpb.ShowCollectionIDsRequest{
@@ -126,6 +126,7 @@ func (b *coordinatorBroker) ShowCollectionIDs(ctx context.Context) (*rootcoordpb
commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections),
),
AllowUnavailable: true,
DbNames: dbNames,
})
if err = merr.CheckRPCCall(resp, err); err != nil {

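A brief usage sketch of the updated interface (the Broker value and database name below are hypothetical): zero-argument call sites keep their old all-databases behavior, while new callers can scope the listing.

// listIDs is a hypothetical caller of the variadic signature above.
func listIDs(ctx context.Context, b Broker) error {
	if _, err := b.ShowCollectionIDs(ctx); err != nil { // all databases, as before
		return err
	}
	_, err := b.ShowCollectionIDs(ctx, "test_db") // scoped to a single database
	return err
}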

@@ -29,6 +29,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
)
@@ -268,6 +269,67 @@ func (s *BrokerSuite) TestHasCollection() {
})
}
func (s *BrokerSuite) TestShowCollectionIDs() {
s.Run("normal", func() {
s.SetupTest()
dbName := "test_db"
expectedIDs := []int64{1, 2, 3}
s.rootCoordClient.EXPECT().ShowCollectionIDs(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, req *rootcoordpb.ShowCollectionIDsRequest, options ...grpc.CallOption) (*rootcoordpb.ShowCollectionIDsResponse, error) {
s.Equal([]string{dbName}, req.GetDbNames())
return &rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: dbName,
CollectionIDs: expectedIDs,
},
},
}, nil
})
resp, err := s.broker.ShowCollectionIDs(context.Background(), dbName)
s.NoError(err)
s.NotNil(resp)
s.Len(resp.GetDbCollections(), 1)
s.Equal(dbName, resp.GetDbCollections()[0].GetDbName())
s.ElementsMatch(expectedIDs, resp.GetDbCollections()[0].GetCollectionIDs())
s.TearDownTest()
})
s.Run("rpc_error", func() {
s.SetupTest()
dbName := "test_db"
expectedErr := errors.New("mock rpc error")
s.rootCoordClient.EXPECT().ShowCollectionIDs(mock.Anything, mock.Anything).Return(nil, expectedErr)
resp, err := s.broker.ShowCollectionIDs(context.Background(), dbName)
s.Error(err)
s.Equal(expectedErr, err)
s.Nil(resp)
s.TearDownTest()
})
s.Run("milvus_error", func() {
s.SetupTest()
dbName := "test_db"
expectedErr := merr.ErrDatabaseNotFound
s.rootCoordClient.EXPECT().ShowCollectionIDs(mock.Anything, mock.Anything).Return(&rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Status(expectedErr),
}, nil)
resp, err := s.broker.ShowCollectionIDs(context.Background(), dbName)
s.Error(err)
s.ErrorIs(err, expectedErr)
s.Nil(resp)
s.TearDownTest()
})
}
func TestBrokerSuite(t *testing.T) {
suite.Run(t, new(BrokerSuite))
}


@@ -198,9 +198,16 @@ func (_c *MockBroker_ListDatabases_Call) RunAndReturn(run func(context.Context)
return _c
}
// ShowCollectionIDs provides a mock function with given fields: ctx
func (_m *MockBroker) ShowCollectionIDs(ctx context.Context) (*rootcoordpb.ShowCollectionIDsResponse, error) {
ret := _m.Called(ctx)
// ShowCollectionIDs provides a mock function with given fields: ctx, dbNames
func (_m *MockBroker) ShowCollectionIDs(ctx context.Context, dbNames ...string) (*rootcoordpb.ShowCollectionIDsResponse, error) {
_va := make([]interface{}, len(dbNames))
for _i := range dbNames {
_va[_i] = dbNames[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
if len(ret) == 0 {
panic("no return value specified for ShowCollectionIDs")
@@ -208,19 +215,19 @@ func (_m *MockBroker) ShowCollectionIDs(ctx context.Context) (*rootcoordpb.ShowC
var r0 *rootcoordpb.ShowCollectionIDsResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (*rootcoordpb.ShowCollectionIDsResponse, error)); ok {
return rf(ctx)
if rf, ok := ret.Get(0).(func(context.Context, ...string) (*rootcoordpb.ShowCollectionIDsResponse, error)); ok {
return rf(ctx, dbNames...)
}
if rf, ok := ret.Get(0).(func(context.Context) *rootcoordpb.ShowCollectionIDsResponse); ok {
r0 = rf(ctx)
if rf, ok := ret.Get(0).(func(context.Context, ...string) *rootcoordpb.ShowCollectionIDsResponse); ok {
r0 = rf(ctx, dbNames...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*rootcoordpb.ShowCollectionIDsResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(ctx)
if rf, ok := ret.Get(1).(func(context.Context, ...string) error); ok {
r1 = rf(ctx, dbNames...)
} else {
r1 = ret.Error(1)
}
@@ -235,13 +242,21 @@ type MockBroker_ShowCollectionIDs_Call struct {
// ShowCollectionIDs is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockBroker_Expecter) ShowCollectionIDs(ctx interface{}) *MockBroker_ShowCollectionIDs_Call {
return &MockBroker_ShowCollectionIDs_Call{Call: _e.mock.On("ShowCollectionIDs", ctx)}
// - dbNames ...string
func (_e *MockBroker_Expecter) ShowCollectionIDs(ctx interface{}, dbNames ...interface{}) *MockBroker_ShowCollectionIDs_Call {
return &MockBroker_ShowCollectionIDs_Call{Call: _e.mock.On("ShowCollectionIDs",
append([]interface{}{ctx}, dbNames...)...)}
}
func (_c *MockBroker_ShowCollectionIDs_Call) Run(run func(ctx context.Context)) *MockBroker_ShowCollectionIDs_Call {
func (_c *MockBroker_ShowCollectionIDs_Call) Run(run func(ctx context.Context, dbNames ...string)) *MockBroker_ShowCollectionIDs_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context))
variadicArgs := make([]string, len(args)-1)
for i, a := range args[1:] {
if a != nil {
variadicArgs[i] = a.(string)
}
}
run(args[0].(context.Context), variadicArgs...)
})
return _c
}
@@ -251,7 +266,7 @@ func (_c *MockBroker_ShowCollectionIDs_Call) Return(_a0 *rootcoordpb.ShowCollect
return _c
}
func (_c *MockBroker_ShowCollectionIDs_Call) RunAndReturn(run func(context.Context) (*rootcoordpb.ShowCollectionIDsResponse, error)) *MockBroker_ShowCollectionIDs_Call {
func (_c *MockBroker_ShowCollectionIDs_Call) RunAndReturn(run func(context.Context, ...string) (*rootcoordpb.ShowCollectionIDsResponse, error)) *MockBroker_ShowCollectionIDs_Call {
_c.Call.Return(run)
return _c
}
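
With the variadic parameter, the regenerated expecter accepts database names as trailing matchers. A hedged test-setup sketch (the expectation values are illustrative):

// Hypothetical expectation against the regenerated mock.
mb := &MockBroker{}
mb.EXPECT().
	ShowCollectionIDs(mock.Anything, "test-db").
	Return(&rootcoordpb.ShowCollectionIDsResponse{Status: merr.Success()}, nil)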


@@ -28,6 +28,7 @@ import (
"github.com/samber/lo"
"go.opentelemetry.io/otel"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
@@ -87,18 +88,40 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
}, nil
}
channelCPs := make(map[string]*msgpb.MsgPosition, 0)
coll, err := s.handler.GetCollection(ctx, req.GetCollectionID())
// generate a timestamp timeOfSeal; all data before timeOfSeal is guaranteed to be sealed or flushed
ts, err := s.allocator.AllocTimestamp(ctx)
if err != nil {
log.Warn("unable to alloc timestamp", zap.Error(err))
return nil, err
}
flushResult, err := s.flushCollection(ctx, req.GetCollectionID(), ts, req.GetSegmentIDs())
if err != nil {
log.Warn("fail to get collection", zap.Error(err))
return &datapb.FlushResponse{
Status: merr.Status(err),
}, nil
}
return &datapb.FlushResponse{
Status: merr.Success(),
DbID: req.GetDbID(),
CollectionID: req.GetCollectionID(),
SegmentIDs: flushResult.GetSegmentIDs(),
TimeOfSeal: flushResult.GetTimeOfSeal(),
FlushSegmentIDs: flushResult.GetFlushSegmentIDs(),
FlushTs: flushResult.GetFlushTs(),
ChannelCps: flushResult.GetChannelCps(),
}, nil
}
// flushCollection seals and flushes the segments of a single collection at
// flushTs and returns the per-collection FlushResult.
func (s *Server) flushCollection(ctx context.Context, collectionID UniqueID, flushTs uint64, toFlushSegments []UniqueID) (*datapb.FlushResult, error) {
channelCPs := make(map[string]*msgpb.MsgPosition, 0)
coll, err := s.handler.GetCollection(ctx, collectionID)
if err != nil {
log.Warn("fail to get collection", zap.Error(err))
return nil, err
}
if coll == nil {
return &datapb.FlushResponse{
Status: merr.Status(merr.WrapErrCollectionNotFound(req.GetCollectionID())),
}, nil
return nil, merr.WrapErrCollectionNotFound(collectionID)
}
// channel checkpoints must be gotten before sealSegment, make sure checkpoints is earlier than segment's endts
for _, vchannel := range coll.VChannelNames {
@@ -106,26 +129,14 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
channelCPs[vchannel] = cp
}
// generate a timestamp timeOfSeal, all data before timeOfSeal is guaranteed to be sealed or flushed
ts, err := s.allocator.AllocTimestamp(ctx)
if err != nil {
log.Warn("unable to alloc timestamp", zap.Error(err))
return &datapb.FlushResponse{
Status: merr.Status(err),
}, nil
}
timeOfSeal, _ := tsoutil.ParseTS(ts)
timeOfSeal, _ := tsoutil.ParseTS(flushTs)
sealedSegmentsIDDict := make(map[UniqueID]bool)
if !streamingutil.IsStreamingServiceEnabled() {
for _, channel := range coll.VChannelNames {
sealedSegmentIDs, err := s.segmentManager.SealAllSegments(ctx, channel, req.GetSegmentIDs())
sealedSegmentIDs, err := s.segmentManager.SealAllSegments(ctx, channel, toFlushSegments)
if err != nil {
return &datapb.FlushResponse{
Status: merr.Status(errors.Wrapf(err, "failed to flush collection %d",
req.GetCollectionID())),
}, nil
return nil, errors.Wrapf(err, "failed to flush collection %d", collectionID)
}
for _, sealedSegmentID := range sealedSegmentIDs {
sealedSegmentsIDDict[sealedSegmentID] = true
@@ -133,7 +144,7 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
}
}
segments := s.meta.GetSegmentsOfCollection(ctx, req.GetCollectionID())
segments := s.meta.GetSegmentsOfCollection(ctx, collectionID)
flushSegmentIDs := make([]UniqueID, 0, len(segments))
for _, segment := range segments {
if segment != nil &&
@@ -147,10 +158,10 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
if !streamingutil.IsStreamingServiceEnabled() {
var isUnimplemented bool
err = retry.Do(ctx, func() error {
nodeChannels := s.channelManager.GetNodeChannelsByCollectionID(req.GetCollectionID())
nodeChannels := s.channelManager.GetNodeChannelsByCollectionID(collectionID)
for nodeID, channelNames := range nodeChannels {
err = s.cluster.FlushChannels(ctx, nodeID, ts, channelNames)
err = s.cluster.FlushChannels(ctx, nodeID, flushTs, channelNames)
if err != nil && errors.Is(err, merr.ErrServiceUnimplemented) {
isUnimplemented = true
return nil
@@ -162,36 +173,133 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
return nil
}, retry.Attempts(60)) // about 3min
if err != nil {
return &datapb.FlushResponse{
Status: merr.Status(err),
}, nil
return nil, err
}
if isUnimplemented {
// For compatibility with rolling upgrades from version 2.2.x,
// fall back to the flush logic of version 2.2.x;
log.Warn("DataNode FlushChannels unimplemented", zap.Error(err))
ts = 0
flushTs = 0
}
}
log.Info("flush response with segments",
zap.Int64("collectionID", req.GetCollectionID()),
zap.Int64("collectionID", collectionID),
zap.Int64s("sealSegments", lo.Keys(sealedSegmentsIDDict)),
zap.Int("flushedSegmentsCount", len(flushSegmentIDs)),
zap.Time("timeOfSeal", timeOfSeal),
zap.Uint64("flushTs", ts),
zap.Time("flushTs in time", tsoutil.PhysicalTime(ts)))
zap.Uint64("flushTs", flushTs),
zap.Time("flushTs in time", tsoutil.PhysicalTime(flushTs)))
return &datapb.FlushResponse{
Status: merr.Success(),
DbID: req.GetDbID(),
CollectionID: req.GetCollectionID(),
return &datapb.FlushResult{
CollectionID: collectionID,
SegmentIDs: lo.Keys(sealedSegmentsIDDict),
TimeOfSeal: timeOfSeal.Unix(),
FlushSegmentIDs: flushSegmentIDs,
FlushTs: ts,
FlushTs: flushTs,
ChannelCps: channelCPs,
DbName: coll.DatabaseName,
CollectionName: coll.Schema.GetName(),
}, nil
}
// resolveCollectionsToFlush resolves which collection IDs to flush: explicit
// flush_targets take precedence, then the deprecated db_name, then all databases.
func resolveCollectionsToFlush(ctx context.Context, s *Server, req *datapb.FlushAllRequest) ([]int64, error) {
collectionsToFlush := make([]int64, 0)
if len(req.GetFlushTargets()) > 0 {
// Use flush_targets from request
for _, target := range req.GetFlushTargets() {
collectionsToFlush = append(collectionsToFlush, target.GetCollectionIds()...)
}
} else if req.GetDbName() != "" {
// Backward compatibility: use deprecated db_name field
showColRsp, err := s.broker.ShowCollectionIDs(ctx, req.GetDbName())
if err != nil {
log.Warn("failed to ShowCollectionIDs", zap.String("db", req.GetDbName()), zap.Error(err))
return nil, err
}
for _, dbCollection := range showColRsp.GetDbCollections() {
collectionsToFlush = append(collectionsToFlush, dbCollection.GetCollectionIDs()...)
}
} else {
// Flush all databases
dbsResp, err := s.broker.ListDatabases(ctx)
if err != nil {
return nil, err
}
for _, dbName := range dbsResp.GetDbNames() {
showColRsp, err := s.broker.ShowCollectionIDs(ctx, dbName)
if err != nil {
log.Warn("failed to ShowCollectionIDs", zap.String("db", dbName), zap.Error(err))
return nil, err
}
for _, dbCollection := range showColRsp.GetDbCollections() {
collectionsToFlush = append(collectionsToFlush, dbCollection.GetCollectionIDs()...)
}
}
}
return collectionsToFlush, nil
}
func (s *Server) FlushAll(ctx context.Context, req *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
log := log.Ctx(ctx)
log.Info("receive flushAll request")
ctx, sp := otel.Tracer(typeutil.DataCoordRole).Start(ctx, "DataCoord-FlushAll")
defer sp.End()
if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
log.Info("server is not healthy", zap.Error(err), zap.Any("stateCode", s.GetStateCode()))
return &datapb.FlushAllResponse{
Status: merr.Status(err),
}, nil
}
// generate a timestamp timeOfSeal; all data before timeOfSeal is guaranteed to be sealed or flushed
ts, err := s.allocator.AllocTimestamp(ctx)
if err != nil {
log.Warn("unable to alloc timestamp", zap.Error(err))
return nil, err
}
// resolve collections to flush
collectionsToFlush, err := resolveCollectionsToFlush(ctx, s, req)
if err != nil {
return &datapb.FlushAllResponse{
Status: merr.Status(err),
}, nil
}
var mu sync.Mutex
flushInfos := make([]*datapb.FlushResult, 0)
wg := errgroup.Group{}
// bound concurrent per-collection flushes; the limit comes from DataCoordCfg.FlushAllMaxParallelTasks
wg.SetLimit(Params.DataCoordCfg.FlushAllMaxParallelTasks.GetAsInt())
for _, cid := range collectionsToFlush {
wg.Go(func() error {
flushResult, err := s.flushCollection(ctx, cid, ts, nil)
if err != nil {
log.Warn("failed to flush collection", zap.Int64("collectionID", cid), zap.Error(err))
return err
}
mu.Lock()
flushInfos = append(flushInfos, flushResult)
mu.Unlock()
return nil
})
}
err = wg.Wait()
if err != nil {
return &datapb.FlushAllResponse{
Status: merr.Status(err),
}, nil
}
return &datapb.FlushAllResponse{
Status: merr.Success(),
FlushTs: ts,
FlushResults: flushInfos,
}, nil
}
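The fan-out above is the standard errgroup bounded-parallelism pattern. A self-contained sketch of the same shape, with a hard-coded limit standing in for DataCoordCfg.FlushAllMaxParallelTasks and a stub in place of flushCollection:

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	var (
		mu      sync.Mutex
		results []int64
	)
	g := errgroup.Group{}
	g.SetLimit(4) // FlushAll takes this from DataCoordCfg.FlushAllMaxParallelTasks
	for _, cid := range []int64{100, 101, 200} {
		cid := cid // explicit capture, for toolchains before Go 1.22
		g.Go(func() error {
			// Stand-in for flushCollection(ctx, cid, ts, nil); the first
			// non-nil error is what Wait returns.
			mu.Lock()
			results = append(results, cid)
			mu.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("flush failed:", err)
		return
	}
	fmt.Println("flushed collections:", results)
}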
@@ -1410,7 +1518,10 @@ func (s *Server) GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushAll
}, nil
}
resp := &milvuspb.GetFlushAllStateResponse{Status: merr.Success()}
resp := &milvuspb.GetFlushAllStateResponse{
Status: merr.Success(),
FlushStates: make([]*milvuspb.FlushAllState, 0),
}
dbsRsp, err := s.broker.ListDatabases(ctx)
if err != nil {
@@ -1418,43 +1529,96 @@ func (s *Server) GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushAll
resp.Status = merr.Status(err)
return resp, nil
}
dbNames := dbsRsp.DbNames
if req.GetDbName() != "" {
dbNames = lo.Filter(dbNames, func(dbName string, _ int) bool {
return dbName == req.GetDbName()
})
if len(dbNames) == 0 {
// Determine which databases to check
var targetDbs []string
if len(req.GetFlushTargets()) > 0 {
// Use flush_targets from request
for _, target := range req.GetFlushTargets() {
if target.GetDbName() != "" {
if !lo.Contains(dbsRsp.DbNames, target.GetDbName()) {
resp.Status = merr.Status(merr.WrapErrDatabaseNotFound(target.GetDbName()))
return resp, nil
}
targetDbs = append(targetDbs, target.GetDbName())
}
}
} else if req.GetDbName() != "" {
if !lo.Contains(dbsRsp.DbNames, req.GetDbName()) {
resp.Status = merr.Status(merr.WrapErrDatabaseNotFound(req.GetDbName()))
return resp, nil
}
// Backward compatibility: use deprecated db_name field
targetDbs = []string{req.GetDbName()}
} else {
// Check all databases
targetDbs = dbsRsp.DbNames
}
for _, dbName := range dbsRsp.DbNames {
// Remove duplicates
targetDbs = lo.Uniq(targetDbs)
allFlushed := true
for _, dbName := range targetDbs {
flushState := &milvuspb.FlushAllState{
DbName: dbName,
CollectionFlushStates: make(map[string]bool),
}
// Get collections to check for this database
var targetCollections []string
if len(req.GetFlushTargets()) > 0 {
// Check if specific collections are requested for this db
for _, target := range req.GetFlushTargets() {
if target.GetDbName() == dbName && len(target.GetCollectionNames()) > 0 {
targetCollections = target.GetCollectionNames()
break
}
}
}
showColRsp, err := s.broker.ShowCollections(ctx, dbName)
if err != nil {
log.Warn("failed to ShowCollections", zap.Error(err))
log.Warn("failed to ShowCollections", zap.String("db", dbName), zap.Error(err))
resp.Status = merr.Status(err)
return resp, nil
}
for _, collection := range showColRsp.GetCollectionIds() {
describeColRsp, err := s.broker.DescribeCollectionInternal(ctx, collection)
for idx, collectionID := range showColRsp.GetCollectionIds() {
collectionName := ""
if idx < len(showColRsp.GetCollectionNames()) {
collectionName = showColRsp.GetCollectionNames()[idx]
}
// If specific collections are requested, skip others
if len(targetCollections) > 0 && !lo.Contains(targetCollections, collectionName) {
continue
}
describeColRsp, err := s.broker.DescribeCollectionInternal(ctx, collectionID)
if err != nil {
log.Warn("failed to DescribeCollectionInternal", zap.Error(err))
log.Warn("failed to DescribeCollectionInternal",
zap.Int64("collectionID", collectionID), zap.Error(err))
resp.Status = merr.Status(err)
return resp, nil
}
collectionFlushed := true
for _, channel := range describeColRsp.GetVirtualChannelNames() {
channelCP := s.meta.GetChannelCheckpoint(channel)
if channelCP == nil || channelCP.GetTimestamp() < req.GetFlushAllTs() {
resp.Flushed = false
return resp, nil
collectionFlushed = false
allFlushed = false
break
}
}
flushState.CollectionFlushStates[collectionName] = collectionFlushed
}
resp.FlushStates = append(resp.FlushStates, flushState)
}
resp.Flushed = true
resp.Flushed = allFlushed
return resp, nil
}
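End to end, the new fields pair up: FlushAll returns a FlushTs, and GetFlushAllState accepts the same targets plus that timestamp and reports per-collection progress. A hedged polling sketch; dc, the names, and the poll interval are illustrative, and the imports (context, time, datapb, milvuspb, merr) are assumed:

// waitTargetedFlush is a hypothetical helper built on this change.
func waitTargetedFlush(ctx context.Context, dc datapb.DataCoordClient) error {
	flushResp, err := dc.FlushAll(ctx, &datapb.FlushAllRequest{
		FlushTargets: []*datapb.FlushAllTarget{
			{DbName: "test-db", CollectionIds: []int64{100}},
		},
	})
	if err != nil {
		return err
	}
	if err := merr.Error(flushResp.GetStatus()); err != nil {
		return err
	}
	for {
		state, err := dc.GetFlushAllState(ctx, &milvuspb.GetFlushAllStateRequest{
			FlushAllTs: flushResp.GetFlushTs(),
			FlushTargets: []*milvuspb.FlushAllTarget{
				{DbName: "test-db", CollectionNames: []string{"target-collection"}},
			},
		})
		if err != nil {
			return err
		}
		if state.GetFlushed() { // every targeted collection's checkpoint passed FlushAllTs
			return nil
		}
		time.Sleep(time.Second)
	}
}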


@@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/bytedance/mockey"
"github.com/cockroachdb/errors"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
@@ -31,6 +32,7 @@ import (
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/proto/internalpb"
"github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/metautil"
@@ -1645,3 +1647,859 @@ func (s *GcControlServiceSuite) TestTimeoutCtx() {
func TestGcControlService(t *testing.T) {
suite.Run(t, new(GcControlServiceSuite))
}
// createTestFlushAllServer creates a test server for FlushAll tests
func createTestFlushAllServer() *Server {
// Create a mock allocator that will be replaced by mockey
mockAlloc := &allocator.MockAllocator{}
mockBroker := &broker.MockBroker{}
server := &Server{
allocator: mockAlloc,
broker: mockBroker,
meta: &meta{
collections: typeutil.NewConcurrentMap[UniqueID, *collectionInfo](),
channelCPs: newChannelCps(),
segments: NewSegmentsInfo(),
},
// handler will be set to a mock in individual tests when needed
}
server.stateCode.Store(commonpb.StateCode_Healthy)
return server
}
func TestServer_FlushAll(t *testing.T) {
t.Run("server not healthy", func(t *testing.T) {
server := &Server{}
server.stateCode.Store(commonpb.StateCode_Abnormal)
req := &datapb.FlushAllRequest{}
resp, err := server.FlushAll(context.Background(), req)
assert.NoError(t, err)
assert.Error(t, merr.Error(resp.GetStatus()))
})
t.Run("allocator error", func(t *testing.T) {
server := createTestFlushAllServer()
// Mock allocator AllocTimestamp to return error
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(0), errors.New("alloc error")).Build()
defer mockAllocTimestamp.UnPatch()
req := &datapb.FlushAllRequest{}
resp, err := server.FlushAll(context.Background(), req)
assert.Error(t, err)
assert.Nil(t, resp)
})
t.Run("broker ListDatabases error", func(t *testing.T) {
server := createTestFlushAllServer()
// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()
// Mock broker ListDatabases to return error
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(nil, errors.New("list databases error")).Build()
defer mockListDatabases.UnPatch()
req := &datapb.FlushAllRequest{} // No specific targets, should list all databases
resp, err := server.FlushAll(context.Background(), req)
assert.NoError(t, err)
assert.Error(t, merr.Error(resp.GetStatus()))
})
t.Run("broker ShowCollectionIDs error", func(t *testing.T) {
server := createTestFlushAllServer()
// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()
// Mock broker ShowCollectionIDs to return error
mockShowCollectionIDs := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollectionIDs")).Return(nil, errors.New("broker error")).Build()
defer mockShowCollectionIDs.UnPatch()
req := &datapb.FlushAllRequest{
DbName: "test-db",
}
resp, err := server.FlushAll(context.Background(), req)
assert.NoError(t, err)
assert.Error(t, merr.Error(resp.GetStatus()))
})
t.Run("empty collections in database", func(t *testing.T) {
server := createTestFlushAllServer()
// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()
// Mock broker ShowCollectionIDs returns empty collections
mockShowCollectionIDs := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollectionIDs")).Return(&rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: "empty-db",
CollectionIDs: []int64{}, // Empty collections
},
},
}, nil).Build()
defer mockShowCollectionIDs.UnPatch()
req := &datapb.FlushAllRequest{
DbName: "empty-db",
}
resp, err := server.FlushAll(context.Background(), req)
assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, uint64(12345), resp.GetFlushTs())
assert.Equal(t, 0, len(resp.GetFlushResults()))
})
t.Run("flush specific database successfully", func(t *testing.T) {
server := createTestFlushAllServer()
server.handler = NewNMockHandler(t) // Initialize handler with testing.T
// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()
// Mock broker ShowCollectionIDs
mockShowCollectionIDs := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollectionIDs")).Return(&rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: "test-db",
CollectionIDs: []int64{100, 101},
},
},
}, nil).Build()
defer mockShowCollectionIDs.UnPatch()
// Add collections to server meta with collection names
server.meta.AddCollection(&collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "collection1",
},
VChannelNames: []string{"channel1"},
})
server.meta.AddCollection(&collectionInfo{
ID: 101,
Schema: &schemapb.CollectionSchema{
Name: "collection2",
},
VChannelNames: []string{"channel2"},
})
// Mock handler GetCollection to return collection info
mockGetCollection := mockey.Mock(mockey.GetMethod(server.handler, "GetCollection")).To(func(ctx context.Context, collectionID int64) (*collectionInfo, error) {
if collectionID == 100 {
return &collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "collection1",
},
}, nil
} else if collectionID == 101 {
return &collectionInfo{
ID: 101,
Schema: &schemapb.CollectionSchema{
Name: "collection2",
},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockGetCollection.UnPatch()
// Mock flushCollection to return success results
mockFlushCollection := mockey.Mock(mockey.GetMethod(server, "flushCollection")).To(func(ctx context.Context, collectionID int64, flushTs uint64, toFlushSegments []int64) (*datapb.FlushResult, error) {
var collectionName string
if collectionID == 100 {
collectionName = "collection1"
} else if collectionID == 101 {
collectionName = "collection2"
}
return &datapb.FlushResult{
CollectionID: collectionID,
DbName: "test-db",
CollectionName: collectionName,
SegmentIDs: []int64{1000 + collectionID, 2000 + collectionID},
FlushSegmentIDs: []int64{1000 + collectionID, 2000 + collectionID},
TimeOfSeal: 12300,
FlushTs: flushTs,
ChannelCps: make(map[string]*msgpb.MsgPosition),
}, nil
}).Build()
defer mockFlushCollection.UnPatch()
req := &datapb.FlushAllRequest{
DbName: "test-db",
}
resp, err := server.FlushAll(context.Background(), req)
assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, uint64(12345), resp.GetFlushTs())
assert.Equal(t, 2, len(resp.GetFlushResults()))
// Verify flush results
resultMap := make(map[int64]*datapb.FlushResult)
for _, result := range resp.GetFlushResults() {
resultMap[result.GetCollectionID()] = result
}
assert.Contains(t, resultMap, int64(100))
assert.Contains(t, resultMap, int64(101))
assert.Equal(t, "test-db", resultMap[100].GetDbName())
assert.Equal(t, "collection1", resultMap[100].GetCollectionName())
assert.Equal(t, "collection2", resultMap[101].GetCollectionName())
})
t.Run("flush with specific flush targets successfully", func(t *testing.T) {
server := createTestFlushAllServer()
server.handler = NewNMockHandler(t) // Initialize handler with testing.T
// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()
// Mock broker ShowCollectionIDs
mockShowCollectionIDs := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollectionIDs")).Return(&rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: "test-db",
CollectionIDs: []int64{100, 101},
},
},
}, nil).Build()
defer mockShowCollectionIDs.UnPatch()
// Add collections to server meta with collection names
server.meta.AddCollection(&collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "target-collection",
},
VChannelNames: []string{"channel1"},
})
server.meta.AddCollection(&collectionInfo{
ID: 101,
Schema: &schemapb.CollectionSchema{
Name: "other-collection",
},
VChannelNames: []string{"channel2"},
})
// Mock handler GetCollection to return collection info
mockGetCollection := mockey.Mock(mockey.GetMethod(server.handler, "GetCollection")).To(func(ctx context.Context, collectionID int64) (*collectionInfo, error) {
if collectionID == 100 {
return &collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "target-collection",
},
}, nil
} else if collectionID == 101 {
return &collectionInfo{
ID: 101,
Schema: &schemapb.CollectionSchema{
Name: "other-collection",
},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockGetCollection.UnPatch()
// Mock flushCollection to return success result
mockFlushCollection := mockey.Mock(mockey.GetMethod(server, "flushCollection")).To(func(ctx context.Context, collectionID int64, flushTs uint64, toFlushSegments []int64) (*datapb.FlushResult, error) {
return &datapb.FlushResult{
CollectionID: collectionID,
DbName: "test-db",
CollectionName: "target-collection",
SegmentIDs: []int64{1100, 2100},
FlushSegmentIDs: []int64{1100, 2100},
TimeOfSeal: 12300,
FlushTs: flushTs,
ChannelCps: make(map[string]*msgpb.MsgPosition),
}, nil
}).Build()
defer mockFlushCollection.UnPatch()
req := &datapb.FlushAllRequest{
FlushTargets: []*datapb.FlushAllTarget{
{
DbName: "test-db",
CollectionIds: []int64{100},
},
},
}
resp, err := server.FlushAll(context.Background(), req)
assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, uint64(12345), resp.GetFlushTs())
assert.Equal(t, 1, len(resp.GetFlushResults()))
// Verify only the target collection was flushed
result := resp.GetFlushResults()[0]
assert.Equal(t, int64(100), result.GetCollectionID())
assert.Equal(t, "test-db", result.GetDbName())
assert.Equal(t, "target-collection", result.GetCollectionName())
assert.Equal(t, []int64{1100, 2100}, result.GetSegmentIDs())
assert.Equal(t, []int64{1100, 2100}, result.GetFlushSegmentIDs())
})
t.Run("flush all databases successfully", func(t *testing.T) {
server := createTestFlushAllServer()
server.handler = NewNMockHandler(t) // Initialize handler with testing.T
// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()
// Mock broker ListDatabases
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"db1", "db2"},
}, nil).Build()
defer mockListDatabases.UnPatch()
// Mock broker ShowCollectionIDs for different databases
mockShowCollectionIDs := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollectionIDs")).To(func(ctx context.Context, dbNames ...string) (*rootcoordpb.ShowCollectionIDsResponse, error) {
if len(dbNames) == 0 {
return nil, errors.New("no database names provided")
}
dbName := dbNames[0] // Use the first database name
if dbName == "db1" {
return &rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: "db1",
CollectionIDs: []int64{100},
},
},
}, nil
}
if dbName == "db2" {
return &rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: "db2",
CollectionIDs: []int64{200},
},
},
}, nil
}
return nil, errors.New("unknown database")
}).Build()
defer mockShowCollectionIDs.UnPatch()
// Add collections to server meta with collection names
server.meta.AddCollection(&collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "collection1",
},
VChannelNames: []string{"channel1"},
})
server.meta.AddCollection(&collectionInfo{
ID: 200,
Schema: &schemapb.CollectionSchema{
Name: "collection2",
},
VChannelNames: []string{"channel2"},
})
// Mock handler GetCollection to return collection info
mockGetCollection := mockey.Mock(mockey.GetMethod(server.handler, "GetCollection")).To(func(ctx context.Context, collectionID int64) (*collectionInfo, error) {
if collectionID == 100 {
return &collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "collection1",
},
}, nil
} else if collectionID == 200 {
return &collectionInfo{
ID: 200,
Schema: &schemapb.CollectionSchema{
Name: "collection2",
},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockGetCollection.UnPatch()
// Mock flushCollection for different collections
mockFlushCollection := mockey.Mock(mockey.GetMethod(server, "flushCollection")).To(func(ctx context.Context, collectionID int64, flushTs uint64, toFlushSegments []int64) (*datapb.FlushResult, error) {
var dbName, collectionName string
if collectionID == 100 {
dbName = "db1"
collectionName = "collection1"
} else if collectionID == 200 {
dbName = "db2"
collectionName = "collection2"
}
return &datapb.FlushResult{
CollectionID: collectionID,
DbName: dbName,
CollectionName: collectionName,
SegmentIDs: []int64{collectionID + 1000, collectionID + 2000},
FlushSegmentIDs: []int64{collectionID + 1000, collectionID + 2000},
TimeOfSeal: 12300,
FlushTs: flushTs,
ChannelCps: make(map[string]*msgpb.MsgPosition),
}, nil
}).Build()
defer mockFlushCollection.UnPatch()
req := &datapb.FlushAllRequest{} // No specific targets, flush all databases
resp, err := server.FlushAll(context.Background(), req)
assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, uint64(12345), resp.GetFlushTs())
assert.Equal(t, 2, len(resp.GetFlushResults()))
// Verify results from both databases
resultMap := make(map[string]*datapb.FlushResult)
for _, result := range resp.GetFlushResults() {
resultMap[result.GetDbName()] = result
}
assert.Contains(t, resultMap, "db1")
assert.Contains(t, resultMap, "db2")
assert.Equal(t, int64(100), resultMap["db1"].GetCollectionID())
assert.Equal(t, int64(200), resultMap["db2"].GetCollectionID())
})
}
// createTestGetFlushAllStateServer creates a test server for GetFlushAllState tests
func createTestGetFlushAllStateServer() *Server {
// Create a mock broker that will be replaced by mockey
mockBroker := &broker.MockBroker{}
server := &Server{
broker: mockBroker,
meta: &meta{
channelCPs: newChannelCps(),
},
}
server.stateCode.Store(commonpb.StateCode_Healthy)
return server
}
func TestServer_GetFlushAllState(t *testing.T) {
t.Run("server not healthy", func(t *testing.T) {
server := &Server{}
server.stateCode.Store(commonpb.StateCode_Abnormal)
req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
}
resp, err := server.GetFlushAllState(context.Background(), req)
assert.NoError(t, err)
assert.Error(t, merr.Error(resp.GetStatus()))
})
t.Run("ListDatabases error", func(t *testing.T) {
server := createTestGetFlushAllStateServer()
// Mock ListDatabases error
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(nil, errors.New("list databases error")).Build()
defer mockListDatabases.UnPatch()
req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
}
resp, err := server.GetFlushAllState(context.Background(), req)
assert.NoError(t, err)
assert.Error(t, merr.Error(resp.GetStatus()))
})
t.Run("check all databases", func(t *testing.T) {
server := createTestGetFlushAllStateServer()
// Mock ListDatabases
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"db1", "db2"},
}, nil).Build()
defer mockListDatabases.UnPatch()
// Mock ShowCollections for db1
mockShowCollections := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollections")).To(func(ctx context.Context, dbName string) (*milvuspb.ShowCollectionsResponse, error) {
if dbName == "db1" {
return &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{100},
CollectionNames: []string{"collection1"},
}, nil
}
if dbName == "db2" {
return &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{200},
CollectionNames: []string{"collection2"},
}, nil
}
return nil, errors.New("unknown db")
}).Build()
defer mockShowCollections.UnPatch()
// Mock DescribeCollectionInternal
mockDescribeCollection := mockey.Mock(mockey.GetMethod(server.broker, "DescribeCollectionInternal")).To(func(ctx context.Context, collectionID int64) (*milvuspb.DescribeCollectionResponse, error) {
if collectionID == 100 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel1"},
}, nil
}
if collectionID == 200 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel2"},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockDescribeCollection.UnPatch()
// Setup channel checkpoints - both flushed
server.meta.channelCPs.checkpoints["channel1"] = &msgpb.MsgPosition{Timestamp: 15000}
server.meta.channelCPs.checkpoints["channel2"] = &msgpb.MsgPosition{Timestamp: 15000}
req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345, // No specific targets, check all databases
}
resp, err := server.GetFlushAllState(context.Background(), req)
assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 2, len(resp.GetFlushStates()))
// Check both databases are present
dbNames := make(map[string]bool)
for _, flushState := range resp.GetFlushStates() {
dbNames[flushState.GetDbName()] = true
}
assert.True(t, dbNames["db1"])
assert.True(t, dbNames["db2"])
assert.True(t, resp.GetFlushed()) // Overall flushed
})
t.Run("channel checkpoint not found", func(t *testing.T) {
server := createTestGetFlushAllStateServer()
// Mock ListDatabases
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"test-db"},
}, nil).Build()
defer mockListDatabases.UnPatch()
// Mock ShowCollections
mockShowCollections := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollections")).Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{100},
CollectionNames: []string{"collection1"},
}, nil).Build()
defer mockShowCollections.UnPatch()
// Mock DescribeCollectionInternal
mockDescribeCollection := mockey.Mock(mockey.GetMethod(server.broker, "DescribeCollectionInternal")).Return(&milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel1"},
}, nil).Build()
defer mockDescribeCollection.UnPatch()
// No channel checkpoint set - should be considered not flushed
req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
DbName: "test-db",
}
resp, err := server.GetFlushAllState(context.Background(), req)
assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 1, len(resp.GetFlushStates()))
flushState := resp.GetFlushStates()[0]
assert.Equal(t, "test-db", flushState.GetDbName())
assert.Equal(t, 1, len(flushState.GetCollectionFlushStates()))
assert.False(t, flushState.GetCollectionFlushStates()["collection1"]) // Not flushed
assert.False(t, resp.GetFlushed()) // Overall not flushed
})
t.Run("channel checkpoint timestamp too low", func(t *testing.T) {
server := createTestGetFlushAllStateServer()
// Mock ListDatabases
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"test-db"},
}, nil).Build()
defer mockListDatabases.UnPatch()
// Mock ShowCollections
mockShowCollections := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollections")).Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{100},
CollectionNames: []string{"collection1"},
}, nil).Build()
defer mockShowCollections.UnPatch()
// Mock DescribeCollectionInternal
mockDescribeCollection := mockey.Mock(mockey.GetMethod(server.broker, "DescribeCollectionInternal")).Return(&milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel1"},
}, nil).Build()
defer mockDescribeCollection.UnPatch()
// Setup channel checkpoint with timestamp lower than FlushAllTs
server.meta.channelCPs.checkpoints["channel1"] = &msgpb.MsgPosition{Timestamp: 10000}
req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
DbName: "test-db",
}
resp, err := server.GetFlushAllState(context.Background(), req)
assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 1, len(resp.GetFlushStates()))
flushState := resp.GetFlushStates()[0]
assert.Equal(t, "test-db", flushState.GetDbName())
assert.Equal(t, 1, len(flushState.GetCollectionFlushStates()))
assert.False(t, flushState.GetCollectionFlushStates()["collection1"]) // Not flushed
assert.False(t, resp.GetFlushed()) // Overall not flushed
})
t.Run("specific database flushed successfully", func(t *testing.T) {
server := createTestGetFlushAllStateServer()
// Mock ListDatabases (called even when DbName is specified)
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"test-db"},
}, nil).Build()
defer mockListDatabases.UnPatch()
// Mock ShowCollections for specific database
mockShowCollections := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollections")).Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{100, 101},
CollectionNames: []string{"collection1", "collection2"},
}, nil).Build()
defer mockShowCollections.UnPatch()
// Mock DescribeCollectionInternal
mockDescribeCollection := mockey.Mock(mockey.GetMethod(server.broker, "DescribeCollectionInternal")).To(func(ctx context.Context, collectionID int64) (*milvuspb.DescribeCollectionResponse, error) {
if collectionID == 100 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel1"},
}, nil
}
if collectionID == 101 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel2"},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockDescribeCollection.UnPatch()
// Setup channel checkpoints - both flushed (timestamps higher than FlushAllTs)
server.meta.channelCPs.checkpoints["channel1"] = &msgpb.MsgPosition{Timestamp: 15000}
server.meta.channelCPs.checkpoints["channel2"] = &msgpb.MsgPosition{Timestamp: 16000}
req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
DbName: "test-db",
}
resp, err := server.GetFlushAllState(context.Background(), req)
assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 1, len(resp.GetFlushStates()))
flushState := resp.GetFlushStates()[0]
assert.Equal(t, "test-db", flushState.GetDbName())
assert.Equal(t, 2, len(flushState.GetCollectionFlushStates()))
assert.True(t, flushState.GetCollectionFlushStates()["collection1"]) // Flushed
assert.True(t, flushState.GetCollectionFlushStates()["collection2"]) // Flushed
assert.True(t, resp.GetFlushed()) // Overall flushed
})
t.Run("check with flush targets successfully", func(t *testing.T) {
server := createTestGetFlushAllStateServer()
// Mock ListDatabases (called even when FlushTargets are specified)
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"test-db"},
}, nil).Build()
defer mockListDatabases.UnPatch()
// Mock ShowCollections for specific database
mockShowCollections := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollections")).Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{100, 101},
CollectionNames: []string{"target-collection", "other-collection"},
}, nil).Build()
defer mockShowCollections.UnPatch()
// Mock DescribeCollectionInternal
mockDescribeCollection := mockey.Mock(mockey.GetMethod(server.broker, "DescribeCollectionInternal")).To(func(ctx context.Context, collectionID int64) (*milvuspb.DescribeCollectionResponse, error) {
if collectionID == 100 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel1"},
}, nil
}
if collectionID == 101 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel2"},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockDescribeCollection.UnPatch()
// Setup channel checkpoints - target collection flushed, other not checked
server.meta.channelCPs.checkpoints["channel1"] = &msgpb.MsgPosition{Timestamp: 15000}
server.meta.channelCPs.checkpoints["channel2"] = &msgpb.MsgPosition{Timestamp: 10000} // Won't be checked due to filtering
req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
FlushTargets: []*milvuspb.FlushAllTarget{
{
DbName: "test-db",
CollectionNames: []string{"target-collection"},
},
},
}
resp, err := server.GetFlushAllState(context.Background(), req)
assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 1, len(resp.GetFlushStates()))
flushState := resp.GetFlushStates()[0]
assert.Equal(t, "test-db", flushState.GetDbName())
assert.Equal(t, 1, len(flushState.GetCollectionFlushStates())) // Only target collection checked
assert.True(t, flushState.GetCollectionFlushStates()["target-collection"]) // Flushed
assert.True(t, resp.GetFlushed()) // Overall flushed (only checking target collection)
})
t.Run("mixed flush states - partial success", func(t *testing.T) {
server := createTestGetFlushAllStateServer()
// Mock ListDatabases
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"db1", "db2"},
}, nil).Build()
defer mockListDatabases.UnPatch()
// Mock ShowCollections for different databases
mockShowCollections := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollections")).To(func(ctx context.Context, dbName string) (*milvuspb.ShowCollectionsResponse, error) {
if dbName == "db1" {
return &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{100},
CollectionNames: []string{"collection1"},
}, nil
}
if dbName == "db2" {
return &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{200},
CollectionNames: []string{"collection2"},
}, nil
}
return nil, errors.New("unknown db")
}).Build()
defer mockShowCollections.UnPatch()
// Mock DescribeCollectionInternal
mockDescribeCollection := mockey.Mock(mockey.GetMethod(server.broker, "DescribeCollectionInternal")).To(func(ctx context.Context, collectionID int64) (*milvuspb.DescribeCollectionResponse, error) {
if collectionID == 100 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel1"},
}, nil
}
if collectionID == 200 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel2"},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockDescribeCollection.UnPatch()
// Setup channel checkpoints - db1 flushed, db2 not flushed
server.meta.channelCPs.checkpoints["channel1"] = &msgpb.MsgPosition{Timestamp: 15000} // Flushed
server.meta.channelCPs.checkpoints["channel2"] = &msgpb.MsgPosition{Timestamp: 10000} // Not flushed
req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345, // Check all databases
}
resp, err := server.GetFlushAllState(context.Background(), req)
assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 2, len(resp.GetFlushStates()))
// Verify mixed flush states
stateMap := make(map[string]*milvuspb.FlushAllState)
for _, state := range resp.GetFlushStates() {
stateMap[state.GetDbName()] = state
}
assert.Contains(t, stateMap, "db1")
assert.Contains(t, stateMap, "db2")
assert.True(t, stateMap["db1"].GetCollectionFlushStates()["collection1"]) // db1 flushed
assert.False(t, stateMap["db2"].GetCollectionFlushStates()["collection2"]) // db2 not flushed
assert.False(t, resp.GetFlushed()) // Overall not flushed due to db2
})
}


@@ -172,6 +172,18 @@ func (c *Client) Flush(ctx context.Context, req *datapb.FlushRequest, opts ...gr
})
}
// FlushAll flushes data for all collections, or only the collections named in the request's flush targets
func (c *Client) FlushAll(ctx context.Context, req *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
)
return wrapGrpcCall(ctx, c, func(client datapb.DataCoordClient) (*datapb.FlushAllResponse, error) {
return client.FlushAll(ctx, req)
})
}
// AssignSegmentID applies allocations for the specified Collection/Partition and related Channel Name (Virtual Channel)
//
// ctx is the context to control request deadline and cancellation


@@ -20,23 +20,19 @@ import (
"context"
"math/rand"
"os"
"strings"
"testing"
"time"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"go.uber.org/zap"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/proto/internalpb"
"github.com/milvus-io/milvus/pkg/v2/util/etcd"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
)
@@ -44,18 +40,7 @@ import (
var mockErr = errors.New("mock grpc err")
func TestMain(m *testing.M) {
// init embed etcd
embedetcdServer, tempDir, err := etcd.StartTestEmbedEtcdServer()
if err != nil {
log.Fatal("failed to start embed etcd server", zap.Error(err))
}
defer os.RemoveAll(tempDir)
defer embedetcdServer.Close()
addrs := etcd.GetEmbedEtcdEndpoints(embedetcdServer)
paramtable.Init()
paramtable.Get().Save(Params.EtcdCfg.Endpoints.Key, strings.Join(addrs, ","))
rand.Seed(time.Now().UnixNano())
os.Exit(m.Run())
@@ -2369,3 +2354,55 @@ func Test_NotifyDroppartition(t *testing.T) {
_, err = client.NotifyDropPartition(ctx, &datapb.NotifyDropPartitionRequest{})
assert.ErrorIs(t, err, context.DeadlineExceeded)
}
func Test_FlushAll(t *testing.T) {
paramtable.Init()
ctx := context.Background()
client, err := NewClient(ctx)
assert.NoError(t, err)
assert.NotNil(t, client)
defer client.Close()
mockDC := mocks.NewMockDataCoordClient(t)
mockGrpcClient := mocks.NewMockGrpcClient[datapb.DataCoordClient](t)
mockGrpcClient.EXPECT().Close().Return(nil)
mockGrpcClient.EXPECT().GetNodeID().Return(1)
mockGrpcClient.EXPECT().ReCall(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, f func(datapb.DataCoordClient) (interface{}, error)) (interface{}, error) {
return f(mockDC)
})
client.(*Client).grpcClient = mockGrpcClient
// test success
mockDC.EXPECT().FlushAll(mock.Anything, mock.Anything).Return(&datapb.FlushAllResponse{
Status: merr.Success(),
}, nil)
_, err = client.FlushAll(ctx, &datapb.FlushAllRequest{})
assert.Nil(t, err)
// test return error status
mockDC.ExpectedCalls = nil
mockDC.EXPECT().FlushAll(mock.Anything, mock.Anything).Return(&datapb.FlushAllResponse{
Status: merr.Status(merr.ErrServiceNotReady),
}, nil)
rsp, err := client.FlushAll(ctx, &datapb.FlushAllRequest{})
assert.NotEqual(t, int32(0), rsp.GetStatus().GetCode())
assert.Nil(t, err)
// test return error
mockDC.ExpectedCalls = nil
mockDC.EXPECT().FlushAll(mock.Anything, mock.Anything).Return(&datapb.FlushAllResponse{
Status: merr.Success(),
}, mockErr)
_, err = client.FlushAll(ctx, &datapb.FlushAllRequest{})
assert.NotNil(t, err)
// test ctx done
ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
defer cancel()
time.Sleep(20 * time.Millisecond)
_, err = client.FlushAll(ctx, &datapb.FlushAllRequest{})
assert.ErrorIs(t, err, context.DeadlineExceeded)
}


@@ -305,6 +305,11 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
return s.dataCoord.Flush(ctx, req)
}
// FlushAll flushes data for all collections, or only the collections named in the request's flush targets
func (s *Server) FlushAll(ctx context.Context, req *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
return s.dataCoord.FlushAll(ctx, req)
}
// AssignSegmentID requests to allocate segment space for insert
func (s *Server) AssignSegmentID(ctx context.Context, req *datapb.AssignSegmentIDRequest) (*datapb.AssignSegmentIDResponse, error) {
return s.dataCoord.AssignSegmentID(ctx, req)


@@ -626,6 +626,65 @@ func (_c *MockDataCoord_Flush_Call) RunAndReturn(run func(context.Context, *data
return _c
}
// FlushAll provides a mock function with given fields: _a0, _a1
func (_m *MockDataCoord) FlushAll(_a0 context.Context, _a1 *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
ret := _m.Called(_a0, _a1)
if len(ret) == 0 {
panic("no return value specified for FlushAll")
}
var r0 *datapb.FlushAllResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest) *datapb.FlushAllResponse); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*datapb.FlushAllResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *datapb.FlushAllRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockDataCoord_FlushAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FlushAll'
type MockDataCoord_FlushAll_Call struct {
*mock.Call
}
// FlushAll is a helper method to define mock.On call
// - _a0 context.Context
// - _a1 *datapb.FlushAllRequest
func (_e *MockDataCoord_Expecter) FlushAll(_a0 interface{}, _a1 interface{}) *MockDataCoord_FlushAll_Call {
return &MockDataCoord_FlushAll_Call{Call: _e.mock.On("FlushAll", _a0, _a1)}
}
func (_c *MockDataCoord_FlushAll_Call) Run(run func(_a0 context.Context, _a1 *datapb.FlushAllRequest)) *MockDataCoord_FlushAll_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*datapb.FlushAllRequest))
})
return _c
}
func (_c *MockDataCoord_FlushAll_Call) Return(_a0 *datapb.FlushAllResponse, _a1 error) *MockDataCoord_FlushAll_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockDataCoord_FlushAll_Call) RunAndReturn(run func(context.Context, *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error)) *MockDataCoord_FlushAll_Call {
_c.Call.Return(run)
return _c
}
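For callers of this generated mock, the typed expecter keeps stubs compile-checked. A hedged usage sketch — it assumes the generated MockDataCoord and its constructor live in the internal/mocks package alongside the client mocks:

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"

	"github.com/milvus-io/milvus/internal/mocks"
	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
	"github.com/milvus-io/milvus/pkg/v2/util/merr"
)

func TestMockDataCoordFlushAll(t *testing.T) {
	mockDC := mocks.NewMockDataCoord(t)
	// RunAndReturn lets the stub derive its reply from the incoming request.
	mockDC.EXPECT().
		FlushAll(mock.Anything, mock.Anything).
		RunAndReturn(func(ctx context.Context, req *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
			return &datapb.FlushAllResponse{Status: merr.Success()}, nil
		})

	resp, err := mockDC.FlushAll(context.Background(), &datapb.FlushAllRequest{})
	assert.NoError(t, err)
	assert.True(t, merr.Ok(resp.GetStatus()))
}
```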
// GcConfirm provides a mock function with given fields: _a0, _a1
func (_m *MockDataCoord) GcConfirm(_a0 context.Context, _a1 *datapb.GcConfirmRequest) (*datapb.GcConfirmResponse, error) {
ret := _m.Called(_a0, _a1)

View File

@ -818,6 +818,80 @@ func (_c *MockDataCoordClient_Flush_Call) RunAndReturn(run func(context.Context,
return _c
}
// FlushAll provides a mock function with given fields: ctx, in, opts
func (_m *MockDataCoordClient) FlushAll(ctx context.Context, in *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
if len(ret) == 0 {
panic("no return value specified for FlushAll")
}
var r0 *datapb.FlushAllResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) (*datapb.FlushAllResponse, error)); ok {
return rf(ctx, in, opts...)
}
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) *datapb.FlushAllResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*datapb.FlushAllResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockDataCoordClient_FlushAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FlushAll'
type MockDataCoordClient_FlushAll_Call struct {
*mock.Call
}
// FlushAll is a helper method to define mock.On call
// - ctx context.Context
// - in *datapb.FlushAllRequest
// - opts ...grpc.CallOption
func (_e *MockDataCoordClient_Expecter) FlushAll(ctx interface{}, in interface{}, opts ...interface{}) *MockDataCoordClient_FlushAll_Call {
return &MockDataCoordClient_FlushAll_Call{Call: _e.mock.On("FlushAll",
append([]interface{}{ctx, in}, opts...)...)}
}
func (_c *MockDataCoordClient_FlushAll_Call) Run(run func(ctx context.Context, in *datapb.FlushAllRequest, opts ...grpc.CallOption)) *MockDataCoordClient_FlushAll_Call {
_c.Call.Run(func(args mock.Arguments) {
variadicArgs := make([]grpc.CallOption, len(args)-2)
for i, a := range args[2:] {
if a != nil {
variadicArgs[i] = a.(grpc.CallOption)
}
}
run(args[0].(context.Context), args[1].(*datapb.FlushAllRequest), variadicArgs...)
})
return _c
}
func (_c *MockDataCoordClient_FlushAll_Call) Return(_a0 *datapb.FlushAllResponse, _a1 error) *MockDataCoordClient_FlushAll_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockDataCoordClient_FlushAll_Call) RunAndReturn(run func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) (*datapb.FlushAllResponse, error)) *MockDataCoordClient_FlushAll_Call {
_c.Call.Return(run)
return _c
}
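The client-side mock additionally threads grpc.CallOptions through the variadic matcher list: each option passed at call time needs its own matcher position, since testify matches variadic arguments positionally. A sketch under that assumption:

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"google.golang.org/grpc"

	"github.com/milvus-io/milvus/internal/mocks"
	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
	"github.com/milvus-io/milvus/pkg/v2/util/merr"
)

func TestMockDataCoordClientFlushAll(t *testing.T) {
	mockDC := mocks.NewMockDataCoordClient(t)
	// The third mock.Anything stands in for the single grpc.CallOption below.
	mockDC.EXPECT().
		FlushAll(mock.Anything, mock.Anything, mock.Anything).
		Return(&datapb.FlushAllResponse{Status: merr.Success()}, nil)

	resp, err := mockDC.FlushAll(context.Background(), &datapb.FlushAllRequest{},
		grpc.WaitForReady(true))
	assert.NoError(t, err)
	assert.True(t, merr.Ok(resp.GetStatus()))
}
```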
// GcConfirm provides a mock function with given fields: ctx, in, opts
func (_m *MockDataCoordClient) GcConfirm(ctx context.Context, in *datapb.GcConfirmRequest, opts ...grpc.CallOption) (*datapb.GcConfirmResponse, error) {
_va := make([]interface{}, len(opts))

View File

@ -4104,12 +4104,8 @@ func (node *Proxy) CalcDistance(ctx context.Context, request *milvuspb.CalcDista
}, nil
}
// FlushAll notifies Proxy to flush all collection's DML messages.
func (node *Proxy) FlushAll(ctx context.Context, req *milvuspb.FlushAllRequest) (*milvuspb.FlushAllResponse, error) {
ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-FlushAll")
defer sp.End()
log := log.With(zap.String("db", req.GetDbName()))
// FlushAll notifies data nodes to persist the data of all collections.
func (node *Proxy) FlushAll(ctx context.Context, request *milvuspb.FlushAllRequest) (*milvuspb.FlushAllResponse, error) {
resp := &milvuspb.FlushAllResponse{
Status: merr.Success(),
}
@ -4117,83 +4113,69 @@ func (node *Proxy) FlushAll(ctx context.Context, req *milvuspb.FlushAllRequest)
resp.Status = merr.Status(err)
return resp, nil
}
log.Info(rpcReceived("FlushAll"))
hasError := func(status *commonpb.Status, err error) bool {
if err != nil {
resp.Status = merr.Status(err)
log.Warn("FlushAll failed", zap.Error(err))
return true
}
if status.GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("FlushAll failed", zap.String("err", status.GetReason()))
resp.Status = status
return true
}
return false
ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-FlushAll")
defer sp.End()
ft := &flushAllTask{
ctx: ctx,
Condition: NewTaskCondition(ctx),
FlushAllRequest: request,
rootCoord: node.rootCoord,
dataCoord: node.dataCoord,
replicateMsgStream: node.replicateMsgStream,
}
dbsRsp, err := node.rootCoord.ListDatabases(ctx, &milvuspb.ListDatabasesRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ListDatabases)),
})
if hasError(dbsRsp.GetStatus(), err) {
return resp, nil
}
dbNames := dbsRsp.DbNames
if req.GetDbName() != "" {
dbNames = lo.Filter(dbNames, func(dbName string, _ int) bool {
return dbName == req.GetDbName()
})
if len(dbNames) == 0 {
resp.Status = merr.Status(merr.WrapErrDatabaseNotFound(req.GetDbName()))
return resp, nil
method := "FlushAll"
tr := timerecord.NewTimeRecorder(method)
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.TotalLabel, request.GetDbName(), "").Inc()
log := log.Ctx(ctx).With(
zap.String("role", typeutil.ProxyRole),
zap.String("db", request.DbName))
log.Debug(rpcReceived(method))
var enqueuedTask task = ft
if streamingutil.IsStreamingServiceEnabled() {
enqueuedTask = &flushAllTaskByStreamingService{
flushAllTask: ft,
chMgr: node.chMgr,
}
}
for _, dbName := range dbNames {
// Flush all collections to accelerate the flushAll process
showColRsp, err := node.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections)),
DbName: dbName,
})
if hasError(showColRsp.GetStatus(), err) {
return resp, nil
}
group, ctx := errgroup.WithContext(ctx)
for _, collection := range showColRsp.GetCollectionNames() {
collection := collection
group.Go(func() error {
flushRsp, err := node.Flush(ctx, &milvuspb.FlushRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_Flush)),
DbName: dbName,
CollectionNames: []string{collection},
})
if err = merr.CheckRPCCall(flushRsp, err); err != nil {
return err
}
return nil
})
}
err = group.Wait()
if hasError(nil, err) {
return resp, nil
}
}
// allocate current ts as FlushAllTs
ts, err := node.tsoAllocator.AllocOne(ctx)
if err != nil {
log.Warn("FlushAll failed", zap.Error(err))
if err := node.sched.dcQueue.Enqueue(enqueuedTask); err != nil {
log.Warn(rpcFailedToEnqueue(method), zap.Error(err))
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.AbandonLabel, request.GetDbName(), "").Inc()
resp.Status = merr.Status(err)
return resp, nil
}
resp.FlushAllTs = ts
log.Debug(rpcEnqueued(method),
zap.Uint64("BeginTs", ft.BeginTs()),
zap.Uint64("EndTs", ft.EndTs()))
log.Info(rpcDone("FlushAll"), zap.Uint64("FlushAllTs", ts),
zap.Time("FlushAllTime", tsoutil.PhysicalTime(ts)))
return resp, nil
if err := ft.WaitToFinish(); err != nil {
log.Warn(
rpcFailedToWaitToFinish(method),
zap.Error(err),
zap.Uint64("BeginTs", ft.BeginTs()),
zap.Uint64("EndTs", ft.EndTs()))
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.FailLabel, request.GetDbName(), "").Inc()
resp.Status = merr.Status(err)
return resp, nil
}
log.Debug(
rpcDone(method),
zap.Uint64("FlushAllTs", ft.result.GetFlushAllTs()),
zap.Uint64("BeginTs", ft.BeginTs()),
zap.Uint64("EndTs", ft.EndTs()))
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.SuccessLabel, request.GetDbName(), "").Inc()
metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return ft.result, nil
}
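The rewritten handler no longer fans out Flush RPCs itself; it enqueues a flushAllTask and blocks on WaitToFinish until the scheduler signals completion. A reduced sketch of that enqueue/notify contract, using illustrative names rather than the actual scheduler types:

```go
package main

import (
	"context"
	"fmt"
)

// taskCondition mirrors the contract the proxy relies on: the RPC handler
// enqueues a task, the scheduler executes it on another goroutine, and
// WaitToFinish blocks until Notify delivers the terminal error (nil on success).
type taskCondition struct {
	done chan error
	ctx  context.Context
}

func newTaskCondition(ctx context.Context) *taskCondition {
	return &taskCondition{done: make(chan error, 1), ctx: ctx}
}

func (c *taskCondition) Notify(err error) { c.done <- err }

func (c *taskCondition) WaitToFinish() error {
	select {
	case <-c.ctx.Done():
		return c.ctx.Err() // request context cancelled or timed out
	case err := <-c.done:
		return err
	}
}

func main() {
	cond := newTaskCondition(context.Background())
	go func() {
		// the scheduler would run Execute() here, then signal completion
		cond.Notify(nil)
	}()
	fmt.Println("task finished:", cond.WaitToFinish())
}
```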
// GetDdChannel returns the used channel for dd operations.

View File

@ -24,6 +24,7 @@ import (
"testing"
"time"
"github.com/bytedance/mockey"
"github.com/cockroachdb/errors"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
@ -39,6 +40,8 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/allocator"
grpcdatacoordclient "github.com/milvus-io/milvus/internal/distributed/datacoord/client"
grpcrootcoordclient "github.com/milvus-io/milvus/internal/distributed/rootcoord/client"
mhttp "github.com/milvus-io/milvus/internal/http"
"github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/internal/util/dependency"
@ -398,200 +401,204 @@ func TestProxy_InvalidResourceGroupName(t *testing.T) {
})
}
func TestProxy_FlushAll_DbCollection(t *testing.T) {
tests := []struct {
testName string
FlushRequest *milvuspb.FlushAllRequest
ExpectedSuccess bool
}{
{"flushAll", &milvuspb.FlushAllRequest{}, true},
{"flushAll set db", &milvuspb.FlushAllRequest{DbName: "default"}, true},
{"flushAll set db, db not exist", &milvuspb.FlushAllRequest{DbName: "default2"}, false},
}
cacheBak := globalMetaCache
defer func() { globalMetaCache = cacheBak }()
// set expectations
cache := NewMockCache(t)
cache.On("GetCollectionID",
mock.Anything, // context.Context
mock.AnythingOfType("string"),
mock.AnythingOfType("string"),
).Return(UniqueID(0), nil).Maybe()
cache.On("RemoveDatabase",
mock.Anything, // context.Context
mock.AnythingOfType("string"),
).Maybe()
globalMetaCache = cache
for _, test := range tests {
t.Run(test.testName, func(t *testing.T) {
factory := dependency.NewDefaultFactory(true)
ctx := context.Background()
paramtable.Init()
node, err := NewProxy(ctx, factory)
assert.NoError(t, err)
node.UpdateStateCode(commonpb.StateCode_Healthy)
node.tsoAllocator = &timestampAllocator{
tso: newMockTimestampAllocatorInterface(),
}
rpcRequestChannel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
node.replicateMsgStream, err = node.factory.NewMsgStream(node.ctx)
assert.NoError(t, err)
node.replicateMsgStream.AsProducer(ctx, []string{rpcRequestChannel})
Params.Save(Params.ProxyCfg.MaxTaskNum.Key, "1000")
node.sched, err = newTaskScheduler(ctx, node.tsoAllocator, node.factory)
assert.NoError(t, err)
err = node.sched.Start()
assert.NoError(t, err)
defer node.sched.Close()
node.dataCoord = mocks.NewMockDataCoordClient(t)
node.rootCoord = mocks.NewMockRootCoordClient(t)
successStatus := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
node.dataCoord.(*mocks.MockDataCoordClient).EXPECT().Flush(mock.Anything, mock.Anything).
Return(&datapb.FlushResponse{Status: successStatus}, nil).Maybe()
node.rootCoord.(*mocks.MockRootCoordClient).EXPECT().ShowCollections(mock.Anything, mock.Anything).
Return(&milvuspb.ShowCollectionsResponse{Status: successStatus, CollectionNames: []string{"col-0"}}, nil).Maybe()
node.rootCoord.(*mocks.MockRootCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
Return(&milvuspb.ListDatabasesResponse{Status: successStatus, DbNames: []string{"default"}}, nil).Maybe()
resp, err := node.FlushAll(ctx, test.FlushRequest)
assert.NoError(t, err)
if test.ExpectedSuccess {
assert.True(t, merr.Ok(resp.GetStatus()))
} else {
assert.NotEqual(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
}
})
}
}
func TestProxy_FlushAll(t *testing.T) {
// createTestProxy creates a test proxy instance with all necessary setup
func createTestProxy() *Proxy {
factory := dependency.NewDefaultFactory(true)
ctx := context.Background()
paramtable.Init()
node, err := NewProxy(ctx, factory)
assert.NoError(t, err)
node, _ := NewProxy(ctx, factory)
node.UpdateStateCode(commonpb.StateCode_Healthy)
node.tsoAllocator = &timestampAllocator{
tso: newMockTimestampAllocatorInterface(),
}
rpcRequestChannel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
node.replicateMsgStream, err = node.factory.NewMsgStream(node.ctx)
assert.NoError(t, err)
node.replicateMsgStream, _ = node.factory.NewMsgStream(node.ctx)
node.replicateMsgStream.AsProducer(ctx, []string{rpcRequestChannel})
Params.Save(Params.ProxyCfg.MaxTaskNum.Key, "1000")
node.sched, err = newTaskScheduler(ctx, node.tsoAllocator, node.factory)
assert.NoError(t, err)
err = node.sched.Start()
assert.NoError(t, err)
defer node.sched.Close()
node.dataCoord = mocks.NewMockDataCoordClient(t)
node.rootCoord = mocks.NewMockRootCoordClient(t)
node.sched, _ = newTaskScheduler(ctx, node.tsoAllocator, node.factory)
node.sched.Start()
cacheBak := globalMetaCache
defer func() { globalMetaCache = cacheBak }()
return node
}
// set expectations
cache := NewMockCache(t)
cache.On("GetCollectionID",
mock.Anything, // context.Context
mock.AnythingOfType("string"),
mock.AnythingOfType("string"),
).Return(UniqueID(0), nil).Once()
func TestProxy_FlushAll_NoDatabase(t *testing.T) {
mockey.PatchConvey("TestProxy_FlushAll_NoDatabase", t, func() {
// Mock global meta cache methods
globalMetaCache = &MetaCache{}
mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
return UniqueID(0), nil
}).Build()
mockey.Mock(globalMetaCache.RemoveDatabase).To(func(ctx context.Context, dbName string) error {
return nil
}).Build()
cache.On("RemoveDatabase",
mock.Anything, // context.Context
mock.AnythingOfType("string"),
).Maybe()
// Mock paramtable initialization
mockey.Mock(paramtable.Init).Return().Build()
mockey.Mock((*paramtable.ComponentParam).Save).Return().Build()
globalMetaCache = cache
successStatus := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
node.dataCoord.(*mocks.MockDataCoordClient).EXPECT().Flush(mock.Anything, mock.Anything).
Return(&datapb.FlushResponse{Status: successStatus}, nil).Maybe()
node.rootCoord.(*mocks.MockRootCoordClient).EXPECT().ShowCollections(mock.Anything, mock.Anything).
Return(&milvuspb.ShowCollectionsResponse{Status: successStatus, CollectionNames: []string{"col-0"}}, nil).Maybe()
node.rootCoord.(*mocks.MockRootCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
Return(&milvuspb.ListDatabasesResponse{Status: successStatus, DbNames: []string{"default"}}, nil).Maybe()
// Mock grpc data coord client FlushAll method
successStatus := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
mockey.Mock((*grpcdatacoordclient.Client).FlushAll).To(func(ctx context.Context, req *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
return &datapb.FlushAllResponse{Status: successStatus}, nil
}).Build()
t.Run("FlushAll", func(t *testing.T) {
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
// Mock rootCoord client methods
mockey.Mock((*grpcrootcoordclient.Client).ListDatabases).To(func(ctx context.Context, req *milvuspb.ListDatabasesRequest, opts ...grpc.CallOption) (*milvuspb.ListDatabasesResponse, error) {
return &milvuspb.ListDatabasesResponse{
Status: successStatus,
DbNames: []string{"default"},
}, nil
}).Build()
mockey.Mock((*grpcrootcoordclient.Client).ShowCollections).To(func(ctx context.Context, req *milvuspb.ShowCollectionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowCollectionsResponse, error) {
return &milvuspb.ShowCollectionsResponse{
Status: successStatus,
CollectionIds: []int64{1, 2, 3},
}, nil
}).Build()
// Act: Execute test
node := createTestProxy()
defer node.sched.Close()
datacoord := &grpcdatacoordclient.Client{}
node.dataCoord = datacoord
rootcoord := &grpcrootcoordclient.Client{}
node.rootCoord = rootcoord
resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{})
// Assert: Verify results
assert.NoError(t, err)
assert.True(t, merr.Ok(resp.GetStatus()))
})
}
t.Run("FlushAll failed, server is abnormal", func(t *testing.T) {
func TestProxy_FlushAll_WithDefaultDatabase(t *testing.T) {
mockey.PatchConvey("TestProxy_FlushAll_WithDefaultDatabase", t, func() {
// Mock global meta cache methods
globalMetaCache = &MetaCache{}
mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
return UniqueID(0), nil
}).Build()
mockey.Mock(globalMetaCache.RemoveDatabase).To(func(ctx context.Context, dbName string) error {
return nil
}).Build()
// Mock paramtable initialization
mockey.Mock(paramtable.Init).Return().Build()
mockey.Mock((*paramtable.ComponentParam).Save).Return().Build()
// Mock grpc data coord client FlushAll method for default database
successStatus := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
mockey.Mock((*grpcdatacoordclient.Client).FlushAll).To(func(ctx context.Context, req *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
return &datapb.FlushAllResponse{Status: successStatus}, nil
}).Build()
// Mock rootCoord ShowCollections; ListDatabases is not needed since the request names a database
mockey.Mock((*grpcrootcoordclient.Client).ShowCollections).To(func(ctx context.Context, req *milvuspb.ShowCollectionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowCollectionsResponse, error) {
return &milvuspb.ShowCollectionsResponse{
Status: successStatus,
CollectionIds: []int64{1, 2, 3},
}, nil
}).Build()
// Act: Execute test
node := createTestProxy()
defer node.sched.Close()
datacoord := &grpcdatacoordclient.Client{}
node.dataCoord = datacoord
rootcoord := &grpcrootcoordclient.Client{}
node.rootCoord = rootcoord
resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{DbName: "default"})
// Assert: Verify results
assert.NoError(t, err)
assert.True(t, merr.Ok(resp.GetStatus()))
})
}
func TestProxy_FlushAll_DatabaseNotExist(t *testing.T) {
mockey.PatchConvey("TestProxy_FlushAll_DatabaseNotExist", t, func() {
// Mock global meta cache methods
globalMetaCache = &MetaCache{}
mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
return UniqueID(0), nil
}).Build()
mockey.Mock(globalMetaCache.RemoveDatabase).To(func(ctx context.Context, dbName string) error {
return nil
}).Build()
// Mock paramtable initialization
mockey.Mock(paramtable.Init).Return().Build()
mockey.Mock((*paramtable.ComponentParam).Save).Return().Build()
// Mock grpc data coord client FlushAll method for non-existent database
successStatus := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
mockey.Mock((*grpcdatacoordclient.Client).FlushAll).To(func(ctx context.Context, req *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
return &datapb.FlushAllResponse{Status: successStatus}, nil
}).Build()
// Mock rootCoord ShowCollections; ListDatabases is not needed since the request names a database
mockey.Mock((*grpcrootcoordclient.Client).ShowCollections).To(func(ctx context.Context, req *milvuspb.ShowCollectionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowCollectionsResponse, error) {
return &milvuspb.ShowCollectionsResponse{
Status: successStatus,
CollectionIds: []int64{1, 2, 3},
}, nil
}).Build()
// Act: Execute test
node := createTestProxy()
defer node.sched.Close()
datacoord := &grpcdatacoordclient.Client{}
node.dataCoord = datacoord
rootcoord := &grpcrootcoordclient.Client{}
node.rootCoord = rootcoord
resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{DbName: "default2"})
// Assert: Verify results
assert.NoError(t, err)
assert.True(t, merr.Ok(resp.GetStatus()))
})
}
func TestProxy_FlushAll_ServerAbnormal(t *testing.T) {
mockey.PatchConvey("TestProxy_FlushAll_ServerAbnormal", t, func() {
// Mock global meta cache methods
globalMetaCache = &MetaCache{}
mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
return UniqueID(0), nil
}).Build()
mockey.Mock(globalMetaCache.RemoveDatabase).To(func(ctx context.Context, dbName string) error {
return nil
}).Build()
// Mock paramtable initialization
mockey.Mock(paramtable.Init).Return().Build()
mockey.Mock((*paramtable.ComponentParam).Save).Return().Build()
// Act: Execute test
node := createTestProxy()
defer node.sched.Close()
datacoord := &grpcdatacoordclient.Client{}
node.dataCoord = datacoord
// Set node state to abnormal
node.UpdateStateCode(commonpb.StateCode_Abnormal)
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{})
// Assert: Verify results
assert.NoError(t, err)
assert.ErrorIs(t, merr.Error(resp.GetStatus()), merr.ErrServiceNotReady)
node.UpdateStateCode(commonpb.StateCode_Healthy)
})
t.Run("FlushAll failed, get id failed", func(t *testing.T) {
globalMetaCache.(*MockCache).On("GetCollectionID",
mock.Anything, // context.Context
mock.AnythingOfType("string"),
mock.AnythingOfType("string"),
).Return(UniqueID(0), errors.New("mock error")).Once()
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
globalMetaCache.(*MockCache).On("GetCollectionID",
mock.Anything, // context.Context
mock.AnythingOfType("string"),
mock.AnythingOfType("string"),
).Return(UniqueID(0), nil).Once()
})
t.Run("FlushAll failed, DataCoord flush failed", func(t *testing.T) {
node.dataCoord.(*mocks.MockDataCoordClient).ExpectedCalls = nil
node.dataCoord.(*mocks.MockDataCoordClient).EXPECT().Flush(mock.Anything, mock.Anything).
Return(&datapb.FlushResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "mock err",
},
}, nil).Maybe()
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
})
t.Run("FlushAll failed, RootCoord showCollections failed", func(t *testing.T) {
node.rootCoord.(*mocks.MockRootCoordClient).ExpectedCalls = nil
node.rootCoord.(*mocks.MockRootCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
Return(&milvuspb.ListDatabasesResponse{Status: successStatus, DbNames: []string{"default"}}, nil).Maybe()
node.rootCoord.(*mocks.MockRootCoordClient).EXPECT().ShowCollections(mock.Anything, mock.Anything).
Return(&milvuspb.ShowCollectionsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "mock err",
},
}, nil).Maybe()
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
})
t.Run("FlushAll failed, RootCoord showCollections failed", func(t *testing.T) {
node.rootCoord.(*mocks.MockRootCoordClient).ExpectedCalls = nil
node.rootCoord.(*mocks.MockRootCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
Return(&milvuspb.ListDatabasesResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "mock err",
},
}, nil).Maybe()
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
})
}
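These tests patch concrete client methods with mockey rather than injecting interface mocks, which is why plain struct values like &grpcdatacoordclient.Client{} suffice. A toy sketch of the pattern on a hypothetical type — note that mockey patching generally requires running tests with inlining disabled, e.g. -gcflags="all=-l -N":

```go
package main

import (
	"fmt"
	"testing"

	"github.com/bytedance/mockey"
)

type greeter struct{}

func (g *greeter) Hello(name string) string { return "hello " + name }

// TestPatchHello patches a concrete method for the duration of a PatchConvey
// scope; no interface injection is required.
func TestPatchHello(t *testing.T) {
	mockey.PatchConvey("patch greeter.Hello", t, func() {
		mockey.Mock((*greeter).Hello).To(func(g *greeter, name string) string {
			return "patched " + name
		}).Build()

		g := &greeter{}
		fmt.Println(g.Hello("milvus")) // prints "patched milvus" inside the scope
	})
}
```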

View File

@ -84,6 +84,7 @@ const (
HasPartitionTaskName = "HasPartitionTask"
ShowPartitionTaskName = "ShowPartitionTask"
FlushTaskName = "FlushTask"
FlushAllTaskName = "FlushAllTask"
LoadCollectionTaskName = "LoadCollectionTask"
ReleaseCollectionTaskName = "ReleaseCollectionTask"
LoadPartitionTaskName = "LoadPartitionsTask"

View File

@ -116,7 +116,6 @@ func (t *flushTask) Execute(ctx context.Context) error {
coll2FlushTs[collName] = resp.GetFlushTs()
channelCps = resp.GetChannelCps()
}
SendReplicateMessagePack(ctx, t.replicateMsgStream, t.FlushRequest)
t.result = &milvuspb.FlushResponse{
Status: merr.Success(),
DbName: t.GetDbName(),

View File

@ -0,0 +1,200 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"fmt"
"github.com/samber/lo"
"go.uber.org/zap"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/mq/msgstream"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
)
type flushAllTask struct {
baseTask
Condition
*milvuspb.FlushAllRequest
ctx context.Context
rootCoord types.RootCoordClient
dataCoord types.DataCoordClient
result *milvuspb.FlushAllResponse
replicateMsgStream msgstream.MsgStream
}
func (t *flushAllTask) TraceCtx() context.Context {
return t.ctx
}
func (t *flushAllTask) ID() UniqueID {
return t.Base.MsgID
}
func (t *flushAllTask) SetID(uid UniqueID) {
t.Base.MsgID = uid
}
func (t *flushAllTask) Name() string {
return FlushAllTaskName
}
func (t *flushAllTask) Type() commonpb.MsgType {
return t.Base.MsgType
}
func (t *flushAllTask) BeginTs() Timestamp {
return t.Base.Timestamp
}
func (t *flushAllTask) EndTs() Timestamp {
return t.Base.Timestamp
}
func (t *flushAllTask) SetTs(ts Timestamp) {
t.Base.Timestamp = ts
}
func (t *flushAllTask) OnEnqueue() error {
if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
t.Base.MsgType = commonpb.MsgType_Flush
t.Base.SourceID = paramtable.GetNodeID()
return nil
}
func (t *flushAllTask) PreExecute(ctx context.Context) error {
return nil
}
func (t *flushAllTask) Execute(ctx context.Context) error {
targets, err := t.expandFlushCollectionNames(ctx)
if err != nil {
return err
}
// get detailed flush info from datacoord
resp, err := t.dataCoord.FlushAll(ctx, &datapb.FlushAllRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_Flush)),
DbName: t.GetDbName(),
FlushTargets: targets,
})
if err = merr.CheckRPCCall(resp, err); err != nil {
return fmt.Errorf("failed to call flush all to data coordinator: %s", err.Error())
}
dbResultsMap := lo.GroupBy(resp.GetFlushResults(), func(result *datapb.FlushResult) string {
return result.GetDbName()
})
results := make([]*milvuspb.FlushAllResult, 0)
for dbName, dbResults := range dbResultsMap {
results = append(results, &milvuspb.FlushAllResult{
DbName: dbName,
CollectionResults: lo.Map(dbResults, func(result *datapb.FlushResult, _ int) *milvuspb.FlushCollectionResult {
return &milvuspb.FlushCollectionResult{
CollectionName: result.GetCollectionName(),
SegmentIds: &schemapb.LongArray{Data: result.GetSegmentIDs()},
FlushSegmentIds: &schemapb.LongArray{Data: result.GetFlushSegmentIDs()},
SealTime: result.GetTimeOfSeal(),
FlushTs: result.GetFlushTs(),
ChannelCps: result.GetChannelCps(),
}
}),
})
}
t.result = &milvuspb.FlushAllResponse{
Status: merr.Success(),
FlushAllTs: resp.GetFlushTs(),
FlushResults: results,
}
return nil
}
func (t *flushAllTask) PostExecute(ctx context.Context) error {
return nil
}
func (t *flushAllTask) expandFlushCollectionNames(ctx context.Context) ([]*datapb.FlushAllTarget, error) {
// Determine which databases and collections to flush
targets := make([]*datapb.FlushAllTarget, 0)
if len(t.GetFlushTargets()) > 0 {
// Use flush_targets from request
for _, target := range t.GetFlushTargets() {
collectionIDs := make([]int64, 0)
for _, collectionName := range target.GetCollectionNames() {
collectionID, err := globalMetaCache.GetCollectionID(ctx, target.GetDbName(), collectionName)
if err != nil {
return nil, err
}
collectionIDs = append(collectionIDs, collectionID)
}
targets = append(targets, &datapb.FlushAllTarget{
DbName: target.GetDbName(),
CollectionIds: collectionIDs,
})
}
} else if t.GetDbName() != "" {
// Backward compatibility: use deprecated db_name field
targets = append(targets, &datapb.FlushAllTarget{
DbName: t.GetDbName(),
CollectionIds: []int64{},
})
} else {
// Flush all databases
listResp, err := t.rootCoord.ListDatabases(ctx, &milvuspb.ListDatabasesRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ListDatabases)),
})
if err != nil {
log.Info("flush all task by streaming service failed, list databases failed", zap.Error(err))
return nil, err
}
for _, dbName := range listResp.GetDbNames() {
targets = append(targets, &datapb.FlushAllTarget{
DbName: dbName,
CollectionIds: []int64{},
})
}
}
// If CollectionIds is empty, it means flush all collections in this database
for _, target := range targets {
collectionIDs := target.GetCollectionIds()
if len(collectionIDs) == 0 {
showColRsp, err := t.rootCoord.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections)),
DbName: target.GetDbName(),
})
if err != nil {
return nil, err
}
target.CollectionIds = showColRsp.GetCollectionIds()
}
}
return targets, nil
}
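expandFlushCollectionNames consumes the client-facing request. A hypothetical request-construction sketch of that shape — field names follow the accessors used above (GetFlushTargets, GetDbName, GetCollectionNames); the exact milvuspb message layout is assumed, not quoted from the proto:

```go
package proxy

import (
	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
)

// buildFlushAllRequest sketches the granular-target request shape.
func buildFlushAllRequest() *milvuspb.FlushAllRequest {
	return &milvuspb.FlushAllRequest{
		FlushTargets: []*milvuspb.FlushAllTarget{
			{
				DbName:          "db1",
				CollectionNames: []string{"orders", "users"}, // flush only these two
			},
			{
				DbName: "db2", // empty CollectionNames: flush every collection in db2
			},
		},
	}
}
```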

View File

@ -0,0 +1,148 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"fmt"
"sync"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/tsoutil"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)
type flushAllTaskByStreamingService struct {
*flushAllTask
chMgr channelsMgr
}
func (t *flushAllTaskByStreamingService) Execute(ctx context.Context) error {
flushTs := t.BeginTs()
timeOfSeal, _ := tsoutil.ParseTS(flushTs)
// Note: for now, flush sends a flush signal to the WAL on the streaming node and then fetches the
// flushed segment list from datacoord, so we expand the flush collection names up front to keep
// both steps operating on the same collection list.
targets, err := t.expandFlushCollectionNames(ctx)
if err != nil {
return err
}
// send flush signal to wal on streamnode
onFlushSegmentMap, err := t.sendManualFlushAllToWal(ctx, targets, flushTs)
if err != nil {
return err
}
// get detailed flush info from datacoord
resp, err := t.dataCoord.FlushAll(ctx, &datapb.FlushAllRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_Flush)),
DbName: t.GetDbName(),
FlushTargets: targets,
})
if err = merr.CheckRPCCall(resp, err); err != nil {
return fmt.Errorf("failed to call flush all to data coordinator: %s", err.Error())
}
dbResultsMap := lo.GroupBy(resp.GetFlushResults(), func(result *datapb.FlushResult) string {
return result.GetDbName()
})
results := make([]*milvuspb.FlushAllResult, 0)
for dbName, dbResults := range dbResultsMap {
results = append(results, &milvuspb.FlushAllResult{
DbName: dbName,
CollectionResults: lo.Map(dbResults, func(result *datapb.FlushResult, _ int) *milvuspb.FlushCollectionResult {
onFlushSegmentIDs := onFlushSegmentMap[result.GetCollectionID()]
// Remove the flushed segments from onFlushSegmentIDs
flushedSegmentSet := typeutil.NewUniqueSet(result.GetFlushSegmentIDs()...)
filteredSegments := make([]int64, 0, len(onFlushSegmentIDs))
for _, id := range onFlushSegmentIDs {
if !flushedSegmentSet.Contain(id) {
filteredSegments = append(filteredSegments, id)
}
}
onFlushSegmentIDs = filteredSegments
return &milvuspb.FlushCollectionResult{
CollectionName: result.GetCollectionName(),
SegmentIds: &schemapb.LongArray{Data: onFlushSegmentIDs},
FlushSegmentIds: &schemapb.LongArray{Data: result.GetFlushSegmentIDs()},
SealTime: timeOfSeal.Unix(),
FlushTs: flushTs,
ChannelCps: result.GetChannelCps(),
}
}),
})
}
t.result = &milvuspb.FlushAllResponse{
Status: merr.Success(),
FlushAllTs: flushTs,
FlushResults: results,
}
return nil
}
// TODO: refine this by sending a single FlushAll message to the WAL
func (t *flushAllTaskByStreamingService) sendManualFlushAllToWal(ctx context.Context, flushTargets []*datapb.FlushAllTarget, flushTs Timestamp) (map[int64][]int64, error) {
wg := errgroup.Group{}
// bound concurrency by the configured FlushAll max parallel tasks
wg.SetLimit(Params.DataCoordCfg.FlushAllMaxParallelTasks.GetAsInt())
var mu sync.Mutex
results := make(map[int64][]int64)
for _, target := range flushTargets {
for _, coll := range target.CollectionIds {
collID := coll
wg.Go(func() error {
vchannels, err := t.chMgr.getVChannels(collID)
if err != nil {
return err
}
onFlushSegmentIDs := make([]int64, 0)
// Ask the streamingnode to flush segments.
for _, vchannel := range vchannels {
segmentIDs, err := sendManualFlushToWAL(ctx, collID, vchannel, flushTs)
if err != nil {
return err
}
onFlushSegmentIDs = append(onFlushSegmentIDs, segmentIDs...)
}
mu.Lock()
results[collID] = onFlushSegmentIDs
mu.Unlock()
return nil
})
}
}
err := wg.Wait()
if err != nil {
return nil, err
}
return results, nil
}
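sendManualFlushAllToWal bounds its fan-out with errgroup.SetLimit and serializes map writes behind a mutex. The same skeleton in isolation, with a fixed limit standing in for the config knob:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// Bounded fan-out: SetLimit caps concurrent goroutines, a mutex guards the
// shared result map, and Wait returns the first non-nil error.
func main() {
	var (
		g       errgroup.Group
		mu      sync.Mutex
		results = make(map[int]int)
	)
	g.SetLimit(4) // at most 4 flushes in flight

	for coll := 0; coll < 16; coll++ {
		coll := coll // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			mu.Lock()
			results[coll] = coll * coll
			mu.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("flush failed:", err)
		return
	}
	fmt.Println("flushed", len(results), "collections")
}
```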

File diff suppressed because it is too large

View File

@ -0,0 +1,530 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/pkg/v2/mq/msgstream"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/uniquegenerator"
)
func createTestFlushAllTask(t *testing.T) (*flushAllTask, *mocks.MockRootCoordClient, *mocks.MockDataCoordClient, *msgstream.MockMsgStream, context.Context) {
ctx := context.Background()
mockRC := mocks.NewMockRootCoordClient(t)
mockDC := mocks.NewMockDataCoordClient(t)
replicateMsgStream := msgstream.NewMockMsgStream(t)
task := &flushAllTask{
baseTask: baseTask{},
Condition: NewTaskCondition(ctx),
FlushAllRequest: &milvuspb.FlushAllRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Flush,
MsgID: 1,
Timestamp: uint64(time.Now().UnixNano()),
SourceID: 1,
},
},
ctx: ctx,
rootCoord: mockRC,
dataCoord: mockDC,
replicateMsgStream: replicateMsgStream,
}
return task, mockRC, mockDC, replicateMsgStream, ctx
}
func TestFlushAllTaskTraceCtx(t *testing.T) {
task, mockRC, _, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
traceCtx := task.TraceCtx()
assert.Equal(t, ctx, traceCtx)
}
func TestFlushAllTaskID(t *testing.T) {
task, mockRC, _, replicateMsgStream, _ := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Test getting ID
originalID := task.ID()
assert.Equal(t, UniqueID(1), originalID)
// Test setting ID
newID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
task.SetID(newID)
assert.Equal(t, newID, task.ID())
}
func TestFlushAllTaskName(t *testing.T) {
task, mockRC, _, replicateMsgStream, _ := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
name := task.Name()
assert.Equal(t, FlushAllTaskName, name)
}
func TestFlushAllTaskType(t *testing.T) {
task, mockRC, _, replicateMsgStream, _ := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
msgType := task.Type()
assert.Equal(t, commonpb.MsgType_Flush, msgType)
}
func TestFlushAllTaskTimestampMethods(t *testing.T) {
task, mockRC, _, replicateMsgStream, _ := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
originalTs := task.BeginTs()
assert.Equal(t, originalTs, task.EndTs())
newTs := Timestamp(time.Now().UnixNano())
task.SetTs(newTs)
assert.Equal(t, newTs, task.BeginTs())
assert.Equal(t, newTs, task.EndTs())
}
func TestFlushAllTaskOnEnqueue(t *testing.T) {
ctx := context.Background()
mockRC := mocks.NewMockRootCoordClient(t)
mockDC := mocks.NewMockDataCoordClient(t)
defer mockRC.AssertExpectations(t)
// Test with nil Base
task := &flushAllTask{
baseTask: baseTask{},
Condition: NewTaskCondition(ctx),
FlushAllRequest: &milvuspb.FlushAllRequest{},
ctx: ctx,
rootCoord: mockRC,
dataCoord: mockDC,
}
err := task.OnEnqueue()
assert.NoError(t, err)
assert.NotNil(t, task.Base)
assert.Equal(t, commonpb.MsgType_Flush, task.Base.MsgType)
// Test with existing Base
task, _, _, replicateMsgStream, _ := createTestFlushAllTask(t)
defer replicateMsgStream.AssertExpectations(t)
err = task.OnEnqueue()
assert.NoError(t, err)
assert.Equal(t, commonpb.MsgType_Flush, task.Base.MsgType)
}
func TestFlushAllTaskPreExecute(t *testing.T) {
task, mockRC, _, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
err := task.PreExecute(ctx)
assert.NoError(t, err)
}
func TestFlushAllTaskExecuteSuccess(t *testing.T) {
task, mockRC, mockDC, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer mockDC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Setup expectations for ListDatabases (called when no specific db is set)
mockRC.EXPECT().ListDatabases(mock.Anything, mock.AnythingOfType("*milvuspb.ListDatabasesRequest")).
Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"default"},
}, nil).Once()
// Setup expectations for ShowCollections (called to get collections in each database)
mockRC.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{1, 2, 3},
}, nil).Once()
// Setup expectations for FlushAll
dataCoordResp := &datapb.FlushAllResponse{
Status: merr.Success(),
}
mockDC.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(dataCoordResp, nil).Once()
err := task.Execute(ctx)
assert.NoError(t, err)
// Verify the result is properly constructed milvuspb.FlushAllResponse
assert.NotNil(t, task.result)
assert.Equal(t, merr.Success().ErrorCode, task.result.Status.ErrorCode)
assert.NotNil(t, task.result.FlushResults)
}
func TestFlushAllTaskExecuteFlushAllRPCError(t *testing.T) {
task, mockRC, mockDC, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer mockDC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Setup expectations for ListDatabases (called when no specific db is set)
mockRC.EXPECT().ListDatabases(mock.Anything, mock.AnythingOfType("*milvuspb.ListDatabasesRequest")).
Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"default"},
}, nil).Once()
// Setup expectations for ShowCollections (called to get collections in each database)
mockRC.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{1, 2, 3},
}, nil).Once()
// Test RPC call error
expectedErr := fmt.Errorf("rpc error")
mockDC.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(nil, expectedErr).Once()
err := task.Execute(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), "failed to call flush all to data coordinator")
}
func TestFlushAllTaskExecuteFlushAllResponseError(t *testing.T) {
task, mockRC, mockDC, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer mockDC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Setup expectations for ListDatabases (called when no specific db is set)
mockRC.EXPECT().ListDatabases(mock.Anything, mock.AnythingOfType("*milvuspb.ListDatabasesRequest")).
Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"default"},
}, nil).Once()
// Setup expectations for ShowCollections (called to get collections in each database)
mockRC.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{1, 2, 3},
}, nil).Once()
// Test response with error status
errorResp := &datapb.FlushAllResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "flush all failed",
},
}
mockDC.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(errorResp, nil).Once()
err := task.Execute(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), "failed to call flush all to data coordinator")
}
func TestFlushAllTaskExecuteWithMerrCheck(t *testing.T) {
task, mockRC, mockDC, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer mockDC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Setup expectations for ListDatabases (called when no specific db is set)
mockRC.EXPECT().ListDatabases(mock.Anything, mock.AnythingOfType("*milvuspb.ListDatabasesRequest")).
Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"default"},
}, nil).Once()
// Setup expectations for ShowCollections (called to get collections in each database)
mockRC.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{1, 2, 3},
}, nil).Once()
// Test successful execution with merr.CheckRPCCall
dataCoordResp := &datapb.FlushAllResponse{
Status: merr.Success(),
}
mockDC.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(dataCoordResp, nil).Once()
err := task.Execute(ctx)
assert.NoError(t, err)
// Verify the result is properly constructed milvuspb.FlushAllResponse
assert.NotNil(t, task.result)
assert.Equal(t, merr.Success().ErrorCode, task.result.Status.ErrorCode)
assert.NotNil(t, task.result.FlushResults)
}
func TestFlushAllTaskExecuteRequestContent(t *testing.T) {
task, mockRC, mockDC, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer mockDC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Setup expectations for ListDatabases (called when no specific db is set)
mockRC.EXPECT().ListDatabases(mock.Anything, mock.AnythingOfType("*milvuspb.ListDatabasesRequest")).
Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"default"},
}, nil).Once()
// Setup expectations for ShowCollections (called to get collections in each database)
mockRC.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{1, 2, 3},
}, nil).Once()
// Test the content of the FlushAllRequest sent to dataCoord
mockDC.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(&datapb.FlushAllResponse{Status: merr.Success()}, nil).Once()
err := task.Execute(ctx)
assert.NoError(t, err)
// The test verifies that Execute method creates the correct request structure internally
// The actual request content validation is covered by other tests
}
func TestFlushAllTaskPostExecute(t *testing.T) {
task, _, mockDC, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mockDC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
err := task.PostExecute(ctx)
assert.NoError(t, err)
}
func TestFlushAllTaskLifecycle(t *testing.T) {
ctx := context.Background()
mockRC := mocks.NewMockRootCoordClient(t)
mockDC := mocks.NewMockDataCoordClient(t)
replicateMsgStream := msgstream.NewMockMsgStream(t)
defer mockRC.AssertExpectations(t)
defer mockDC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Test complete task lifecycle
// 1. OnEnqueue
task := &flushAllTask{
baseTask: baseTask{},
Condition: NewTaskCondition(ctx),
FlushAllRequest: &milvuspb.FlushAllRequest{},
ctx: ctx,
rootCoord: mockRC,
dataCoord: mockDC,
replicateMsgStream: replicateMsgStream,
}
err := task.OnEnqueue()
assert.NoError(t, err)
// 2. PreExecute
err = task.PreExecute(ctx)
assert.NoError(t, err)
// 3. Execute - Setup expectations for rootCoord calls
// Setup expectations for ListDatabases (called when no specific db is set)
mockRC.EXPECT().ListDatabases(mock.Anything, mock.AnythingOfType("*milvuspb.ListDatabasesRequest")).
Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"default"},
}, nil).Once()
// Setup expectations for ShowCollections (called to get collections in each database)
mockRC.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{1, 2, 3},
}, nil).Once()
dataCoordResp := &datapb.FlushAllResponse{
Status: merr.Success(),
}
mockDC.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(dataCoordResp, nil).Once()
err = task.Execute(ctx)
assert.NoError(t, err)
// 4. PostExecute
err = task.PostExecute(ctx)
assert.NoError(t, err)
// Verify task state - result should be milvuspb.FlushAllResponse
assert.NotNil(t, task.result)
assert.Equal(t, merr.Success().ErrorCode, task.result.Status.ErrorCode)
assert.NotNil(t, task.result.FlushResults)
}
func TestFlushAllTaskErrorHandlingInExecute(t *testing.T) {
// Test different error scenarios in Execute method
testCases := []struct {
name string
setupMock func(*mocks.MockRootCoordClient, *mocks.MockDataCoordClient)
expectedError string
}{
{
name: "dataCoord FlushAll returns error",
setupMock: func(mockRC *mocks.MockRootCoordClient, mockDC *mocks.MockDataCoordClient) {
// Setup expectations for ListDatabases and ShowCollections first
mockRC.EXPECT().ListDatabases(mock.Anything, mock.AnythingOfType("*milvuspb.ListDatabasesRequest")).
Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"default"},
}, nil).Once()
mockRC.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{1, 2, 3},
}, nil).Once()
mockDC.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(nil, fmt.Errorf("network error")).Once()
},
expectedError: "failed to call flush all to data coordinator",
},
{
name: "dataCoord FlushAll returns error status",
setupMock: func(mockRC *mocks.MockRootCoordClient, mockDC *mocks.MockDataCoordClient) {
// Setup expectations for ListDatabases and ShowCollections first
mockRC.EXPECT().ListDatabases(mock.Anything, mock.AnythingOfType("*milvuspb.ListDatabasesRequest")).
Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"default"},
}, nil).Once()
mockRC.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{1, 2, 3},
}, nil).Once()
mockDC.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(&datapb.FlushAllResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_IllegalArgument,
Reason: "invalid request",
},
}, nil).Once()
},
expectedError: "failed to call flush all to data coordinator",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
task, mockRC, mockDC, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mockRC.AssertExpectations(t)
defer mockDC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
tc.setupMock(mockRC, mockDC)
err := task.Execute(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), tc.expectedError)
})
}
}
func TestFlushAllTaskImplementsTaskInterface(t *testing.T) {
// Verify that flushAllTask implements the task interface
var _ task = (*flushAllTask)(nil)
task, _, mockDC, replicateMsgStream, _ := createTestFlushAllTask(t)
defer mockDC.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Test all interface methods are accessible
assert.NotNil(t, task.TraceCtx)
assert.NotNil(t, task.ID)
assert.NotNil(t, task.SetID)
assert.NotNil(t, task.Name)
assert.NotNil(t, task.Type)
assert.NotNil(t, task.BeginTs)
assert.NotNil(t, task.EndTs)
assert.NotNil(t, task.SetTs)
assert.NotNil(t, task.OnEnqueue)
assert.NotNil(t, task.PreExecute)
assert.NotNil(t, task.Execute)
assert.NotNil(t, task.PostExecute)
}
func TestFlushAllTaskNilHandling(t *testing.T) {
// Test behavior with nil values
task := &flushAllTask{
FlushAllRequest: &milvuspb.FlushAllRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Flush,
MsgID: 1,
Timestamp: uint64(time.Now().UnixNano()),
SourceID: 1,
},
},
}
// Test TraceCtx with nil context
ctx := task.TraceCtx()
assert.Nil(t, ctx)
// Test ID with the Base populated above
id := task.ID()
assert.Equal(t, UniqueID(1), id)
// Test Type with the Base populated above
msgType := task.Type()
assert.Equal(t, commonpb.MsgType_Flush, msgType)
}
func TestFlushAllTaskConstantValues(t *testing.T) {
// Test that task name constant is correct
assert.Equal(t, "FlushAllTask", FlushAllTaskName)
// Test task name method returns correct constant
task := &flushAllTask{}
assert.Equal(t, FlushAllTaskName, task.Name())
}

View File

@ -33,6 +33,7 @@ import (
"github.com/milvus-io/milvus/pkg/v2/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/tsoutil"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)
type flushTaskByStreamingService struct {
@ -63,7 +64,7 @@ func (t *flushTaskByStreamingService) Execute(ctx context.Context) error {
// Ask the streamingnode to flush segments.
for _, vchannel := range vchannels {
segmentIDs, err := t.sendManualFlushToWAL(ctx, collID, vchannel, flushTs)
segmentIDs, err := sendManualFlushToWAL(ctx, collID, vchannel, flushTs)
if err != nil {
return err
}
@ -84,14 +85,14 @@ func (t *flushTaskByStreamingService) Execute(ctx context.Context) error {
}
// Remove the flushed segments from onFlushSegmentIDs
for _, segID := range resp.GetFlushSegmentIDs() {
for i, id := range onFlushSegmentIDs {
if id == segID {
onFlushSegmentIDs = append(onFlushSegmentIDs[:i], onFlushSegmentIDs[i+1:]...)
break
}
flushedSegmentSet := typeutil.NewUniqueSet(resp.GetFlushSegmentIDs()...)
filteredSegments := make([]int64, 0, len(onFlushSegmentIDs))
for _, id := range onFlushSegmentIDs {
if !flushedSegmentSet.Contain(id) {
filteredSegments = append(filteredSegments, id)
}
}
onFlushSegmentIDs = filteredSegments
coll2Segments[collName] = &schemapb.LongArray{Data: onFlushSegmentIDs}
flushColl2Segments[collName] = &schemapb.LongArray{Data: resp.GetFlushSegmentIDs()}
@ -99,8 +100,6 @@ func (t *flushTaskByStreamingService) Execute(ctx context.Context) error {
coll2FlushTs[collName] = flushTs
channelCps = resp.GetChannelCps()
}
// TODO: refactor to use streaming service
SendReplicateMessagePack(ctx, t.replicateMsgStream, t.FlushRequest)
t.result = &milvuspb.FlushResponse{
Status: merr.Success(),
DbName: t.GetDbName(),
@ -114,7 +113,7 @@ func (t *flushTaskByStreamingService) Execute(ctx context.Context) error {
}
// sendManualFlushToWAL sends a manual flush message to WAL.
func (t *flushTaskByStreamingService) sendManualFlushToWAL(ctx context.Context, collID int64, vchannel string, flushTs uint64) ([]int64, error) {
func sendManualFlushToWAL(ctx context.Context, collID int64, vchannel string, flushTs uint64) ([]int64, error) {
logger := log.Ctx(ctx).With(zap.Int64("collectionID", collID), zap.String("vchannel", vchannel))
flushMsg, err := message.NewManualFlushMessageBuilderV2().
WithVChannel(vchannel).

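The hunk above swaps the quadratic delete-in-place loop for a single set-difference pass. typeutil.NewUniqueSet is, to a first approximation, a convenience over the map-of-empty-struct idiom sketched here:

```go
package main

import "fmt"

// Build a lookup set of the flushed IDs once, then keep only the IDs outside
// it — O(n+m) instead of the O(n*m) delete-inside-a-loop it replaces.
func main() {
	onFlush := []int64{1, 2, 3, 4, 5}
	flushed := []int64{2, 4}

	flushedSet := make(map[int64]struct{}, len(flushed))
	for _, id := range flushed {
		flushedSet[id] = struct{}{}
	}

	filtered := make([]int64, 0, len(onFlush))
	for _, id := range onFlush {
		if _, ok := flushedSet[id]; !ok {
			filtered = append(filtered, id)
		}
	}
	fmt.Println(filtered) // [1 3 5]
}
```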
View File

@ -14,7 +14,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.7
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc
github.com/nats-io/nats-server/v2 v2.10.12
github.com/nats-io/nats.go v1.34.1
github.com/panjf2000/ants/v2 v2.11.3

View File

@ -498,8 +498,8 @@ github.com/milvus-io/cgosymbolizer v0.0.0-20240722103217-b7dee0e50119 h1:9VXijWu
github.com/milvus-io/cgosymbolizer v0.0.0-20240722103217-b7dee0e50119/go.mod h1:DvXTE/K/RtHehxU8/GtDs4vFtfw64jJ3PaCnFri8CRg=
github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b h1:TfeY0NxYxZzUfIfYe5qYDBzt4ZYRqzUjTR6CvUzjat8=
github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b/go.mod h1:iwW+9cWfIzzDseEBCCeDSN5SD16Tidvy8cwQ7ZY8Qj4=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18 h1:BUMCAa4vS7apwQYVArHy2GTHdX3hUPAXh/ExyovJlZY=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc h1:WMkuIc+PJDma8JZjhwC4V91GDP7lLO1XPUU23PoXNQ0=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/pulsar-client-go v0.12.1 h1:O2JZp1tsYiO7C0MQ4hrUY/aJXnn2Gry6hpm7UodghmE=
github.com/milvus-io/pulsar-client-go v0.12.1/go.mod h1:dkutuH4oS2pXiGm+Ti7fQZ4MRjrMPZ8IJeEGAWMeckk=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=

View File

@ -34,6 +34,8 @@ service DataCoord {
rpc GetStatisticsChannel(internal.GetStatisticsChannelRequest) returns(milvus.StringResponse){}
rpc Flush(FlushRequest) returns (FlushResponse) {}
rpc FlushAll(FlushAllRequest) returns(FlushAllResponse) {}
// AllocSegment allocates a new growing segment and adds it into the segment meta.
rpc AllocSegment(AllocSegmentRequest) returns (AllocSegmentResponse) {}
@ -161,6 +163,43 @@ message FlushResponse {
map<string, msg.MsgPosition> channel_cps = 8;
}
message FlushResult {
int64 collectionID = 1;
repeated int64 segmentIDs = 2; // newly sealed segments
repeated int64 flushSegmentIDs = 3; // previously flushed segments
int64 timeOfSeal = 4;
uint64 flush_ts = 5;
map<string, msg.MsgPosition> channel_cps = 6;
string db_name = 7; // database name for this flush result
string collection_name = 8; // collection name for this flush result
}
message FlushAllRequest {
common.MsgBase base = 1;
string dbName = 2; // Deprecated: use flush_targets instead
// List of specific databases and collections to flush
repeated FlushAllTarget flush_targets = 3;
}
// Specific collection to flush with database context
// This message allows targeting specific collections within a database for flush operations
message FlushAllTarget {
// Database name to target for flush operation
string db_name = 1;
// Collections within this database to flush
// If empty, flush all collections in this database
repeated int64 collection_ids = 3;
}
message FlushAllResponse {
common.Status status = 1;
uint64 flushTs = 2;
// Detailed flush results for each target
repeated FlushResult flush_results = 3;
}
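The three messages above carry the granular contract: a FlushAllRequest lists FlushAllTarget entries, and the FlushAllResponse reports one FlushResult per flushed collection. A hedged sketch of building such a request from the generated Go types; the datapb import path and the helper name are assumptions for illustration, and field names follow protoc-gen-go's usual mapping:

package main

import (
	"github.com/milvus-io/milvus/pkg/v2/proto/datapb" // assumed location of the generated package
)

// buildFlushAllRequest targets two named collections in "sales" and every
// collection in "audit" (empty collection_ids means the whole database).
// Clients that only set the deprecated db_name field keep the old
// database-level behavior.
func buildFlushAllRequest() *datapb.FlushAllRequest {
	return &datapb.FlushAllRequest{
		FlushTargets: []*datapb.FlushAllTarget{
			{DbName: "sales", CollectionIds: []int64{100, 101}},
			{DbName: "audit"},
		},
	}
}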
message FlushChannelsRequest {
common.MsgBase base = 1;
uint64 flush_ts = 2;

File diff suppressed because it is too large

View File

@ -27,6 +27,7 @@ const (
DataCoord_GetTimeTickChannel_FullMethodName = "/milvus.proto.data.DataCoord/GetTimeTickChannel"
DataCoord_GetStatisticsChannel_FullMethodName = "/milvus.proto.data.DataCoord/GetStatisticsChannel"
DataCoord_Flush_FullMethodName = "/milvus.proto.data.DataCoord/Flush"
DataCoord_FlushAll_FullMethodName = "/milvus.proto.data.DataCoord/FlushAll"
DataCoord_AllocSegment_FullMethodName = "/milvus.proto.data.DataCoord/AllocSegment"
DataCoord_AssignSegmentID_FullMethodName = "/milvus.proto.data.DataCoord/AssignSegmentID"
DataCoord_GetSegmentInfo_FullMethodName = "/milvus.proto.data.DataCoord/GetSegmentInfo"
@ -83,6 +84,7 @@ type DataCoordClient interface {
GetTimeTickChannel(ctx context.Context, in *internalpb.GetTimeTickChannelRequest, opts ...grpc.CallOption) (*milvuspb.StringResponse, error)
GetStatisticsChannel(ctx context.Context, in *internalpb.GetStatisticsChannelRequest, opts ...grpc.CallOption) (*milvuspb.StringResponse, error)
Flush(ctx context.Context, in *FlushRequest, opts ...grpc.CallOption) (*FlushResponse, error)
FlushAll(ctx context.Context, in *FlushAllRequest, opts ...grpc.CallOption) (*FlushAllResponse, error)
// AllocSegment allocates a new growing segment and adds it into segment meta.
AllocSegment(ctx context.Context, in *AllocSegmentRequest, opts ...grpc.CallOption) (*AllocSegmentResponse, error)
// Deprecated: Do not use.
@ -182,6 +184,15 @@ func (c *dataCoordClient) Flush(ctx context.Context, in *FlushRequest, opts ...g
return out, nil
}
func (c *dataCoordClient) FlushAll(ctx context.Context, in *FlushAllRequest, opts ...grpc.CallOption) (*FlushAllResponse, error) {
out := new(FlushAllResponse)
err := c.cc.Invoke(ctx, DataCoord_FlushAll_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
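A short usage sketch for the generated client method, assuming an already-dialed DataCoordClient; the flushAndReport helper and the datapb import path are illustrative, not part of this commit:

package main

import (
	"context"
	"log"

	"github.com/milvus-io/milvus/pkg/v2/proto/datapb" // assumed location of the generated package
)

// flushAndReport issues the new RPC and walks the per-collection
// FlushResult entries carried in flush_results.
func flushAndReport(ctx context.Context, client datapb.DataCoordClient, req *datapb.FlushAllRequest) error {
	resp, err := client.FlushAll(ctx, req)
	if err != nil {
		return err
	}
	for _, r := range resp.GetFlushResults() {
		log.Printf("db=%s collection=%d sealed=%d flushTs=%d",
			r.GetDbName(), r.GetCollectionID(), len(r.GetSegmentIDs()), resp.GetFlushTs())
	}
	return nil
}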
func (c *dataCoordClient) AllocSegment(ctx context.Context, in *AllocSegmentRequest, opts ...grpc.CallOption) (*AllocSegmentResponse, error) {
out := new(AllocSegmentResponse)
err := c.cc.Invoke(ctx, DataCoord_AllocSegment_FullMethodName, in, out, opts...)
@ -605,6 +616,7 @@ type DataCoordServer interface {
GetTimeTickChannel(context.Context, *internalpb.GetTimeTickChannelRequest) (*milvuspb.StringResponse, error)
GetStatisticsChannel(context.Context, *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error)
Flush(context.Context, *FlushRequest) (*FlushResponse, error)
FlushAll(context.Context, *FlushAllRequest) (*FlushAllResponse, error)
// AllocSegment allocates a new growing segment and adds it into segment meta.
AllocSegment(context.Context, *AllocSegmentRequest) (*AllocSegmentResponse, error)
// Deprecated: Do not use.
@ -676,6 +688,9 @@ func (UnimplementedDataCoordServer) GetStatisticsChannel(context.Context, *inter
func (UnimplementedDataCoordServer) Flush(context.Context, *FlushRequest) (*FlushResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented")
}
func (UnimplementedDataCoordServer) FlushAll(context.Context, *FlushAllRequest) (*FlushAllResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method FlushAll not implemented")
}
func (UnimplementedDataCoordServer) AllocSegment(context.Context, *AllocSegmentRequest) (*AllocSegmentResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AllocSegment not implemented")
}
@ -898,6 +913,24 @@ func _DataCoord_Flush_Handler(srv interface{}, ctx context.Context, dec func(int
return interceptor(ctx, in, info, handler)
}
func _DataCoord_FlushAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(FlushAllRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DataCoordServer).FlushAll(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: DataCoord_FlushAll_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DataCoordServer).FlushAll(ctx, req.(*FlushAllRequest))
}
return interceptor(ctx, in, info, handler)
}
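Every DataCoordServer implementation must now provide FlushAll. A minimal hedged skeleton of the dispatch; the mockDataCoord receiver is hypothetical, and the real datacoord fans flushes out per collection and fills in segment IDs, seal times, and channel checkpoints rather than returning stubs:

// FlushAll satisfies the new interface method by echoing the requested
// targets back as empty FlushResult stubs.
func (s *mockDataCoord) FlushAll(ctx context.Context, req *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
	results := make([]*datapb.FlushResult, 0)
	for _, target := range req.GetFlushTargets() {
		for _, collID := range target.GetCollectionIds() {
			results = append(results, &datapb.FlushResult{
				CollectionID: collID,
				DbName:       target.GetDbName(),
			})
		}
	}
	return &datapb.FlushAllResponse{
		Status:       merr.Success(),
		FlushResults: results,
	}, nil
}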
func _DataCoord_AllocSegment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AllocSegmentRequest)
if err := dec(in); err != nil {
@ -1749,6 +1782,10 @@ var DataCoord_ServiceDesc = grpc.ServiceDesc{
MethodName: "Flush",
Handler: _DataCoord_Flush_Handler,
},
{
MethodName: "FlushAll",
Handler: _DataCoord_FlushAll_Handler,
},
{
MethodName: "AllocSegment",
Handler: _DataCoord_AllocSegment_Handler,

View File

@ -3919,6 +3919,8 @@ type dataCoordConfig struct {
JSONKeyStatsMemoryBudgetInTantivy ParamItem `refreshable:"false"`
RequestTimeoutSeconds ParamItem `refreshable:"true"`
FlushAllMaxParallelTasks ParamItem `refreshable:"true"`
}
func (p *dataCoordConfig) init(base *BaseTable) {
@ -4965,6 +4967,15 @@ if param targetVecIndexVersion is not set, the default value is -1, which means
Export: true,
}
p.EnabledJSONKeyStatsInSort.Init(base.mgr)
p.FlushAllMaxParallelTasks = ParamItem{
Key: "dataCoord.flushAllMaxParallelTasks",
Version: "2.6.3",
DefaultValue: "100",
Doc: "The maximum number of parallel flush tasks for a FlushAll operation",
Export: false,
}
p.FlushAllMaxParallelTasks.Init(base.mgr)
}
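The new knob caps how many flush tasks run concurrently during a FlushAll. A sketch of how such a limit is typically enforced with errgroup's SetLimit; the flushAllBounded helper is an assumption for illustration, not the committed datacoord wiring:

package main

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// flushAllBounded runs one flush per collection but never more than
// maxParallel at a time; in datacoord the limit would come from
// Params.FlushAllMaxParallelTasks.GetAsInt() (default 100).
func flushAllBounded(ctx context.Context, collIDs []int64, maxParallel int,
	flushOne func(context.Context, int64) error,
) error {
	g, gctx := errgroup.WithContext(ctx)
	g.SetLimit(maxParallel)
	for _, id := range collIDs {
		id := id // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error { return flushOne(gctx, id) })
	}
	return g.Wait()
}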
// /////////////////////////////////////////////////////////////////////////////

View File

@ -560,6 +560,8 @@ func TestComponentParam(t *testing.T) {
assert.Equal(t, 500*time.Second, Params.TaskCheckInterval.GetAsDuration(time.Second))
params.Save("datacoord.statsTaskTriggerCount", "3")
assert.Equal(t, 3, Params.StatsTaskTriggerCount.GetAsInt())
assert.Equal(t, 100, Params.FlushAllMaxParallelTasks.GetAsInt())
})
t.Run("test dataNodeConfig", func(t *testing.T) {

View File

@ -36,14 +36,14 @@ fi
# starting the timer
beginTime=`date +%s`
pushd cmd/tools
$TEST_CMD -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic ./...
$TEST_CMD -gcflags="all=-N -l" -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic ./...
if [ -f profile.out ]; then
grep -v kafka profile.out | grep -v planparserv2/generated | grep -v mocks | sed '1d' >> ../${FILE_COVERAGE_INFO}
rm profile.out
fi
popd
for d in $(go list ./internal/... | grep -v -e vendor -e kafka -e planparserv2/generated -e mocks); do
$TEST_CMD -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
$TEST_CMD -gcflags="all=-N -l" -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
grep -v kafka profile.out | grep -v planparserv2/generated | grep -v mocks | sed '1d' >> ${FILE_COVERAGE_INFO}
rm profile.out
@ -51,7 +51,7 @@ for d in $(go list ./internal/... | grep -v -e vendor -e kafka -e planparserv2/g
done
pushd pkg
for d in $(go list ./... | grep -v -e vendor -e kafka -e planparserv2/generated -e mocks); do
$TEST_CMD -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
$TEST_CMD -gcflags="all=-N -l" -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
grep -v kafka profile.out | grep -v planparserv2/generated | grep -v mocks | sed '1d' >> ../${FILE_COVERAGE_INFO}
rm profile.out
@ -61,7 +61,7 @@ popd
# milvusclient
pushd client
for d in $(go list ./... | grep -v -e vendor -e kafka -e planparserv2/generated -e mocks); do
$TEST_CMD -race -tags dynamic -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
$TEST_CMD -gcflags="all=-N -l" -race -tags dynamic -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
grep -v kafka profile.out | grep -v planparserv2/generated | grep -v mocks | sed '1d' >> ../${FILE_COVERAGE_INFO}
rm profile.out

View File

@ -60,75 +60,75 @@ done
function test_proxy()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/proxy/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/proxy/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/proxy/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/proxy/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_querynode()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/querynodev2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/querynode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/querynodev2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/querynode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_kv()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/kv/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/kv/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_mq()
{
go test -race -cover -tags dynamic,test $(go list "${MILVUS_DIR}/mq/..." | grep -v kafka) -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test $(go list "${MILVUS_DIR}/mq/..." | grep -v kafka) -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_storage()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/storage" -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/storage" -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_allocator()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/allocator/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/allocator/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_tso()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/tso/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/tso/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_util()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/funcutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/funcutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
pushd pkg
go test -race -cover -tags dynamic,test "${PKG_DIR}/util/retry/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/util/retry/..." -failfast -count=1 -ldflags="-r ${RPATH}"
popd
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/sessionutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/typeutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/importutilv2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/proxyutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/initcore/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/cgo/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/streamingutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/sessionutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/typeutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/importutilv2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/proxyutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/initcore/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/cgo/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/streamingutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_pkg()
{
pushd pkg
go test -race -cover -tags dynamic,test "${PKG_DIR}/common/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/config/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/log/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/mq/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/tracer/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/util/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/common/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/config/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/log/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/mq/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/tracer/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/util/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
popd
}
function test_datanode()
{
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/datanode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/datanode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/datanode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/datanode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
@ -139,40 +139,40 @@ go test -race -cover -tags dynamic,test "${MILVUS_DIR}/indexnode/..." -failfast
function test_rootcoord()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/rootcoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/rootcoord" -failfast -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/rootcoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/rootcoord" -failfast -ldflags="-r ${RPATH}"
}
function test_datacoord()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/datacoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/datacoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/datacoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/datacoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_querycoord()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/querycoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/querycoordv2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/querycoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/querycoordv2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_metastore()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/metastore/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/metastore/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_cmd()
{
go test -race -cover -tags dynamic,test "${ROOT_DIR}/cmd/tools/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${ROOT_DIR}/cmd/tools/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_streaming()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/streamingcoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/streamingnode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/streamingutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/streamingcoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/streamingnode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/streamingutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
pushd pkg
go test -race -cover -tags dynamic,test "${PKG_DIR}/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
popd
}
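Both scripts now pass -gcflags="all=-N -l", which disables compiler optimization (-N) and inlining (-l) for the packages under test. A common motivation, inferred here rather than stated in the diff, is that monkey-patch style test doubles can only intercept functions the compiler has not inlined. A minimal illustration using the gomonkey library; the example is not from this repo:

package sample

import (
	"testing"

	"github.com/agiledragon/gomonkey/v2"
)

func Add(a, b int) int { return a + b }

// With optimizations on, the compiler may inline Add and the patch below
// silently never fires; -gcflags="all=-N -l" keeps the call site patchable.
func TestPatchNeedsNoInline(t *testing.T) {
	patches := gomonkey.ApplyFunc(Add, func(a, b int) int { return 42 })
	defer patches.Reset()
	if got := Add(1, 2); got != 42 {
		t.Fatalf("patch did not apply, got %d", got)
	}
}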

View File

@ -53,7 +53,7 @@ require (
github.com/kr/text v0.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18 // indirect
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect

View File

@ -326,8 +326,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18 h1:BUMCAa4vS7apwQYVArHy2GTHdX3hUPAXh/ExyovJlZY=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.18/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc h1:WMkuIc+PJDma8JZjhwC4V91GDP7lLO1XPUU23PoXNQ0=
github.com/milvus-io/milvus-proto/go-api/v2 v2.5.19-0.20250923105556-f95cf93c47bc/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus/pkg/v2 v2.5.7 h1:b45jq1s1v03AekFucs2/dkkXohB57gEx7gspJuAkfbY=
github.com/milvus-io/milvus/pkg/v2 v2.5.7/go.mod h1:pImw1IGNS7k/5yvlZV2tZi5vZu1VQRlQij+r39d+XnI=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=