enhance: Optimize FlushAll performance for multi-table scenarios (#43339)

Replace multiple per-table flush RPC calls with a single FlushAll RPC to
improve performance in multi-table scenarios.
issue: #43338
- Implement server-side FlushAll request processing in
DataCoord/MixCoord
- Add flushAllTask to handle unified flush operations across all tables
- Replace proxy-side per-table flush iteration with single RPC call
- Support both streaming and non-streaming service execution paths
- Add comprehensive unit tests for new FlushAll implementation
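
To illustrate the intent, here is a minimal Go sketch of the client-path change; the names below are illustrative stand-ins, not the exact Milvus APIs (the real client surface is types.MixCoordClient):

package main

import (
	"context"
	"fmt"
)

// Hypothetical client surface for the sketch.
type coordClient interface {
	Flush(ctx context.Context, collectionID int64) error
	FlushAll(ctx context.Context, dbName string) error
}

// Before: the proxy looped, paying one RPC round trip per collection.
func flushPerTable(ctx context.Context, c coordClient, collectionIDs []int64) error {
	for _, id := range collectionIDs {
		if err := c.Flush(ctx, id); err != nil {
			return err
		}
	}
	return nil
}

// After: one RPC; DataCoord fans out across collections server-side.
func flushAllOnce(ctx context.Context, c coordClient, dbName string) error {
	return c.FlushAll(ctx, dbName)
}

type fakeClient struct{}

func (fakeClient) Flush(ctx context.Context, collectionID int64) error { return nil }
func (fakeClient) FlushAll(ctx context.Context, dbName string) error   { return nil }

func main() {
	ctx := context.Background()
	fmt.Println(flushPerTable(ctx, fakeClient{}, []int64{1, 2, 3})) // N round trips
	fmt.Println(flushAllOnce(ctx, fakeClient{}, "default"))         // 1 round trip
}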

---------

Signed-off-by: Wei Liu <wei.liu@zilliz.com>

View File

@ -1077,3 +1077,7 @@ func (s *mixCoordImpl) GetQuotaMetrics(ctx context.Context, req *internalpb.GetQ
func (s *mixCoordImpl) ListLoadedSegments(ctx context.Context, req *querypb.ListLoadedSegmentsRequest) (*querypb.ListLoadedSegmentsResponse, error) {
return s.queryCoordServer.ListLoadedSegments(ctx, req)
}
func (s *mixCoordImpl) FlushAll(ctx context.Context, req *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
return s.datacoordServer.FlushAll(ctx, req)
}

View File

@ -6,14 +6,13 @@
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package coordinator
import (
@ -23,14 +22,19 @@ import (
"testing"
"time"
"github.com/bytedance/mockey"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/datacoord"
"github.com/milvus-io/milvus/internal/util/dependency"
kvfactory "github.com/milvus-io/milvus/internal/util/dependency/kv"
"github.com/milvus-io/milvus/internal/util/testutil"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/util/etcd"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
"github.com/milvus-io/milvus/pkg/v2/util/tikv"
)
@ -149,3 +153,39 @@ func TestMixcoord_DisableActiveStandby(t *testing.T) {
err = core.Stop()
assert.NoError(t, err)
}
func TestMixCoord_FlushAll(t *testing.T) {
t.Run("success", func(t *testing.T) {
mockey.PatchConvey("test flush all success", t, func() {
mockDataCoord := &datacoord.Server{}
coord := &mixCoordImpl{
datacoordServer: mockDataCoord,
}
expectedResp := &datapb.FlushAllResponse{
Status: merr.Success(),
FlushTs: 12345,
}
mockey.Mock((*datacoord.Server).FlushAll).Return(expectedResp, nil).Build()
resp, err := coord.FlushAll(context.Background(), &datapb.FlushAllRequest{})
assert.NoError(t, err)
assert.Equal(t, expectedResp, resp)
})
})
t.Run("failure", func(t *testing.T) {
mockey.PatchConvey("test flush all failure", t, func() {
mockDataCoord := &datacoord.Server{}
coord := &mixCoordImpl{
datacoordServer: mockDataCoord,
}
expectedErr := errors.New("mock flush all error")
mockey.Mock((*datacoord.Server).FlushAll).Return(nil, expectedErr).Build()
resp, err := coord.FlushAll(context.Background(), &datapb.FlushAllRequest{})
assert.Error(t, err)
assert.Equal(t, expectedErr, err)
assert.Nil(t, resp)
})
})
}

View File

@ -38,7 +38,7 @@ type Broker interface {
DescribeCollectionInternal(ctx context.Context, collectionID int64) (*milvuspb.DescribeCollectionResponse, error)
ShowPartitionsInternal(ctx context.Context, collectionID int64) ([]int64, error)
ShowCollections(ctx context.Context, dbName string) (*milvuspb.ShowCollectionsResponse, error)
ShowCollectionIDs(ctx context.Context) (*rootcoordpb.ShowCollectionIDsResponse, error)
ShowCollectionIDs(ctx context.Context, dbNames ...string) (*rootcoordpb.ShowCollectionIDsResponse, error)
ListDatabases(ctx context.Context) (*milvuspb.ListDatabasesResponse, error)
HasCollection(ctx context.Context, collectionID int64) (bool, error)
}
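
Making dbNames variadic keeps existing call sites source-compatible while letting FlushAll scope the listing to one database. A small self-contained sketch of both call styles against a stand-in interface (the real method returns a *rootcoordpb.ShowCollectionIDsResponse):

package main

import (
	"context"
	"fmt"
)

// Stand-in for the Broker change above.
type broker interface {
	ShowCollectionIDs(ctx context.Context, dbNames ...string) ([]int64, error)
}

type fakeBroker struct{}

func (fakeBroker) ShowCollectionIDs(ctx context.Context, dbNames ...string) ([]int64, error) {
	if len(dbNames) == 0 {
		return []int64{1, 2, 3}, nil // no filter: all databases
	}
	return []int64{1}, nil // scoped to the given databases
}

func main() {
	var b broker = fakeBroker{}
	all, _ := b.ShowCollectionIDs(context.Background())               // old call sites compile unchanged
	scoped, _ := b.ShowCollectionIDs(context.Background(), "test_db") // new scoped call used by FlushAll
	fmt.Println(all, scoped)
}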
@ -118,7 +118,7 @@ func (b *coordinatorBroker) ShowCollections(ctx context.Context, dbName string)
return resp, nil
}
func (b *coordinatorBroker) ShowCollectionIDs(ctx context.Context) (*rootcoordpb.ShowCollectionIDsResponse, error) {
func (b *coordinatorBroker) ShowCollectionIDs(ctx context.Context, dbNames ...string) (*rootcoordpb.ShowCollectionIDsResponse, error) {
ctx, cancel := context.WithTimeout(ctx, paramtable.Get().QueryCoordCfg.BrokerTimeout.GetAsDuration(time.Millisecond))
defer cancel()
resp, err := b.mixCoord.ShowCollectionIDs(ctx, &rootcoordpb.ShowCollectionIDsRequest{
@ -126,6 +126,7 @@ func (b *coordinatorBroker) ShowCollectionIDs(ctx context.Context) (*rootcoordpb
commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections),
),
AllowUnavailable: true,
DbNames: dbNames,
})
if err = merr.CheckRPCCall(resp, err); err != nil {

View File

@ -28,6 +28,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
)
@ -267,6 +268,67 @@ func (s *BrokerSuite) TestHasCollection() {
})
}
func (s *BrokerSuite) TestShowCollectionIDs() {
s.Run("normal", func() {
s.SetupTest()
dbName := "test_db"
expectedIDs := []int64{1, 2, 3}
s.mixCoord.EXPECT().ShowCollectionIDs(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, req *rootcoordpb.ShowCollectionIDsRequest) (*rootcoordpb.ShowCollectionIDsResponse, error) {
s.Equal([]string{dbName}, req.GetDbNames())
return &rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: dbName,
CollectionIDs: expectedIDs,
},
},
}, nil
})
resp, err := s.broker.ShowCollectionIDs(context.Background(), dbName)
s.NoError(err)
s.NotNil(resp)
s.Len(resp.GetDbCollections(), 1)
s.Equal(dbName, resp.GetDbCollections()[0].GetDbName())
s.ElementsMatch(expectedIDs, resp.GetDbCollections()[0].GetCollectionIDs())
s.TearDownTest()
})
s.Run("rpc_error", func() {
s.SetupTest()
dbName := "test_db"
expectedErr := errors.New("mock rpc error")
s.mixCoord.EXPECT().ShowCollectionIDs(mock.Anything, mock.Anything).Return(nil, expectedErr)
resp, err := s.broker.ShowCollectionIDs(context.Background(), dbName)
s.Error(err)
s.Equal(expectedErr, err)
s.Nil(resp)
s.TearDownTest()
})
s.Run("milvus_error", func() {
s.SetupTest()
dbName := "test_db"
expectedErr := merr.ErrDatabaseNotFound
s.mixCoord.EXPECT().ShowCollectionIDs(mock.Anything, mock.Anything).Return(&rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Status(expectedErr),
}, nil)
resp, err := s.broker.ShowCollectionIDs(context.Background(), dbName)
s.Error(err)
s.ErrorIs(err, expectedErr)
s.Nil(resp)
s.TearDownTest()
})
}
func TestBrokerSuite(t *testing.T) {
suite.Run(t, new(BrokerSuite))
}

View File

@ -198,9 +198,16 @@ func (_c *MockBroker_ListDatabases_Call) RunAndReturn(run func(context.Context)
return _c
}
// ShowCollectionIDs provides a mock function with given fields: ctx
func (_m *MockBroker) ShowCollectionIDs(ctx context.Context) (*rootcoordpb.ShowCollectionIDsResponse, error) {
ret := _m.Called(ctx)
// ShowCollectionIDs provides a mock function with given fields: ctx, dbNames
func (_m *MockBroker) ShowCollectionIDs(ctx context.Context, dbNames ...string) (*rootcoordpb.ShowCollectionIDsResponse, error) {
_va := make([]interface{}, len(dbNames))
for _i := range dbNames {
_va[_i] = dbNames[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
if len(ret) == 0 {
panic("no return value specified for ShowCollectionIDs")
@ -208,19 +215,19 @@ func (_m *MockBroker) ShowCollectionIDs(ctx context.Context) (*rootcoordpb.ShowC
var r0 *rootcoordpb.ShowCollectionIDsResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (*rootcoordpb.ShowCollectionIDsResponse, error)); ok {
return rf(ctx)
if rf, ok := ret.Get(0).(func(context.Context, ...string) (*rootcoordpb.ShowCollectionIDsResponse, error)); ok {
return rf(ctx, dbNames...)
}
if rf, ok := ret.Get(0).(func(context.Context) *rootcoordpb.ShowCollectionIDsResponse); ok {
r0 = rf(ctx)
if rf, ok := ret.Get(0).(func(context.Context, ...string) *rootcoordpb.ShowCollectionIDsResponse); ok {
r0 = rf(ctx, dbNames...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*rootcoordpb.ShowCollectionIDsResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(ctx)
if rf, ok := ret.Get(1).(func(context.Context, ...string) error); ok {
r1 = rf(ctx, dbNames...)
} else {
r1 = ret.Error(1)
}
@ -235,13 +242,21 @@ type MockBroker_ShowCollectionIDs_Call struct {
// ShowCollectionIDs is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockBroker_Expecter) ShowCollectionIDs(ctx interface{}) *MockBroker_ShowCollectionIDs_Call {
return &MockBroker_ShowCollectionIDs_Call{Call: _e.mock.On("ShowCollectionIDs", ctx)}
// - dbNames ...string
func (_e *MockBroker_Expecter) ShowCollectionIDs(ctx interface{}, dbNames ...interface{}) *MockBroker_ShowCollectionIDs_Call {
return &MockBroker_ShowCollectionIDs_Call{Call: _e.mock.On("ShowCollectionIDs",
append([]interface{}{ctx}, dbNames...)...)}
}
func (_c *MockBroker_ShowCollectionIDs_Call) Run(run func(ctx context.Context)) *MockBroker_ShowCollectionIDs_Call {
func (_c *MockBroker_ShowCollectionIDs_Call) Run(run func(ctx context.Context, dbNames ...string)) *MockBroker_ShowCollectionIDs_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context))
variadicArgs := make([]string, len(args)-1)
for i, a := range args[1:] {
if a != nil {
variadicArgs[i] = a.(string)
}
}
run(args[0].(context.Context), variadicArgs...)
})
return _c
}
@ -251,7 +266,7 @@ func (_c *MockBroker_ShowCollectionIDs_Call) Return(_a0 *rootcoordpb.ShowCollect
return _c
}
func (_c *MockBroker_ShowCollectionIDs_Call) RunAndReturn(run func(context.Context) (*rootcoordpb.ShowCollectionIDsResponse, error)) *MockBroker_ShowCollectionIDs_Call {
func (_c *MockBroker_ShowCollectionIDs_Call) RunAndReturn(run func(context.Context, ...string) (*rootcoordpb.ShowCollectionIDsResponse, error)) *MockBroker_ShowCollectionIDs_Call {
_c.Call.Return(run)
return _c
}

View File

@ -207,6 +207,10 @@ func (m *mockMixCoord) ListLoadedSegments(ctx context.Context, req *querypb.List
}, nil
}
func (m *mockMixCoord) FlushAll(ctx context.Context, req *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
panic("implement me")
}
func newMockMixCoord() *mockMixCoord {
return &mockMixCoord{state: commonpb.StateCode_Healthy}
}

View File

@ -27,6 +27,7 @@ import (
"github.com/samber/lo"
"go.opentelemetry.io/otel"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
@ -85,18 +86,40 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
}, nil
}
channelCPs := make(map[string]*msgpb.MsgPosition, 0)
coll, err := s.handler.GetCollection(ctx, req.GetCollectionID())
// generate a timestamp timeOfSeal; all data before timeOfSeal is guaranteed to be sealed or flushed
ts, err := s.allocator.AllocTimestamp(ctx)
if err != nil {
log.Warn("unable to alloc timestamp", zap.Error(err))
return nil, err
}
flushResult, err := s.flushCollection(ctx, req.GetCollectionID(), ts, req.GetSegmentIDs())
if err != nil {
log.Warn("fail to get collection", zap.Error(err))
return &datapb.FlushResponse{
Status: merr.Status(err),
}, nil
}
return &datapb.FlushResponse{
Status: merr.Success(),
DbID: req.GetDbID(),
CollectionID: req.GetCollectionID(),
SegmentIDs: flushResult.GetSegmentIDs(),
TimeOfSeal: flushResult.GetTimeOfSeal(),
FlushSegmentIDs: flushResult.GetFlushSegmentIDs(),
FlushTs: flushResult.GetFlushTs(),
ChannelCps: flushResult.GetChannelCps(),
}, nil
}
func (s *Server) flushCollection(ctx context.Context, collectionID UniqueID, flushTs uint64, toFlushSegments []UniqueID) (*datapb.FlushResult, error) {
channelCPs := make(map[string]*msgpb.MsgPosition, 0)
coll, err := s.handler.GetCollection(ctx, collectionID)
if err != nil {
log.Warn("fail to get collection", zap.Error(err))
return nil, err
}
if coll == nil {
return &datapb.FlushResponse{
Status: merr.Status(merr.WrapErrCollectionNotFound(req.GetCollectionID())),
}, nil
return nil, merr.WrapErrCollectionNotFound(collectionID)
}
// channel checkpoints must be gotten before sealSegment, make sure checkpoints is earlier than segment's endts
for _, vchannel := range coll.VChannelNames {
@ -104,26 +127,14 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
channelCPs[vchannel] = cp
}
// generate a timestamp timeOfSeal; all data before timeOfSeal is guaranteed to be sealed or flushed
ts, err := s.allocator.AllocTimestamp(ctx)
if err != nil {
log.Warn("unable to alloc timestamp", zap.Error(err))
return &datapb.FlushResponse{
Status: merr.Status(err),
}, nil
}
timeOfSeal, _ := tsoutil.ParseTS(ts)
timeOfSeal, _ := tsoutil.ParseTS(flushTs)
sealedSegmentsIDDict := make(map[UniqueID]bool)
if !streamingutil.IsStreamingServiceEnabled() {
for _, channel := range coll.VChannelNames {
sealedSegmentIDs, err := s.segmentManager.SealAllSegments(ctx, channel, req.GetSegmentIDs())
sealedSegmentIDs, err := s.segmentManager.SealAllSegments(ctx, channel, toFlushSegments)
if err != nil {
return &datapb.FlushResponse{
Status: merr.Status(errors.Wrapf(err, "failed to flush collection %d",
req.GetCollectionID())),
}, nil
return nil, errors.Wrapf(err, "failed to flush collection %d", collectionID)
}
for _, sealedSegmentID := range sealedSegmentIDs {
sealedSegmentsIDDict[sealedSegmentID] = true
@ -131,7 +142,7 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
}
}
segments := s.meta.GetSegmentsOfCollection(ctx, req.GetCollectionID())
segments := s.meta.GetSegmentsOfCollection(ctx, collectionID)
flushSegmentIDs := make([]UniqueID, 0, len(segments))
for _, segment := range segments {
if segment != nil &&
@ -145,10 +156,10 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
if !streamingutil.IsStreamingServiceEnabled() {
var isUnimplemented bool
err = retry.Do(ctx, func() error {
nodeChannels := s.channelManager.GetNodeChannelsByCollectionID(req.GetCollectionID())
nodeChannels := s.channelManager.GetNodeChannelsByCollectionID(collectionID)
for nodeID, channelNames := range nodeChannels {
err = s.cluster.FlushChannels(ctx, nodeID, ts, channelNames)
err = s.cluster.FlushChannels(ctx, nodeID, flushTs, channelNames)
if err != nil && errors.Is(err, merr.ErrServiceUnimplemented) {
isUnimplemented = true
return nil
@ -160,39 +171,92 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
return nil
}, retry.Attempts(60)) // about 3min
if err != nil {
return &datapb.FlushResponse{
Status: merr.Status(err),
}, nil
return nil, err
}
if isUnimplemented {
// For compatibility with rolling upgrades from version 2.2.x,
// fall back to the flush logic of version 2.2.x;
log.Warn("DataNode FlushChannels unimplemented", zap.Error(err))
ts = 0
flushTs = 0
}
}
log.Info("flush response with segments",
zap.Int64("collectionID", req.GetCollectionID()),
zap.Int64("collectionID", collectionID),
zap.Int64s("sealSegments", lo.Keys(sealedSegmentsIDDict)),
zap.Int("flushedSegmentsCount", len(flushSegmentIDs)),
zap.Time("timeOfSeal", timeOfSeal),
zap.Uint64("flushTs", ts),
zap.Time("flushTs in time", tsoutil.PhysicalTime(ts)))
zap.Uint64("flushTs", flushTs),
zap.Time("flushTs in time", tsoutil.PhysicalTime(flushTs)))
return &datapb.FlushResponse{
Status: merr.Success(),
DbID: req.GetDbID(),
CollectionID: req.GetCollectionID(),
return &datapb.FlushResult{
CollectionID: collectionID,
SegmentIDs: lo.Keys(sealedSegmentsIDDict),
TimeOfSeal: timeOfSeal.Unix(),
FlushSegmentIDs: flushSegmentIDs,
FlushTs: ts,
FlushTs: flushTs,
ChannelCps: channelCPs,
}, nil
}
func (s *Server) FlushAll(ctx context.Context, req *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
log := log.Ctx(ctx)
log.Info("receive flushAll request")
ctx, sp := otel.Tracer(typeutil.DataCoordRole).Start(ctx, "DataCoord-FlushAll")
defer sp.End()
if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
log.Info("server is not healthy", zap.Error(err), zap.Any("stateCode", s.GetStateCode()))
return &datapb.FlushAllResponse{
Status: merr.Status(err),
}, nil
}
resp, err := s.broker.ShowCollectionIDs(ctx, req.GetDbName())
if err != nil {
return &datapb.FlushAllResponse{
Status: merr.Status(err),
}, nil
}
// generate a timestamp timeOfSeal; all data before timeOfSeal is guaranteed to be sealed or flushed
ts, err := s.allocator.AllocTimestamp(ctx)
if err != nil {
log.Warn("unable to alloc timestamp", zap.Error(err))
return nil, err
}
dbCollections := resp.GetDbCollections()
wg := errgroup.Group{}
// limit the number of concurrent goroutines to 100
wg.SetLimit(100)
for _, dbCollection := range dbCollections {
for _, collectionID := range dbCollection.GetCollectionIDs() {
cid := collectionID
wg.Go(func() error {
_, err := s.flushCollection(ctx, cid, ts, nil)
if err != nil {
log.Warn("failed to flush collection", zap.Int64("collectionID", cid), zap.Error(err))
return err
}
return nil
})
}
}
err = wg.Wait()
if err != nil {
return &datapb.FlushAllResponse{
Status: merr.Status(err),
}, nil
}
return &datapb.FlushAllResponse{
Status: merr.Success(),
FlushTs: ts,
}, nil
}
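
The fan-out above bounds concurrency with errgroup's SetLimit. A self-contained sketch of the same pattern, where the goroutine body is a stand-in for s.flushCollection:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	ctx := context.Background()
	collectionIDs := []int64{1, 2, 3} // placeholder IDs

	wg := errgroup.Group{}
	wg.SetLimit(100) // at most 100 flushes in flight, as in FlushAll above
	for _, collectionID := range collectionIDs {
		cid := collectionID // capture loop variable (pre-Go 1.22 semantics)
		wg.Go(func() error {
			// stand-in for s.flushCollection(ctx, cid, ts, nil)
			fmt.Println("flushing collection", cid, "ctx err:", ctx.Err())
			return nil
		})
	}
	// Wait returns the first non-nil error; remaining goroutines still finish.
	if err := wg.Wait(); err != nil {
		fmt.Println("flush all failed:", err)
	}
}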
// AssignSegmentID applies for segment IDs and makes allocations for records.
func (s *Server) AssignSegmentID(ctx context.Context, req *datapb.AssignSegmentIDRequest) (*datapb.AssignSegmentIDResponse, error) {
log := log.Ctx(ctx)

View File

@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/bytedance/mockey"
"github.com/cockroachdb/errors"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
@ -33,6 +34,7 @@ import (
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/proto/internalpb"
"github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/metautil"
@ -1837,3 +1839,165 @@ func (s *GcControlServiceSuite) TestTimeoutCtx() {
func TestGcControlService(t *testing.T) {
suite.Run(t, new(GcControlServiceSuite))
}
func TestFlushAll(t *testing.T) {
ctx := context.Background()
dbName := "test_db"
req := &datapb.FlushAllRequest{
DbName: dbName,
}
// Test normal case
mockey.PatchConvey("normal case", t, func() {
testServer := &Server{
allocator: allocator.NewRootCoordAllocator(nil),
broker: broker.NewCoordinatorBroker(nil),
}
// Mock GetStateCode
mockey.Mock(mockey.GetMethod(testServer, "GetStateCode")).
Return(commonpb.StateCode_Healthy).Build()
// Mock AllocTimestamp
expectedTs := uint64(12345)
mockey.Mock(mockey.GetMethod(testServer.allocator, "AllocTimestamp")).
Return(expectedTs, nil).Build()
// Mock broker.ShowCollectionIDs
mockDbCollections := []*rootcoordpb.DBCollections{
{
DbName: dbName,
CollectionIDs: []int64{1, 2, 3},
},
}
showCollectionResp := &rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: mockDbCollections,
}
mockey.Mock(mockey.GetMethod(testServer.broker, "ShowCollectionIDs")).
Return(showCollectionResp, nil).Build()
// Mock flushCollection
flushResult := &datapb.FlushResult{
CollectionID: 1,
SegmentIDs: []int64{10, 11},
TimeOfSeal: 123456789,
FlushSegmentIDs: []int64{20, 21},
FlushTs: expectedTs,
ChannelCps: make(map[string]*msgpb.MsgPosition),
}
mockey.Mock((*Server).flushCollection).Return(flushResult, nil).Build()
// Execute test
resp, err := testServer.FlushAll(ctx, req)
// Verify results
assert.NoError(t, err)
assert.NotNil(t, resp)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, int64(expectedTs), int64(resp.GetFlushTs()))
})
// Test server unhealthy case
mockey.PatchConvey("server unhealthy", t, func() {
testServer := &Server{
allocator: allocator.NewRootCoordAllocator(nil),
broker: broker.NewCoordinatorBroker(nil),
}
mockey.Mock(mockey.GetMethod(testServer, "GetStateCode")).
Return(commonpb.StateCode_Abnormal).Build()
resp, err := testServer.FlushAll(ctx, req)
assert.NoError(t, err)
assert.NotNil(t, resp)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
// Test ShowCollectionIDs failed case
mockey.PatchConvey("ShowCollectionIDs failed", t, func() {
testServer := &Server{
allocator: allocator.NewRootCoordAllocator(nil),
broker: broker.NewCoordinatorBroker(nil),
}
mockey.Mock(mockey.GetMethod(testServer, "GetStateCode")).
Return(commonpb.StateCode_Healthy).Build()
expectedErr := errors.New("mock ShowCollectionIDs error")
mockey.Mock(mockey.GetMethod(testServer.broker, "ShowCollectionIDs")).
Return(nil, expectedErr).Build()
resp, err := testServer.FlushAll(ctx, req)
assert.NoError(t, err)
assert.NotNil(t, resp)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
// Test AllocTimestamp failed case
mockey.PatchConvey("AllocTimestamp failed", t, func() {
testServer := &Server{
allocator: allocator.NewRootCoordAllocator(nil),
broker: broker.NewCoordinatorBroker(nil),
}
mockey.Mock(mockey.GetMethod(testServer, "GetStateCode")).
Return(commonpb.StateCode_Healthy).Build()
mockDbCollections := []*rootcoordpb.DBCollections{
{
DbName: dbName,
CollectionIDs: []int64{1},
},
}
showCollectionResp := &rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: mockDbCollections,
}
mockey.Mock(mockey.GetMethod(testServer.broker, "ShowCollectionIDs")).
Return(showCollectionResp, nil).Build()
expectedErr := errors.New("mock AllocTimestamp error")
mockey.Mock(mockey.GetMethod(testServer.allocator, "AllocTimestamp")).
Return(uint64(0), expectedErr).Build()
resp, err := testServer.FlushAll(ctx, req)
assert.Error(t, err)
assert.Nil(t, resp)
})
// Test flushCollection failed case
mockey.PatchConvey("flushCollection failed", t, func() {
testServer := &Server{
allocator: allocator.NewRootCoordAllocator(nil),
broker: broker.NewCoordinatorBroker(nil),
}
mockey.Mock(mockey.GetMethod(testServer, "GetStateCode")).
Return(commonpb.StateCode_Healthy).Build()
mockDbCollections := []*rootcoordpb.DBCollections{
{
DbName: dbName,
CollectionIDs: []int64{1},
},
}
showCollectionResp := &rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: mockDbCollections,
}
mockey.Mock(mockey.GetMethod(testServer.broker, "ShowCollectionIDs")).
Return(showCollectionResp, nil).Build()
expectedTs := uint64(12345)
mockey.Mock(mockey.GetMethod(testServer.allocator, "AllocTimestamp")).
Return(expectedTs, nil).Build()
expectedErr := errors.New("mock flushCollection error")
mockey.Mock((*Server).flushCollection).Return(nil, expectedErr).Build()
resp, err := testServer.FlushAll(ctx, req)
assert.NoError(t, err)
assert.NotNil(t, resp)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
}

View File

@ -852,6 +852,17 @@ func (c *Client) Flush(ctx context.Context, req *datapb.FlushRequest, opts ...gr
})
}
func (c *Client) FlushAll(ctx context.Context, req *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
)
return wrapGrpcCall(ctx, c, func(client MixCoordClient) (*datapb.FlushAllResponse, error) {
return client.FlushAll(ctx, req)
})
}
// AssignSegmentID applies allocations for the specified Collection/Partition and related Channel Name (Virtual Channel)
//
// ctx is the context to control request deadline and cancellation

View File

@ -2588,3 +2588,58 @@ func Test_GetQuotaMetrics(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, `{"proxy": {"tasks": 100}}`, resp.GetMetricsInfo())
}
func Test_FlushAll(t *testing.T) {
paramtable.Init()
ctx := context.Background()
client, err := NewClient(ctx)
assert.NoError(t, err)
assert.NotNil(t, client)
defer client.Close()
mockDC := mocks.NewMockDataCoordClient(t)
mockmix := MixCoordClient{
DataCoordClient: mockDC,
}
mockGrpcClient := mocks.NewMockGrpcClient[MixCoordClient](t)
mockGrpcClient.EXPECT().Close().Return(nil)
mockGrpcClient.EXPECT().GetNodeID().Return(1)
mockGrpcClient.EXPECT().ReCall(mock1.Anything, mock1.Anything).RunAndReturn(func(ctx context.Context, f func(MixCoordClient) (interface{}, error)) (interface{}, error) {
return f(mockmix)
})
client.(*Client).grpcClient = mockGrpcClient
// test success
mockDC.EXPECT().FlushAll(mock1.Anything, mock1.Anything).Return(&datapb.FlushAllResponse{
Status: merr.Success(),
}, nil)
_, err = client.FlushAll(ctx, &datapb.FlushAllRequest{})
assert.Nil(t, err)
// test return error status
mockDC.ExpectedCalls = nil
mockDC.EXPECT().FlushAll(mock1.Anything, mock1.Anything).Return(&datapb.FlushAllResponse{
Status: merr.Status(merr.ErrServiceNotReady),
}, nil)
rsp, err := client.FlushAll(ctx, &datapb.FlushAllRequest{})
assert.NotEqual(t, int32(0), rsp.GetStatus().GetCode())
assert.Nil(t, err)
// test return error
mockDC.ExpectedCalls = nil
mockDC.EXPECT().FlushAll(mock1.Anything, mock1.Anything).Return(&datapb.FlushAllResponse{
Status: merr.Success(),
}, mockErr)
_, err = client.FlushAll(ctx, &datapb.FlushAllRequest{})
assert.NotNil(t, err)
// test ctx done
ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
defer cancel()
time.Sleep(20 * time.Millisecond)
_, err = client.FlushAll(ctx, &datapb.FlushAllRequest{})
assert.ErrorIs(t, err, context.DeadlineExceeded)
}

View File

@ -712,6 +712,10 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
return s.mixCoord.Flush(ctx, req)
}
func (s *Server) FlushAll(ctx context.Context, req *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
return s.mixCoord.FlushAll(ctx, req)
}
// AssignSegmentID requests to allocate segment space for insert
func (s *Server) AssignSegmentID(ctx context.Context, req *datapb.AssignSegmentIDRequest) (*datapb.AssignSegmentIDResponse, error) {
return s.mixCoord.AssignSegmentID(ctx, req)

View File

@ -752,4 +752,12 @@ func Test_NewServer(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, `{"proxy": {"tasks": 100}}`, resp.GetMetricsInfo())
})
t.Run("FlushAll", func(t *testing.T) {
req := &datapb.FlushAllRequest{}
mockMixCoord.EXPECT().FlushAll(mock.Anything, req).Return(&datapb.FlushAllResponse{Status: merr.Success()}, nil)
resp, err := server.FlushAll(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
}

View File

@ -626,6 +626,65 @@ func (_c *MockDataCoord_Flush_Call) RunAndReturn(run func(context.Context, *data
return _c
}
// FlushAll provides a mock function with given fields: _a0, _a1
func (_m *MockDataCoord) FlushAll(_a0 context.Context, _a1 *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
ret := _m.Called(_a0, _a1)
if len(ret) == 0 {
panic("no return value specified for FlushAll")
}
var r0 *datapb.FlushAllResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest) *datapb.FlushAllResponse); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*datapb.FlushAllResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *datapb.FlushAllRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockDataCoord_FlushAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FlushAll'
type MockDataCoord_FlushAll_Call struct {
*mock.Call
}
// FlushAll is a helper method to define mock.On call
// - _a0 context.Context
// - _a1 *datapb.FlushAllRequest
func (_e *MockDataCoord_Expecter) FlushAll(_a0 interface{}, _a1 interface{}) *MockDataCoord_FlushAll_Call {
return &MockDataCoord_FlushAll_Call{Call: _e.mock.On("FlushAll", _a0, _a1)}
}
func (_c *MockDataCoord_FlushAll_Call) Run(run func(_a0 context.Context, _a1 *datapb.FlushAllRequest)) *MockDataCoord_FlushAll_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*datapb.FlushAllRequest))
})
return _c
}
func (_c *MockDataCoord_FlushAll_Call) Return(_a0 *datapb.FlushAllResponse, _a1 error) *MockDataCoord_FlushAll_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockDataCoord_FlushAll_Call) RunAndReturn(run func(context.Context, *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error)) *MockDataCoord_FlushAll_Call {
_c.Call.Return(run)
return _c
}
// GcConfirm provides a mock function with given fields: _a0, _a1
func (_m *MockDataCoord) GcConfirm(_a0 context.Context, _a1 *datapb.GcConfirmRequest) (*datapb.GcConfirmResponse, error) {
ret := _m.Called(_a0, _a1)

View File

@ -818,6 +818,80 @@ func (_c *MockDataCoordClient_Flush_Call) RunAndReturn(run func(context.Context,
return _c
}
// FlushAll provides a mock function with given fields: ctx, in, opts
func (_m *MockDataCoordClient) FlushAll(ctx context.Context, in *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
if len(ret) == 0 {
panic("no return value specified for FlushAll")
}
var r0 *datapb.FlushAllResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) (*datapb.FlushAllResponse, error)); ok {
return rf(ctx, in, opts...)
}
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) *datapb.FlushAllResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*datapb.FlushAllResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockDataCoordClient_FlushAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FlushAll'
type MockDataCoordClient_FlushAll_Call struct {
*mock.Call
}
// FlushAll is a helper method to define mock.On call
// - ctx context.Context
// - in *datapb.FlushAllRequest
// - opts ...grpc.CallOption
func (_e *MockDataCoordClient_Expecter) FlushAll(ctx interface{}, in interface{}, opts ...interface{}) *MockDataCoordClient_FlushAll_Call {
return &MockDataCoordClient_FlushAll_Call{Call: _e.mock.On("FlushAll",
append([]interface{}{ctx, in}, opts...)...)}
}
func (_c *MockDataCoordClient_FlushAll_Call) Run(run func(ctx context.Context, in *datapb.FlushAllRequest, opts ...grpc.CallOption)) *MockDataCoordClient_FlushAll_Call {
_c.Call.Run(func(args mock.Arguments) {
variadicArgs := make([]grpc.CallOption, len(args)-2)
for i, a := range args[2:] {
if a != nil {
variadicArgs[i] = a.(grpc.CallOption)
}
}
run(args[0].(context.Context), args[1].(*datapb.FlushAllRequest), variadicArgs...)
})
return _c
}
func (_c *MockDataCoordClient_FlushAll_Call) Return(_a0 *datapb.FlushAllResponse, _a1 error) *MockDataCoordClient_FlushAll_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockDataCoordClient_FlushAll_Call) RunAndReturn(run func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) (*datapb.FlushAllResponse, error)) *MockDataCoordClient_FlushAll_Call {
_c.Call.Return(run)
return _c
}
// GcConfirm provides a mock function with given fields: ctx, in, opts
func (_m *MockDataCoordClient) GcConfirm(ctx context.Context, in *datapb.GcConfirmRequest, opts ...grpc.CallOption) (*datapb.GcConfirmResponse, error) {
_va := make([]interface{}, len(opts))

View File

@ -2581,6 +2581,65 @@ func (_c *MixCoord_Flush_Call) RunAndReturn(run func(context.Context, *datapb.Fl
return _c
}
// FlushAll provides a mock function with given fields: _a0, _a1
func (_m *MixCoord) FlushAll(_a0 context.Context, _a1 *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
ret := _m.Called(_a0, _a1)
if len(ret) == 0 {
panic("no return value specified for FlushAll")
}
var r0 *datapb.FlushAllResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest) *datapb.FlushAllResponse); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*datapb.FlushAllResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *datapb.FlushAllRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MixCoord_FlushAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FlushAll'
type MixCoord_FlushAll_Call struct {
*mock.Call
}
// FlushAll is a helper method to define mock.On call
// - _a0 context.Context
// - _a1 *datapb.FlushAllRequest
func (_e *MixCoord_Expecter) FlushAll(_a0 interface{}, _a1 interface{}) *MixCoord_FlushAll_Call {
return &MixCoord_FlushAll_Call{Call: _e.mock.On("FlushAll", _a0, _a1)}
}
func (_c *MixCoord_FlushAll_Call) Run(run func(_a0 context.Context, _a1 *datapb.FlushAllRequest)) *MixCoord_FlushAll_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*datapb.FlushAllRequest))
})
return _c
}
func (_c *MixCoord_FlushAll_Call) Return(_a0 *datapb.FlushAllResponse, _a1 error) *MixCoord_FlushAll_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MixCoord_FlushAll_Call) RunAndReturn(run func(context.Context, *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error)) *MixCoord_FlushAll_Call {
_c.Call.Return(run)
return _c
}
// GcConfirm provides a mock function with given fields: _a0, _a1
func (_m *MixCoord) GcConfirm(_a0 context.Context, _a1 *datapb.GcConfirmRequest) (*datapb.GcConfirmResponse, error) {
ret := _m.Called(_a0, _a1)

View File

@ -3266,6 +3266,80 @@ func (_c *MockMixCoordClient_Flush_Call) RunAndReturn(run func(context.Context,
return _c
}
// FlushAll provides a mock function with given fields: ctx, in, opts
func (_m *MockMixCoordClient) FlushAll(ctx context.Context, in *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
if len(ret) == 0 {
panic("no return value specified for FlushAll")
}
var r0 *datapb.FlushAllResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) (*datapb.FlushAllResponse, error)); ok {
return rf(ctx, in, opts...)
}
if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) *datapb.FlushAllResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*datapb.FlushAllResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockMixCoordClient_FlushAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FlushAll'
type MockMixCoordClient_FlushAll_Call struct {
*mock.Call
}
// FlushAll is a helper method to define mock.On call
// - ctx context.Context
// - in *datapb.FlushAllRequest
// - opts ...grpc.CallOption
func (_e *MockMixCoordClient_Expecter) FlushAll(ctx interface{}, in interface{}, opts ...interface{}) *MockMixCoordClient_FlushAll_Call {
return &MockMixCoordClient_FlushAll_Call{Call: _e.mock.On("FlushAll",
append([]interface{}{ctx, in}, opts...)...)}
}
func (_c *MockMixCoordClient_FlushAll_Call) Run(run func(ctx context.Context, in *datapb.FlushAllRequest, opts ...grpc.CallOption)) *MockMixCoordClient_FlushAll_Call {
_c.Call.Run(func(args mock.Arguments) {
variadicArgs := make([]grpc.CallOption, len(args)-2)
for i, a := range args[2:] {
if a != nil {
variadicArgs[i] = a.(grpc.CallOption)
}
}
run(args[0].(context.Context), args[1].(*datapb.FlushAllRequest), variadicArgs...)
})
return _c
}
func (_c *MockMixCoordClient_FlushAll_Call) Return(_a0 *datapb.FlushAllResponse, _a1 error) *MockMixCoordClient_FlushAll_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockMixCoordClient_FlushAll_Call) RunAndReturn(run func(context.Context, *datapb.FlushAllRequest, ...grpc.CallOption) (*datapb.FlushAllResponse, error)) *MockMixCoordClient_FlushAll_Call {
_c.Call.Return(run)
return _c
}
// GcConfirm provides a mock function with given fields: ctx, in, opts
func (_m *MockMixCoordClient) GcConfirm(ctx context.Context, in *datapb.GcConfirmRequest, opts ...grpc.CallOption) (*datapb.GcConfirmResponse, error) {
_va := make([]interface{}, len(opts))

View File

@ -28,7 +28,6 @@ import (
"github.com/cockroachdb/errors"
"github.com/gin-gonic/gin"
"github.com/samber/lo"
"github.com/tidwall/gjson"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
@ -4161,12 +4160,8 @@ func (node *Proxy) CalcDistance(ctx context.Context, request *milvuspb.CalcDista
}, nil
}
// FlushAll notifies Proxy to flush all collection's DML messages.
func (node *Proxy) FlushAll(ctx context.Context, req *milvuspb.FlushAllRequest) (*milvuspb.FlushAllResponse, error) {
ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-FlushAll")
defer sp.End()
log := log.With(zap.String("db", req.GetDbName()))
// FlushAll notifies data nodes to persist the data of all collections.
func (node *Proxy) FlushAll(ctx context.Context, request *milvuspb.FlushAllRequest) (*milvuspb.FlushAllResponse, error) {
resp := &milvuspb.FlushAllResponse{
Status: merr.Success(),
}
@ -4174,83 +4169,71 @@ func (node *Proxy) FlushAll(ctx context.Context, req *milvuspb.FlushAllRequest)
resp.Status = merr.Status(err)
return resp, nil
}
log.Info(rpcReceived("FlushAll"))
hasError := func(status *commonpb.Status, err error) bool {
if err != nil {
resp.Status = merr.Status(err)
log.Warn("FlushAll failed", zap.Error(err))
return true
}
if status.GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("FlushAll failed", zap.String("err", status.GetReason()))
resp.Status = status
return true
}
return false
ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-FlushAll")
defer sp.End()
ft := &flushAllTask{
ctx: ctx,
Condition: NewTaskCondition(ctx),
FlushAllRequest: request,
mixCoord: node.mixCoord,
replicateMsgStream: node.replicateMsgStream,
}
dbsRsp, err := node.mixCoord.ListDatabases(ctx, &milvuspb.ListDatabasesRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ListDatabases)),
})
if hasError(dbsRsp.GetStatus(), err) {
return resp, nil
}
dbNames := dbsRsp.DbNames
if req.GetDbName() != "" {
dbNames = lo.Filter(dbNames, func(dbName string, _ int) bool {
return dbName == req.GetDbName()
})
if len(dbNames) == 0 {
resp.Status = merr.Status(merr.WrapErrDatabaseNotFound(req.GetDbName()))
return resp, nil
method := "FlushAll"
tr := timerecord.NewTimeRecorder(method)
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.TotalLabel, request.GetDbName(), "").Inc()
log := log.Ctx(ctx).With(
zap.String("role", typeutil.ProxyRole),
zap.String("db", request.DbName))
log.Debug(rpcReceived(method))
var enqueuedTask task = ft
if streamingutil.IsStreamingServiceEnabled() {
enqueuedTask = &flushAllTaskbyStreamingService{
flushAllTask: ft,
chMgr: node.chMgr,
}
}
for _, dbName := range dbNames {
// Flush all collections to accelerate the flushAll progress
showColRsp, err := node.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections)),
DbName: dbName,
})
if hasError(showColRsp.GetStatus(), err) {
return resp, nil
}
group, ctx := errgroup.WithContext(ctx)
for _, collection := range showColRsp.GetCollectionNames() {
collection := collection
group.Go(func() error {
flushRsp, err := node.Flush(ctx, &milvuspb.FlushRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_Flush)),
DbName: dbName,
CollectionNames: []string{collection},
})
if err = merr.CheckRPCCall(flushRsp, err); err != nil {
return err
}
return nil
})
}
err = group.Wait()
if hasError(nil, err) {
return resp, nil
}
}
// allocate current ts as FlushAllTs
ts, err := node.tsoAllocator.AllocOne(ctx)
if err != nil {
log.Warn("FlushAll failed", zap.Error(err))
if err := node.sched.dcQueue.Enqueue(enqueuedTask); err != nil {
log.Warn(rpcFailedToEnqueue(method), zap.Error(err))
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.AbandonLabel, request.GetDbName(), "").Inc()
resp.Status = merr.Status(err)
return resp, nil
}
resp.FlushAllTs = ts
log.Debug(rpcEnqueued(method),
zap.Uint64("BeginTs", ft.BeginTs()),
zap.Uint64("EndTs", ft.EndTs()))
log.Info(rpcDone("FlushAll"), zap.Uint64("FlushAllTs", ts),
zap.Time("FlushAllTime", tsoutil.PhysicalTime(ts)))
return resp, nil
if err := ft.WaitToFinish(); err != nil {
log.Warn(
rpcFailedToWaitToFinish(method),
zap.Error(err),
zap.Uint64("BeginTs", ft.BeginTs()),
zap.Uint64("EndTs", ft.EndTs()))
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.FailLabel, request.GetDbName(), "").Inc()
resp.Status = merr.Status(err)
return resp, nil
}
log.Debug(
rpcDone(method),
zap.Uint64("FlushAllTs", ft.result.GetFlushTs()),
zap.Uint64("BeginTs", ft.BeginTs()),
zap.Uint64("EndTs", ft.EndTs()))
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.SuccessLabel, request.GetDbName(), "").Inc()
metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return &milvuspb.FlushAllResponse{
Status: merr.Success(),
FlushAllTs: ft.result.GetFlushTs(),
}, nil
}
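
The rewritten handler follows the proxy's usual task pattern: enqueue into the DDL queue, then block on WaitToFinish. A reduced sketch of that control flow, with the scheduler and Condition machinery simplified to a channel:

package main

import (
	"context"
	"fmt"
)

// Reduced stand-in for the flushAllTask lifecycle.
type sketchTask struct {
	done chan error
}

func (t *sketchTask) Execute(ctx context.Context) {
	// stand-in for the single FlushAll RPC to MixCoord/DataCoord
	t.done <- nil
}

func (t *sketchTask) WaitToFinish() error { return <-t.done }

func main() {
	t := &sketchTask{done: make(chan error, 1)}
	go t.Execute(context.Background()) // the scheduler's dcQueue normally drives this
	if err := t.WaitToFinish(); err != nil {
		fmt.Println("FlushAll failed:", err)
		return
	}
	fmt.Println("FlushAll done")
}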
// GetDdChannel returns the used channel for dd operations.

View File

@ -25,6 +25,7 @@ import (
"testing"
"time"
"github.com/bytedance/mockey"
"github.com/cockroachdb/errors"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
@ -40,6 +41,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/allocator"
grpcmixcoordclient "github.com/milvus-io/milvus/internal/distributed/mixcoord/client"
"github.com/milvus-io/milvus/internal/distributed/streaming"
mhttp "github.com/milvus-io/milvus/internal/http"
"github.com/milvus-io/milvus/internal/mocks"
@ -380,168 +382,163 @@ func TestProxy_InvalidResourceGroupName(t *testing.T) {
})
}
func TestProxy_FlushAll_DbCollection(t *testing.T) {
tests := []struct {
testName string
FlushRequest *milvuspb.FlushAllRequest
ExpectedSuccess bool
}{
{"flushAll", &milvuspb.FlushAllRequest{}, true},
{"flushAll set db", &milvuspb.FlushAllRequest{DbName: "default"}, true},
{"flushAll set db, db not exist", &milvuspb.FlushAllRequest{DbName: "default2"}, false},
}
cacheBak := globalMetaCache
defer func() { globalMetaCache = cacheBak }()
// set expectations
cache := NewMockCache(t)
cache.On("GetCollectionID",
mock.Anything, // context.Context
mock.AnythingOfType("string"),
mock.AnythingOfType("string"),
).Return(UniqueID(0), nil).Maybe()
cache.On("RemoveDatabase",
mock.Anything, // context.Context
mock.AnythingOfType("string"),
).Maybe()
globalMetaCache = cache
for _, test := range tests {
t.Run(test.testName, func(t *testing.T) {
factory := dependency.NewDefaultFactory(true)
ctx := context.Background()
paramtable.Init()
node, err := NewProxy(ctx, factory)
assert.NoError(t, err)
node.UpdateStateCode(commonpb.StateCode_Healthy)
node.tsoAllocator = &timestampAllocator{
tso: newMockTimestampAllocatorInterface(),
}
rpcRequestChannel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
node.replicateMsgStream, err = node.factory.NewMsgStream(node.ctx)
assert.NoError(t, err)
node.replicateMsgStream.AsProducer(ctx, []string{rpcRequestChannel})
Params.Save(Params.ProxyCfg.MaxTaskNum.Key, "1000")
node.sched, err = newTaskScheduler(ctx, node.tsoAllocator, node.factory)
assert.NoError(t, err)
err = node.sched.Start()
assert.NoError(t, err)
defer node.sched.Close()
node.mixCoord = mocks.NewMockMixCoordClient(t)
successStatus := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
node.mixCoord.(*mocks.MockMixCoordClient).EXPECT().Flush(mock.Anything, mock.Anything).
Return(&datapb.FlushResponse{Status: successStatus}, nil).Maybe()
node.mixCoord.(*mocks.MockMixCoordClient).EXPECT().ShowCollections(mock.Anything, mock.Anything).
Return(&milvuspb.ShowCollectionsResponse{Status: successStatus, CollectionNames: []string{"col-0"}}, nil).Maybe()
node.mixCoord.(*mocks.MockMixCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
Return(&milvuspb.ListDatabasesResponse{Status: successStatus, DbNames: []string{"default"}}, nil).Maybe()
resp, err := node.FlushAll(ctx, test.FlushRequest)
assert.NoError(t, err)
if test.ExpectedSuccess {
assert.True(t, merr.Ok(resp.GetStatus()))
} else {
assert.NotEqual(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
}
})
}
}
func TestProxy_FlushAll(t *testing.T) {
// createTestProxy creates a test proxy instance with all necessary setup
func createTestProxy() *Proxy {
factory := dependency.NewDefaultFactory(true)
ctx := context.Background()
paramtable.Init()
node, err := NewProxy(ctx, factory)
assert.NoError(t, err)
node, _ := NewProxy(ctx, factory)
node.UpdateStateCode(commonpb.StateCode_Healthy)
node.tsoAllocator = &timestampAllocator{
tso: newMockTimestampAllocatorInterface(),
}
rpcRequestChannel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
node.replicateMsgStream, err = node.factory.NewMsgStream(node.ctx)
assert.NoError(t, err)
node.replicateMsgStream, _ = node.factory.NewMsgStream(node.ctx)
node.replicateMsgStream.AsProducer(ctx, []string{rpcRequestChannel})
Params.Save(Params.ProxyCfg.MaxTaskNum.Key, "1000")
node.sched, err = newTaskScheduler(ctx, node.tsoAllocator, node.factory)
assert.NoError(t, err)
err = node.sched.Start()
assert.NoError(t, err)
defer node.sched.Close()
node.mixCoord = mocks.NewMockMixCoordClient(t)
node.sched, _ = newTaskScheduler(ctx, node.tsoAllocator, node.factory)
node.sched.Start()
cacheBak := globalMetaCache
defer func() { globalMetaCache = cacheBak }()
return node
}
// set expectations
cache := NewMockCache(t)
cache.On("GetCollectionID",
mock.Anything, // context.Context
mock.AnythingOfType("string"),
mock.AnythingOfType("string"),
).Return(UniqueID(0), nil).Once()
func TestProxy_FlushAll_NoDatabase(t *testing.T) {
mockey.PatchConvey("TestProxy_FlushAll_NoDatabase", t, func() {
// Mock global meta cache methods
globalMetaCache = &MetaCache{}
mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
return UniqueID(0), nil
}).Build()
mockey.Mock(globalMetaCache.RemoveDatabase).To(func(ctx context.Context, dbName string) error {
return nil
}).Build()
cache.On("RemoveDatabase",
mock.Anything, // context.Context
mock.AnythingOfType("string"),
).Maybe()
// Mock paramtable initialization
mockey.Mock(paramtable.Init).Return().Build()
mockey.Mock((*paramtable.ComponentParam).Save).Return().Build()
globalMetaCache = cache
successStatus := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
node.mixCoord.(*mocks.MockMixCoordClient).EXPECT().Flush(mock.Anything, mock.Anything).
Return(&datapb.FlushResponse{Status: successStatus}, nil).Maybe()
node.mixCoord.(*mocks.MockMixCoordClient).EXPECT().ShowCollections(mock.Anything, mock.Anything).
Return(&milvuspb.ShowCollectionsResponse{Status: successStatus, CollectionNames: []string{"col-0"}}, nil).Maybe()
node.mixCoord.(*mocks.MockMixCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
Return(&milvuspb.ListDatabasesResponse{Status: successStatus, DbNames: []string{"default"}}, nil).Maybe()
// Mock grpc mix coord client FlushAll method
successStatus := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
mockey.Mock((*grpcmixcoordclient.Client).FlushAll).To(func(ctx context.Context, req *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
return &datapb.FlushAllResponse{Status: successStatus}, nil
}).Build()
t.Run("FlushAll", func(t *testing.T) {
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
// Act: Execute test
node := createTestProxy()
defer node.sched.Close()
mixcoord := &grpcmixcoordclient.Client{}
node.mixCoord = mixcoord
resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{})
// Assert: Verify results
assert.NoError(t, err)
assert.True(t, merr.Ok(resp.GetStatus()))
})
}
t.Run("FlushAll failed, server is abnormal", func(t *testing.T) {
func TestProxy_FlushAll_WithDefaultDatabase(t *testing.T) {
mockey.PatchConvey("TestProxy_FlushAll_WithDefaultDatabase", t, func() {
// Mock global meta cache methods
globalMetaCache = &MetaCache{}
mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
return UniqueID(0), nil
}).Build()
mockey.Mock(globalMetaCache.RemoveDatabase).To(func(ctx context.Context, dbName string) error {
return nil
}).Build()
// Mock paramtable initialization
mockey.Mock(paramtable.Init).Return().Build()
mockey.Mock((*paramtable.ComponentParam).Save).Return().Build()
// Mock grpc mix coord client FlushAll method for default database
successStatus := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
mockey.Mock((*grpcmixcoordclient.Client).FlushAll).To(func(ctx context.Context, req *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
return &datapb.FlushAllResponse{Status: successStatus}, nil
}).Build()
// Act: Execute test
node := createTestProxy()
defer node.sched.Close()
mixcoord := &grpcmixcoordclient.Client{}
node.mixCoord = mixcoord
resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{DbName: "default"})
// Assert: Verify results
assert.NoError(t, err)
assert.True(t, merr.Ok(resp.GetStatus()))
})
}
func TestProxy_FlushAll_DatabaseNotExist(t *testing.T) {
mockey.PatchConvey("TestProxy_FlushAll_DatabaseNotExist", t, func() {
// Mock global meta cache methods
globalMetaCache = &MetaCache{}
mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
return UniqueID(0), nil
}).Build()
mockey.Mock(globalMetaCache.RemoveDatabase).To(func(ctx context.Context, dbName string) error {
return nil
}).Build()
// Mock paramtable initialization
mockey.Mock(paramtable.Init).Return().Build()
mockey.Mock((*paramtable.ComponentParam).Save).Return().Build()
// Mock grpc mix coord client FlushAll method for non-existent database
mockey.Mock((*grpcmixcoordclient.Client).FlushAll).To(func(ctx context.Context, req *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
return &datapb.FlushAllResponse{Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_MetaFailed}}, nil
}).Build()
// Act: Execute test
node := createTestProxy()
defer node.sched.Close()
mixcoord := &grpcmixcoordclient.Client{}
node.mixCoord = mixcoord
resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{DbName: "default2"})
// Assert: Verify results
assert.NoError(t, err)
assert.NotEqual(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
})
}
func TestProxy_FlushAll_ServerAbnormal(t *testing.T) {
mockey.PatchConvey("TestProxy_FlushAll_ServerAbnormal", t, func() {
// Mock global meta cache methods
globalMetaCache = &MetaCache{}
mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
return UniqueID(0), nil
}).Build()
mockey.Mock(globalMetaCache.RemoveDatabase).To(func(ctx context.Context, dbName string) error {
return nil
}).Build()
// Mock paramtable initialization
mockey.Mock(paramtable.Init).Return().Build()
mockey.Mock((*paramtable.ComponentParam).Save).Return().Build()
// Act: Execute test
node := createTestProxy()
defer node.sched.Close()
mixcoord := &grpcmixcoordclient.Client{}
node.mixCoord = mixcoord
// Set node state to abnormal
node.UpdateStateCode(commonpb.StateCode_Abnormal)
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{})
// Assert: Verify results
assert.NoError(t, err)
assert.ErrorIs(t, merr.Error(resp.GetStatus()), merr.ErrServiceNotReady)
node.UpdateStateCode(commonpb.StateCode_Healthy)
})
t.Run("FlushAll failed, RootCoord showCollections failed", func(t *testing.T) {
node.mixCoord.(*mocks.MockMixCoordClient).ExpectedCalls = nil
node.mixCoord.(*mocks.MockMixCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
Return(&milvuspb.ListDatabasesResponse{Status: successStatus, DbNames: []string{"default"}}, nil).Maybe()
node.mixCoord.(*mocks.MockMixCoordClient).EXPECT().ShowCollections(mock.Anything, mock.Anything).
Return(&milvuspb.ShowCollectionsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "mock err",
},
}, nil).Maybe()
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
})
t.Run("FlushAll failed, RootCoord showCollections failed", func(t *testing.T) {
node.mixCoord.(*mocks.MockMixCoordClient).ExpectedCalls = nil
node.mixCoord.(*mocks.MockMixCoordClient).EXPECT().ListDatabases(mock.Anything, mock.Anything).
Return(&milvuspb.ListDatabasesResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "mock err",
},
}, nil).Maybe()
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
})
}

View File

@ -1623,6 +1623,10 @@ func (coord *MixCoordMock) ListLoadedSegments(ctx context.Context, in *querypb.L
return &querypb.ListLoadedSegmentsResponse{}, nil
}
func (coord *MixCoordMock) FlushAll(ctx context.Context, in *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
return &datapb.FlushAllResponse{}, nil
}
type DescribeCollectionFunc func(ctx context.Context, request *milvuspb.DescribeCollectionRequest, opts ...grpc.CallOption) (*milvuspb.DescribeCollectionResponse, error)
type ShowPartitionsFunc func(ctx context.Context, request *milvuspb.ShowPartitionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowPartitionsResponse, error)

View File

@ -85,6 +85,7 @@ const (
HasPartitionTaskName = "HasPartitionTask"
ShowPartitionTaskName = "ShowPartitionTask"
FlushTaskName = "FlushTask"
FlushAllTaskName = "FlushAllTask"
LoadCollectionTaskName = "LoadCollectionTask"
ReleaseCollectionTaskName = "ReleaseCollectionTask"
LoadPartitionTaskName = "LoadPartitionsTask"

View File

@ -116,7 +116,6 @@ func (t *flushTask) Execute(ctx context.Context) error {
coll2FlushTs[collName] = resp.GetFlushTs()
channelCps = resp.GetChannelCps()
}
SendReplicateMessagePack(ctx, t.replicateMsgStream, t.FlushRequest)
t.result = &milvuspb.FlushResponse{
Status: merr.Success(),
DbName: t.GetDbName(),

View File

@ -0,0 +1,107 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"fmt"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/pkg/v2/mq/msgstream"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
)
type flushAllTask struct {
baseTask
Condition
*milvuspb.FlushAllRequest
ctx context.Context
mixCoord types.MixCoordClient
result *datapb.FlushAllResponse
replicateMsgStream msgstream.MsgStream
}
func (t *flushAllTask) TraceCtx() context.Context {
return t.ctx
}
func (t *flushAllTask) ID() UniqueID {
return t.Base.MsgID
}
func (t *flushAllTask) SetID(uid UniqueID) {
t.Base.MsgID = uid
}
func (t *flushAllTask) Name() string {
return FlushAllTaskName
}
func (t *flushAllTask) Type() commonpb.MsgType {
return t.Base.MsgType
}
func (t *flushAllTask) BeginTs() Timestamp {
return t.Base.Timestamp
}
func (t *flushAllTask) EndTs() Timestamp {
return t.Base.Timestamp
}
func (t *flushAllTask) SetTs(ts Timestamp) {
t.Base.Timestamp = ts
}
func (t *flushAllTask) OnEnqueue() error {
if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
t.Base.MsgType = commonpb.MsgType_Flush
t.Base.SourceID = paramtable.GetNodeID()
return nil
}
func (t *flushAllTask) PreExecute(ctx context.Context) error {
return nil
}
func (t *flushAllTask) Execute(ctx context.Context) error {
flushAllReq := &datapb.FlushAllRequest{
Base: commonpbutil.UpdateMsgBase(
t.Base,
commonpbutil.WithMsgType(commonpb.MsgType_Flush),
),
}
resp, err := t.mixCoord.FlushAll(ctx, flushAllReq)
if err = merr.CheckRPCCall(resp, err); err != nil {
return fmt.Errorf("failed to call flush all to data coordinator: %s", err.Error())
}
t.result = resp
return nil
}
func (t *flushAllTask) PostExecute(ctx context.Context) error {
return nil
}
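For orientation, a minimal sketch of how a proxy-side handler might enqueue this task and wait for its result; the scheduler field names (sched, ddQueue) are assumptions for illustration and may not match impl.go exactly:

func flushAllSketch(ctx context.Context, node *Proxy, req *milvuspb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
    ft := &flushAllTask{
        ctx:             ctx,
        Condition:       NewTaskCondition(ctx),
        FlushAllRequest: req,
        mixCoord:        node.mixCoord,
    }
    // Enqueue triggers OnEnqueue (ID/timestamp assignment) and schedules the task.
    if err := node.sched.ddQueue.Enqueue(ft); err != nil {
        return &datapb.FlushAllResponse{Status: merr.Status(err)}, nil
    }
    if err := ft.WaitToFinish(); err != nil {
        return &datapb.FlushAllResponse{Status: merr.Status(err)}, nil
    }
    return ft.result, nil
}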

View File

@ -0,0 +1,102 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
)
type flushAllTaskbyStreamingService struct {
*flushAllTask
chMgr channelsMgr
}
func (t *flushAllTaskbyStreamingService) Execute(ctx context.Context) error {
dbNames := make([]string, 0)
if t.GetDbName() != "" {
dbNames = append(dbNames, t.GetDbName())
} else {
listResp, err := t.mixCoord.ListDatabases(ctx, &milvuspb.ListDatabasesRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ListDatabases)),
})
if err != nil {
log.Info("flush all task by streaming service failed, list databases failed", zap.Error(err))
return err
}
dbNames = listResp.GetDbNames()
}
flushTs := t.BeginTs()
wg := errgroup.Group{}
// limit goroutine number to 100
wg.SetLimit(100)
for _, dbName := range dbNames {
showColRsp, err := t.mixCoord.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{
Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections)),
DbName: dbName,
})
if err != nil {
log.Info("flush all task by streaming service failed, show collections failed", zap.String("dbName", dbName), zap.Error(err))
return err
}
collections := showColRsp.GetCollectionNames()
for _, collName := range collections {
coll := collName
db := dbName // capture loop variables for use inside the goroutine
wg.Go(func() error {
collID, err := globalMetaCache.GetCollectionID(t.ctx, db, coll)
if err != nil {
return merr.WrapErrAsInputErrorWhen(err, merr.ErrCollectionNotFound, merr.ErrDatabaseNotFound)
}
vchannels, err := t.chMgr.getVChannels(collID)
if err != nil {
return err
}
// Ask the streamingnode to flush segments.
for _, vchannel := range vchannels {
_, err := sendManualFlushToWAL(ctx, collID, vchannel, flushTs)
if err != nil {
return err
}
}
return nil
})
}
}
err := wg.Wait()
if err != nil {
return err
}
t.result = &datapb.FlushAllResponse{
Status: merr.Success(),
FlushTs: flushTs,
}
return nil
}
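The fan-out above relies on errgroup's SetLimit to bound concurrency; Wait returns the first non-nil error from any goroutine. A self-contained sketch of the same pattern:

package main

import (
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    g := errgroup.Group{}
    g.SetLimit(100) // cap in-flight goroutines, as the task does

    vchannels := []string{"vchannel1", "vchannel2", "vchannel3"}
    for _, ch := range vchannels {
        ch := ch // capture the loop variable (pre-Go 1.22 semantics)
        g.Go(func() error {
            fmt.Println("flushing", ch)
            return nil
        })
    }
    if err := g.Wait(); err != nil {
        fmt.Println("flush all failed:", err)
    }
}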

View File

@ -0,0 +1,430 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"fmt"
"testing"
"time"
"github.com/bytedance/mockey"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/pkg/v2/mq/msgstream"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
)
func createTestFlushAllTaskByStreamingService(t *testing.T, dbName string) (*flushAllTaskbyStreamingService, *mocks.MockMixCoordClient, *msgstream.MockMsgStream, *MockChannelsMgr, context.Context) {
ctx := context.Background()
mixCoord := mocks.NewMockMixCoordClient(t)
replicateMsgStream := msgstream.NewMockMsgStream(t)
chMgr := NewMockChannelsMgr(t)
baseTask := &flushAllTask{
baseTask: baseTask{},
Condition: NewTaskCondition(ctx),
FlushAllRequest: &milvuspb.FlushAllRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Flush,
MsgID: 1,
Timestamp: uint64(time.Now().UnixNano()),
SourceID: 1,
},
DbName: dbName,
},
ctx: ctx,
mixCoord: mixCoord,
replicateMsgStream: replicateMsgStream,
}
task := &flushAllTaskbyStreamingService{
flushAllTask: baseTask,
chMgr: chMgr,
}
return task, mixCoord, replicateMsgStream, chMgr, ctx
}
func TestFlushAllTask_WithSpecificDB(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_WithSpecificDB", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "test_db")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ShowCollections
showColResp := &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionNames: []string{"collection1", "collection2"},
}
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(showColResp, nil).Once()
// Mock GetCollectionID
globalMetaCache = &MetaCache{}
mockey.Mock((*MetaCache).GetCollectionID).To(func(cache *MetaCache, ctx context.Context, database, collectionName string) (UniqueID, error) {
if collectionName == "collection1" {
return UniqueID(100), nil
} else if collectionName == "collection2" {
return UniqueID(200), nil
}
return 0, fmt.Errorf("collection not found")
}).Build()
// Mock getVChannels
mockey.Mock(mockey.GetMethod(chMgr, "getVChannels")).To(func(collID UniqueID) ([]string, error) {
if collID == UniqueID(100) {
return []string{"vchannel1", "vchannel2"}, nil
} else if collID == UniqueID(200) {
return []string{"vchannel3"}, nil
}
return nil, fmt.Errorf("collection not found")
}).Build()
// Mock sendManualFlushToWAL
mockey.Mock(sendManualFlushToWAL).Return(nil, nil).Build()
err := task.Execute(ctx)
assert.NoError(t, err)
assert.NotNil(t, task.result)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
})
}
func TestFlushAllTask_WithoutSpecificDB(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_WithoutSpecificDB", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ListDatabases
listDBResp := &milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"db1", "db2"},
}
mixCoord.EXPECT().ListDatabases(mock.Anything, mock.AnythingOfType("*milvuspb.ListDatabasesRequest")).
Return(listDBResp, nil).Once()
// Mock ShowCollections for each database
showColResp1 := &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionNames: []string{"collection1"},
}
showColResp2 := &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionNames: []string{"collection2"},
}
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.MatchedBy(func(req *milvuspb.ShowCollectionsRequest) bool {
return req.DbName == "db1"
})).Return(showColResp1, nil).Once()
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.MatchedBy(func(req *milvuspb.ShowCollectionsRequest) bool {
return req.DbName == "db2"
})).Return(showColResp2, nil).Once()
// Mock GetCollectionID
globalMetaCache = &MetaCache{}
mockey.Mock((*MetaCache).GetCollectionID).To(func(cache *MetaCache, ctx context.Context, database, collectionName string) (UniqueID, error) {
if collectionName == "collection1" {
return UniqueID(100), nil
} else if collectionName == "collection2" {
return UniqueID(200), nil
}
return 0, fmt.Errorf("collection not found")
}).Build()
// Mock getVChannels
mockey.Mock(mockey.GetMethod(chMgr, "getVChannels")).To(func(collID UniqueID) ([]string, error) {
if collID == UniqueID(100) {
return []string{"vchannel1"}, nil
} else if collID == UniqueID(200) {
return []string{"vchannel2"}, nil
}
return nil, fmt.Errorf("collection not found")
}).Build()
// Mock sendManualFlushToWAL
mockey.Mock(sendManualFlushToWAL).Return(nil, nil).Build()
err := task.Execute(ctx)
assert.NoError(t, err)
assert.NotNil(t, task.result)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
})
}
func TestFlushAllTask_ListDatabasesError(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_ListDatabasesError", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ListDatabases with error
expectedErr := fmt.Errorf("list databases failed")
mixCoord.EXPECT().ListDatabases(mock.Anything, mock.AnythingOfType("*milvuspb.ListDatabasesRequest")).
Return(nil, expectedErr).Once()
err := task.Execute(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), "list databases failed")
})
}
func TestFlushAllTask_ShowCollectionsError(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_ShowCollectionsError", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "test_db")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ShowCollections with error
expectedErr := fmt.Errorf("show collections failed")
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(nil, expectedErr).Once()
err := task.Execute(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), "show collections failed")
})
}
func TestFlushAllTask_GetCollectionIDError(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_GetCollectionIDError", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "test_db")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ShowCollections
showColResp := &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionNames: []string{"collection1"},
}
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(showColResp, nil).Once()
// Mock GetCollectionID with error
globalMetaCache = &MetaCache{}
expectedErr := fmt.Errorf("collection not found")
mockey.Mock((*MetaCache).GetCollectionID).Return(UniqueID(0), expectedErr).Build()
err := task.Execute(ctx)
assert.Error(t, err)
// The error should be wrapped by merr.WrapErrAsInputErrorWhen
assert.NotNil(t, err)
})
}
func TestFlushAllTask_GetVChannelsError(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_GetVChannelsError", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "test_db")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ShowCollections
showColResp := &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionNames: []string{"collection1"},
}
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(showColResp, nil).Once()
// Mock GetCollectionID
globalMetaCache = &MetaCache{}
mockey.Mock((*MetaCache).GetCollectionID).Return(UniqueID(100), nil).Build()
// Mock getVChannels with error
expectedErr := fmt.Errorf("get vchannels failed")
chMgr.EXPECT().getVChannels(UniqueID(100)).Return(nil, expectedErr).Once()
err := task.Execute(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), "get vchannels failed")
})
}
func TestFlushAllTask_SendManualFlushError(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_SendManualFlushError", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "test_db")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ShowCollections
showColResp := &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionNames: []string{"collection1"},
}
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(showColResp, nil).Once()
// Mock GetCollectionID
globalMetaCache = &MetaCache{}
mockey.Mock((*MetaCache).GetCollectionID).Return(UniqueID(100), nil).Build()
// Mock getVChannels
chMgr.EXPECT().getVChannels(UniqueID(100)).Return([]string{"vchannel1"}, nil).Once()
// Mock sendManualFlushToWAL with error
expectedErr := fmt.Errorf("send manual flush failed")
mockey.Mock(sendManualFlushToWAL).Return(nil, expectedErr).Build()
err := task.Execute(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), "send manual flush failed")
})
}
func TestFlushAllTask_WithEmptyCollections(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_WithEmptyCollections", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "test_db")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ShowCollections with empty collections
showColResp := &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionNames: []string{},
}
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(showColResp, nil).Once()
err := task.Execute(ctx)
assert.NoError(t, err)
assert.NotNil(t, task.result)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
})
}
func TestFlushAllTask_WithEmptyVChannels(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_WithEmptyVChannels", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "test_db")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ShowCollections
showColResp := &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionNames: []string{"collection1"},
}
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(showColResp, nil).Once()
// Mock GetCollectionID
globalMetaCache = &MetaCache{}
mockey.Mock((*MetaCache).GetCollectionID).Return(UniqueID(100), nil).Build()
// Mock getVChannels with empty channels
mockey.Mock(mockey.GetMethod(chMgr, "getVChannels")).To(func(collID UniqueID) ([]string, error) {
return []string{}, nil
}).Build()
err := task.Execute(ctx)
assert.NoError(t, err)
assert.NotNil(t, task.result)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
})
}
func TestFlushAllTask_MultipleVChannels(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_MultipleVChannels", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "test_db")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ShowCollections
showColResp := &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionNames: []string{"collection1"},
}
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(showColResp, nil).Once()
// Mock GetCollectionID
globalMetaCache = &MetaCache{}
mockey.Mock((*MetaCache).GetCollectionID).Return(UniqueID(100), nil).Build()
// Mock getVChannels with multiple channels
vchannels := []string{"vchannel1", "vchannel2", "vchannel3"}
mockey.Mock(mockey.GetMethod(chMgr, "getVChannels")).To(func(collID UniqueID) ([]string, error) {
return vchannels, nil
}).Build()
// Mock sendManualFlushToWAL - should be called for each vchannel
callCount := 0
mockey.Mock(sendManualFlushToWAL).To(func(ctx context.Context, collID UniqueID, vchannel string, flushTs Timestamp) ([]int64, error) {
callCount++
assert.Equal(t, UniqueID(100), collID)
assert.Contains(t, vchannels, vchannel)
return nil, nil
}).Build()
err := task.Execute(ctx)
assert.NoError(t, err)
assert.Equal(t, len(vchannels), callCount) // Verify sendManualFlushToWAL was called for each vchannel
assert.NotNil(t, task.result)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
})
}
func TestFlushAllTask_VerifyFlushTs(t *testing.T) {
mockey.PatchConvey("TestFlushAllTask_VerifyFlushTs", t, func() {
task, mixCoord, replicateMsgStream, chMgr, ctx := createTestFlushAllTaskByStreamingService(t, "test_db")
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
defer chMgr.AssertExpectations(t)
// Mock ShowCollections
showColResp := &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionNames: []string{"collection1"},
}
mixCoord.EXPECT().ShowCollections(mock.Anything, mock.AnythingOfType("*milvuspb.ShowCollectionsRequest")).
Return(showColResp, nil).Once()
// Mock GetCollectionID
globalMetaCache = &MetaCache{}
mockey.Mock((*MetaCache).GetCollectionID).Return(UniqueID(100), nil).Build()
// Mock getVChannels
mockey.Mock(mockey.GetMethod(chMgr, "getVChannels")).To(func(collID UniqueID) ([]string, error) {
return []string{"vchannel1"}, nil
}).Build()
// Mock sendManualFlushToWAL and verify flushTs
expectedFlushTs := task.BeginTs()
mockey.Mock(sendManualFlushToWAL).To(func(ctx context.Context, collID UniqueID, vchannel string, flushTs Timestamp) ([]int64, error) {
assert.Equal(t, expectedFlushTs, flushTs)
return nil, nil
}).Build()
err := task.Execute(ctx)
assert.NoError(t, err)
assert.NotNil(t, task.result)
assert.Equal(t, expectedFlushTs, task.result.FlushTs)
})
}

View File

@ -0,0 +1,430 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/pkg/v2/mq/msgstream"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/uniquegenerator"
)
func createTestFlushAllTask(t *testing.T) (*flushAllTask, *mocks.MockMixCoordClient, *msgstream.MockMsgStream, context.Context) {
ctx := context.Background()
mixCoord := mocks.NewMockMixCoordClient(t)
replicateMsgStream := msgstream.NewMockMsgStream(t)
task := &flushAllTask{
baseTask: baseTask{},
Condition: NewTaskCondition(ctx),
FlushAllRequest: &milvuspb.FlushAllRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Flush,
MsgID: 1,
Timestamp: uint64(time.Now().UnixNano()),
SourceID: 1,
},
},
ctx: ctx,
mixCoord: mixCoord,
replicateMsgStream: replicateMsgStream,
}
return task, mixCoord, replicateMsgStream, ctx
}
func TestFlushAllTaskTraceCtx(t *testing.T) {
task, mixCoord, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
traceCtx := task.TraceCtx()
assert.Equal(t, ctx, traceCtx)
}
func TestFlushAllTaskID(t *testing.T) {
task, mixCoord, replicateMsgStream, _ := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Test getting ID
originalID := task.ID()
assert.Equal(t, UniqueID(1), originalID)
// Test setting ID
newID := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
task.SetID(newID)
assert.Equal(t, newID, task.ID())
}
func TestFlushAllTaskName(t *testing.T) {
task, mixCoord, replicateMsgStream, _ := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
name := task.Name()
assert.Equal(t, FlushAllTaskName, name)
}
func TestFlushAllTaskType(t *testing.T) {
task, mixCoord, replicateMsgStream, _ := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
msgType := task.Type()
assert.Equal(t, commonpb.MsgType_Flush, msgType)
}
func TestFlushAllTaskTimestampMethods(t *testing.T) {
task, mixCoord, replicateMsgStream, _ := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
originalTs := task.BeginTs()
assert.Equal(t, originalTs, task.EndTs())
newTs := Timestamp(time.Now().UnixNano())
task.SetTs(newTs)
assert.Equal(t, newTs, task.BeginTs())
assert.Equal(t, newTs, task.EndTs())
}
func TestFlushAllTaskOnEnqueue(t *testing.T) {
ctx := context.Background()
mixCoord := mocks.NewMockMixCoordClient(t)
defer mixCoord.AssertExpectations(t)
// Test with nil Base
task := &flushAllTask{
baseTask: baseTask{},
Condition: NewTaskCondition(ctx),
FlushAllRequest: &milvuspb.FlushAllRequest{},
ctx: ctx,
mixCoord: mixCoord,
}
err := task.OnEnqueue()
assert.NoError(t, err)
assert.NotNil(t, task.Base)
assert.Equal(t, commonpb.MsgType_Flush, task.Base.MsgType)
// Test with existing Base
task, _, replicateMsgStream, _ := createTestFlushAllTask(t)
defer replicateMsgStream.AssertExpectations(t)
err = task.OnEnqueue()
assert.NoError(t, err)
assert.Equal(t, commonpb.MsgType_Flush, task.Base.MsgType)
}
func TestFlushAllTaskPreExecute(t *testing.T) {
task, mixCoord, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
err := task.PreExecute(ctx)
assert.NoError(t, err)
}
func TestFlushAllTaskExecuteSuccess(t *testing.T) {
task, mixCoord, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Setup expectations
expectedResp := &datapb.FlushAllResponse{
Status: merr.Success(),
}
mixCoord.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(expectedResp, nil).Once()
err := task.Execute(ctx)
assert.NoError(t, err)
assert.Equal(t, expectedResp, task.result)
}
func TestFlushAllTaskExecuteFlushAllRPCError(t *testing.T) {
task, mixCoord, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Test RPC call error
expectedErr := fmt.Errorf("rpc error")
mixCoord.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(nil, expectedErr).Once()
err := task.Execute(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), "failed to call flush all to data coordinator")
}
func TestFlushAllTaskExecuteFlushAllResponseError(t *testing.T) {
task, mixCoord, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Test response with error status
errorResp := &datapb.FlushAllResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "flush all failed",
},
}
mixCoord.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(errorResp, nil).Once()
err := task.Execute(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), "failed to call flush all to data coordinator")
}
func TestFlushAllTaskExecuteWithMerCheck(t *testing.T) {
task, mixCoord, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Test successful execution with merr.CheckRPCCall
successResp := &datapb.FlushAllResponse{
Status: merr.Success(),
}
mixCoord.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(successResp, nil).Once()
err := task.Execute(ctx)
assert.NoError(t, err)
assert.Equal(t, successResp, task.result)
}
func TestFlushAllTaskExecuteRequestContent(t *testing.T) {
task, mixCoord, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Test the content of the FlushAllRequest sent to mixCoord
mixCoord.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(&datapb.FlushAllResponse{Status: merr.Success()}, nil).Once()
err := task.Execute(ctx)
assert.NoError(t, err)
// The test verifies that the Execute method builds the request structure internally;
// the actual request content validation is covered by other tests.
}
func TestFlushAllTaskPostExecute(t *testing.T) {
task, mixCoord, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
err := task.PostExecute(ctx)
assert.NoError(t, err)
}
func TestFlushAllTaskLifecycle(t *testing.T) {
ctx := context.Background()
mixCoord := mocks.NewMockMixCoordClient(t)
replicateMsgStream := msgstream.NewMockMsgStream(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Test complete task lifecycle
// 1. OnEnqueue
task := &flushAllTask{
baseTask: baseTask{},
Condition: NewTaskCondition(ctx),
FlushAllRequest: &milvuspb.FlushAllRequest{},
ctx: ctx,
mixCoord: mixCoord,
replicateMsgStream: replicateMsgStream,
}
err := task.OnEnqueue()
assert.NoError(t, err)
// 2. PreExecute
err = task.PreExecute(ctx)
assert.NoError(t, err)
// 3. Execute
expectedResp := &datapb.FlushAllResponse{
Status: merr.Success(),
}
mixCoord.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(expectedResp, nil).Once()
err = task.Execute(ctx)
assert.NoError(t, err)
// 4. PostExecute
err = task.PostExecute(ctx)
assert.NoError(t, err)
// Verify task state
assert.Equal(t, expectedResp, task.result)
}
func TestFlushAllTaskErrorHandlingInExecute(t *testing.T) {
// Test different error scenarios in Execute method
testCases := []struct {
name string
setupMock func(*mocks.MockMixCoordClient)
expectedError string
}{
{
name: "mixCoord FlushAll returns error",
setupMock: func(mixCoord *mocks.MockMixCoordClient) {
mixCoord.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(nil, fmt.Errorf("network error")).Once()
},
expectedError: "failed to call flush all to data coordinator",
},
{
name: "mixCoord FlushAll returns error status",
setupMock: func(mixCoord *mocks.MockMixCoordClient) {
mixCoord.EXPECT().FlushAll(mock.Anything, mock.AnythingOfType("*datapb.FlushAllRequest")).
Return(&datapb.FlushAllResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_IllegalArgument,
Reason: "invalid request",
},
}, nil).Once()
},
expectedError: "failed to call flush all to data coordinator",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
task, mixCoord, replicateMsgStream, ctx := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
tc.setupMock(mixCoord)
err := task.Execute(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), tc.expectedError)
})
}
}
func TestFlushAllTaskImplementsTaskInterface(t *testing.T) {
// Verify that flushAllTask implements the task interface
var _ task = (*flushAllTask)(nil)
task, mixCoord, replicateMsgStream, _ := createTestFlushAllTask(t)
defer mixCoord.AssertExpectations(t)
defer replicateMsgStream.AssertExpectations(t)
// Test all interface methods are accessible
assert.NotNil(t, task.TraceCtx)
assert.NotNil(t, task.ID)
assert.NotNil(t, task.SetID)
assert.NotNil(t, task.Name)
assert.NotNil(t, task.Type)
assert.NotNil(t, task.BeginTs)
assert.NotNil(t, task.EndTs)
assert.NotNil(t, task.SetTs)
assert.NotNil(t, task.OnEnqueue)
assert.NotNil(t, task.PreExecute)
assert.NotNil(t, task.Execute)
assert.NotNil(t, task.PostExecute)
}
func TestFlushAllTaskNilHandling(t *testing.T) {
// Test behavior with nil values
task := &flushAllTask{
FlushAllRequest: &milvuspb.FlushAllRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Flush,
MsgID: 1,
Timestamp: uint64(time.Now().UnixNano()),
SourceID: 1,
},
},
}
// Test TraceCtx with nil context
ctx := task.TraceCtx()
assert.Nil(t, ctx)
// Test ID with nil Base
id := task.ID()
assert.Equal(t, UniqueID(1), id)
// Test Type with nil Base
msgType := task.Type()
assert.Equal(t, commonpb.MsgType_Flush, msgType)
}
func TestFlushAllTaskConstantValues(t *testing.T) {
// Test that task name constant is correct
assert.Equal(t, "FlushAllTask", FlushAllTaskName)
// Test task name method returns correct constant
task := &flushAllTask{}
assert.Equal(t, FlushAllTaskName, task.Name())
}
func TestFlushAllTaskBaseTaskMethods(t *testing.T) {
// Test baseTask methods
task := &flushAllTask{
baseTask: baseTask{},
}
// Test CanSkipAllocTimestamp
assert.False(t, task.CanSkipAllocTimestamp())
// Test SetOnEnqueueTime
task.SetOnEnqueueTime()
assert.False(t, task.onEnqueueTime.IsZero())
// Test GetDurationInQueue
time.Sleep(1 * time.Millisecond)
duration := task.GetDurationInQueue()
assert.Greater(t, duration, time.Duration(0))
// Test IsSubTask
assert.False(t, task.IsSubTask())
// Test SetExecutingTime
task.SetExecutingTime()
assert.False(t, task.executingTime.IsZero())
// Test GetDurationInExecuting
time.Sleep(1 * time.Millisecond)
execDuration := task.GetDurationInExecuting()
assert.Greater(t, execDuration, time.Duration(0))
}

View File

@ -63,7 +63,7 @@ func (t *flushTaskByStreamingService) Execute(ctx context.Context) error {
// Ask the streamingnode to flush segments.
for _, vchannel := range vchannels {
segmentIDs, err := t.sendManualFlushToWAL(ctx, collID, vchannel, flushTs)
segmentIDs, err := sendManualFlushToWAL(ctx, collID, vchannel, flushTs)
if err != nil {
return err
}
@ -99,8 +99,6 @@ func (t *flushTaskByStreamingService) Execute(ctx context.Context) error {
coll2FlushTs[collName] = flushTs
channelCps = resp.GetChannelCps()
}
// TODO: refactor to use streaming service
SendReplicateMessagePack(ctx, t.replicateMsgStream, t.FlushRequest)
t.result = &milvuspb.FlushResponse{
Status: merr.Success(),
DbName: t.GetDbName(),
@ -114,7 +112,7 @@ func (t *flushTaskByStreamingService) Execute(ctx context.Context) error {
}
// sendManualFlushToWAL sends a manual flush message to WAL.
func (t *flushTaskByStreamingService) sendManualFlushToWAL(ctx context.Context, collID int64, vchannel string, flushTs uint64) ([]int64, error) {
func sendManualFlushToWAL(ctx context.Context, collID int64, vchannel string, flushTs uint64) ([]int64, error) {
logger := log.Ctx(ctx).With(zap.Int64("collectionID", collID), zap.String("vchannel", vchannel))
flushMsg, err := message.NewManualFlushMessageBuilderV2().
WithVChannel(vchannel).

View File

@ -30,6 +30,8 @@ enum SegmentLevel {
service DataCoord {
rpc Flush(FlushRequest) returns (FlushResponse) {}
rpc FlushAll(FlushAllRequest) returns (FlushAllResponse) {}
// AllocSegment alloc a new growing segment, add it into segment meta.
rpc AllocSegment(AllocSegmentRequest) returns (AllocSegmentResponse) {}
@ -155,6 +157,25 @@ message FlushResponse {
map<string, msg.MsgPosition> channel_cps = 8;
}
message FlushResult {
int64 collectionID = 1;
repeated int64 segmentIDs = 2; // newly sealed segments
repeated int64 flushSegmentIDs = 3; // old flushed segment
int64 timeOfSeal = 4;
uint64 flush_ts = 5;
map<string, msg.MsgPosition> channel_cps = 6;
}
message FlushAllRequest {
common.MsgBase base = 1;
string dbName = 2;
}
message FlushAllResponse {
common.Status status = 1;
uint64 flushTs = 2;
}
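Once the stubs are regenerated, the new RPC can be invoked through the DataCoord client; a hypothetical call sketch, where the endpoint address and insecure credentials are placeholders rather than values from this patch:

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    "github.com/milvus-io/milvus/pkg/v2/proto/datapb"
)

func main() {
    // Placeholder endpoint; the DataCoord gRPC address is deployment-specific.
    conn, err := grpc.Dial("localhost:13333", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    client := datapb.NewDataCoordClient(conn)
    resp, err := client.FlushAll(context.Background(), &datapb.FlushAllRequest{DbName: "default"})
    if err != nil {
        panic(err)
    }
    fmt.Println("flush ts:", resp.GetFlushTs())
}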
message FlushChannelsRequest {
common.MsgBase base = 1;
uint64 flush_ts = 2;

File diff suppressed because it is too large

View File

@ -24,6 +24,7 @@ const _ = grpc.SupportPackageIsVersion7
const (
DataCoord_Flush_FullMethodName = "/milvus.proto.data.DataCoord/Flush"
DataCoord_FlushAll_FullMethodName = "/milvus.proto.data.DataCoord/FlushAll"
DataCoord_AllocSegment_FullMethodName = "/milvus.proto.data.DataCoord/AllocSegment"
DataCoord_AssignSegmentID_FullMethodName = "/milvus.proto.data.DataCoord/AssignSegmentID"
DataCoord_GetSegmentInfo_FullMethodName = "/milvus.proto.data.DataCoord/GetSegmentInfo"
@ -76,6 +77,7 @@ const (
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type DataCoordClient interface {
Flush(ctx context.Context, in *FlushRequest, opts ...grpc.CallOption) (*FlushResponse, error)
FlushAll(ctx context.Context, in *FlushAllRequest, opts ...grpc.CallOption) (*FlushAllResponse, error)
// AllocSegment alloc a new growing segment, add it into segment meta.
AllocSegment(ctx context.Context, in *AllocSegmentRequest, opts ...grpc.CallOption) (*AllocSegmentResponse, error)
// Deprecated: Do not use.
@ -147,6 +149,15 @@ func (c *dataCoordClient) Flush(ctx context.Context, in *FlushRequest, opts ...g
return out, nil
}
func (c *dataCoordClient) FlushAll(ctx context.Context, in *FlushAllRequest, opts ...grpc.CallOption) (*FlushAllResponse, error) {
out := new(FlushAllResponse)
err := c.cc.Invoke(ctx, DataCoord_FlushAll_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dataCoordClient) AllocSegment(ctx context.Context, in *AllocSegmentRequest, opts ...grpc.CallOption) (*AllocSegmentResponse, error) {
out := new(AllocSegmentResponse)
err := c.cc.Invoke(ctx, DataCoord_AllocSegment_FullMethodName, in, out, opts...)
@ -558,6 +569,7 @@ func (c *dataCoordClient) ListImports(ctx context.Context, in *internalpb.ListIm
// for forward compatibility
type DataCoordServer interface {
Flush(context.Context, *FlushRequest) (*FlushResponse, error)
FlushAll(context.Context, *FlushAllRequest) (*FlushAllResponse, error)
// AllocSegment alloc a new growing segment, add it into segment meta.
AllocSegment(context.Context, *AllocSegmentRequest) (*AllocSegmentResponse, error)
// Deprecated: Do not use.
@ -619,6 +631,9 @@ type UnimplementedDataCoordServer struct {
func (UnimplementedDataCoordServer) Flush(context.Context, *FlushRequest) (*FlushResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented")
}
func (UnimplementedDataCoordServer) FlushAll(context.Context, *FlushAllRequest) (*FlushAllResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method FlushAll not implemented")
}
func (UnimplementedDataCoordServer) AllocSegment(context.Context, *AllocSegmentRequest) (*AllocSegmentResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AllocSegment not implemented")
}
@ -784,6 +799,24 @@ func _DataCoord_Flush_Handler(srv interface{}, ctx context.Context, dec func(int
return interceptor(ctx, in, info, handler)
}
func _DataCoord_FlushAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(FlushAllRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DataCoordServer).FlushAll(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: DataCoord_FlushAll_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DataCoordServer).FlushAll(ctx, req.(*FlushAllRequest))
}
return interceptor(ctx, in, info, handler)
}
func _DataCoord_AllocSegment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AllocSegmentRequest)
if err := dec(in); err != nil {
@ -1605,6 +1638,10 @@ var DataCoord_ServiceDesc = grpc.ServiceDesc{
MethodName: "Flush",
Handler: _DataCoord_Flush_Handler,
},
{
MethodName: "FlushAll",
Handler: _DataCoord_FlushAll_Handler,
},
{
MethodName: "AllocSegment",
Handler: _DataCoord_AllocSegment_Handler,

View File

@ -36,14 +36,14 @@ fi
# starting the timer
beginTime=`date +%s`
pushd cmd/tools
$TEST_CMD -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic ./...
$TEST_CMD -gcflags="all=-N -l" -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic ./...
if [ -f profile.out ]; then
grep -v kafka profile.out | grep -v planparserv2/generated | grep -v mocks | sed '1d' >> ../${FILE_COVERAGE_INFO}
rm profile.out
fi
popd
for d in $(go list ./internal/... | grep -v -e vendor -e kafka -e planparserv2/generated -e mocks); do
$TEST_CMD -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
$TEST_CMD -gcflags="all=-N -l" -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
grep -v kafka profile.out | grep -v planparserv2/generated | grep -v mocks | sed '1d' >> ${FILE_COVERAGE_INFO}
rm profile.out
@ -51,7 +51,7 @@ for d in $(go list ./internal/... | grep -v -e vendor -e kafka -e planparserv2/g
done
pushd pkg
for d in $(go list ./... | grep -v -e vendor -e kafka -e planparserv2/generated -e mocks); do
$TEST_CMD -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
$TEST_CMD -gcflags="all=-N -l" -race -tags dynamic,test -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
grep -v kafka profile.out | grep -v planparserv2/generated | grep -v mocks | sed '1d' >> ../${FILE_COVERAGE_INFO}
rm profile.out
@ -61,7 +61,7 @@ popd
# milvusclient
pushd client
for d in $(go list ./... | grep -v -e vendor -e kafka -e planparserv2/generated -e mocks); do
$TEST_CMD -race -tags dynamic -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
$TEST_CMD -gcflags="all=-N -l" -race -tags dynamic -v -coverpkg=./... -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
grep -v kafka profile.out | grep -v planparserv2/generated | grep -v mocks | sed '1d' >> ../${FILE_COVERAGE_INFO}
rm profile.out
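The -gcflags="all=-N -l" additions here (and in the script below) disable compiler optimizations (-N) and inlining (-l); mockey patches functions at runtime, and an inlined call site would bypass the patch. A minimal illustration, assuming only the mockey and testify APIs already used elsewhere in this patch:

package demo

import (
    "testing"

    "github.com/bytedance/mockey"
    "github.com/stretchr/testify/assert"
)

func add(a, b int) int { return a + b }

func TestAddMocked(t *testing.T) {
    mockey.PatchConvey("patch add", t, func() {
        // Without -gcflags="all=-N -l" the compiler may inline add,
        // so the runtime patch never takes effect and this assertion fails.
        mockey.Mock(add).Return(42).Build()
        assert.Equal(t, 42, add(1, 2))
    })
}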

View File

@ -60,114 +60,114 @@ done
function test_proxy()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/proxy/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/proxy/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/proxy/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/proxy/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_querynode()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/querynodev2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/querynode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/querynodev2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/querynode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_kv()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/kv/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/kv/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_mq()
{
go test -race -cover -tags dynamic,test $(go list "${MILVUS_DIR}/mq/..." | grep -v kafka) -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test $(go list "${MILVUS_DIR}/mq/..." | grep -v kafka) -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_storage()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/storage" -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/storage" -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_allocator()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/allocator/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/allocator/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_tso()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/tso/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/tso/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_util()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/funcutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/funcutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
pushd pkg
go test -race -cover -tags dynamic,test "${PKG_DIR}/util/retry/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/util/retry/..." -failfast -count=1 -ldflags="-r ${RPATH}"
popd
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/sessionutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/typeutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/importutilv2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/proxyutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/initcore/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/cgo/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/streamingutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/sessionutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/typeutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/importutilv2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/proxyutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/initcore/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/cgo/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/streamingutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_pkg()
{
pushd pkg
go test -race -cover -tags dynamic,test "${PKG_DIR}/common/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/config/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/log/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/mq/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/tracer/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/util/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${PKG_DIR}/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/common/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/config/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/log/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/mq/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/tracer/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/util/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
popd
}
function test_datanode
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/datanode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/datanode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/datanode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/datanode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_rootcoord()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/rootcoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/rootcoord" -failfast -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/rootcoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/rootcoord" -failfast -ldflags="-r ${RPATH}"
}
function test_datacoord()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/datacoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/datacoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/datacoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/datacoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_querycoord()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/querycoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/querycoordv2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/querycoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/querycoordv2/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_metastore()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/metastore/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/metastore/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_cmd()
{
go test -race -cover -tags dynamic,test "${ROOT_DIR}/cmd/tools/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${ROOT_DIR}/cmd/tools/..." -failfast -count=1 -ldflags="-r ${RPATH}"
}
function test_streaming()
{
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/streamingcoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/streamingnode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/util/streamingutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/streamingcoord/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/streamingnode/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/util/streamingutil/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${MILVUS_DIR}/distributed/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
pushd pkg
go test -race -cover -tags dynamic,test "${PKG_DIR}/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
go test -gcflags="all=-N -l" -race -cover -tags dynamic,test "${PKG_DIR}/streaming/..." -failfast -count=1 -ldflags="-r ${RPATH}"
popd
}

View File

@ -246,7 +246,6 @@ func (s *ArrayStructDataNodeSuite) checkFieldsData(fieldsData []*schemapb.FieldD
default:
s.Fail(fmt.Sprintf("unsupported field type: %s", fieldData.FieldName))
}
}
}
}