enhance: Implement new FlushAllMessage and refactor flush all (#45920)
This PR:
1. Define and implement the new FlushAllMessage.
2. Refactor FlushAll to flush the entire cluster.

issue: https://github.com/milvus-io/milvus/issues/45919

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
parent 8780e12570
commit f32f2694bc
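At a high level, the refactor replaces per-collection flushing with a single cluster-wide broadcast: FlushAll now appends one FlushAllMessage to every physical channel (pchannel) and returns the per-pchannel timeticks at which the message landed, and GetFlushAllState reports the cluster flushed once every channel checkpoint has passed the timetick recorded for its pchannel. A minimal, self-contained Go sketch of that contract (illustrative types and helpers only, not the actual Milvus code):

// Conceptual sketch of the cluster-level flush-all flow; all names here are
// illustrative stand-ins for the real Milvus types.
package main

import "fmt"

// flushAll "broadcasts" a flush-all event and records, per physical channel,
// the timetick at which the event was appended. All data written before that
// timetick must be sealed or flushed before flush-all counts as done.
func flushAll(pchannels []string, appendTimeTick func(string) uint64) map[string]uint64 {
    flushAllTss := make(map[string]uint64, len(pchannels))
    for _, ch := range pchannels {
        flushAllTss[ch] = appendTimeTick(ch)
    }
    return flushAllTss
}

// flushed reports whether every channel checkpoint has advanced past the
// flush-all timetick recorded for its physical channel.
func flushed(flushAllTss map[string]uint64, checkpoints map[string]uint64) bool {
    for ch, ts := range flushAllTss {
        if checkpoints[ch] < ts {
            return false
        }
    }
    return true
}

func main() {
    tick := uint64(100)
    tss := flushAll([]string{"pchannel-0", "pchannel-1"}, func(string) uint64 { tick++; return tick })
    fmt.Println(flushed(tss, map[string]uint64{"pchannel-0": 200, "pchannel-1": 90}))  // false
    fmt.Println(flushed(tss, map[string]uint64{"pchannel-0": 200, "pchannel-1": 200})) // true
}

The key design point is that the flush watermark becomes per physical channel rather than one cluster timestamp, so flush progress can be checked channel by channel.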
@@ -6,15 +6,13 @@ require (
 	github.com/blang/semver/v4 v4.0.0
 	github.com/cockroachdb/errors v1.9.1
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
-	github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45
+	github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6
 	github.com/milvus-io/milvus/pkg/v2 v2.6.4-0.20251104142533-a2ce70d25256
 	github.com/quasilyte/go-ruleguard/dsl v0.3.23
 	github.com/samber/lo v1.27.0
 	github.com/stretchr/testify v1.11.1
 	github.com/tidwall/gjson v1.17.1
-	go.opentelemetry.io/otel v1.34.0
 	go.uber.org/atomic v1.11.0
-	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
 	google.golang.org/grpc v1.71.0
 	google.golang.org/protobuf v1.36.5
 )
@@ -94,6 +92,7 @@ require (
 	go.etcd.io/etcd/server/v3 v3.5.5 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect
+	go.opentelemetry.io/otel v1.34.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect
 	go.opentelemetry.io/otel/metric v1.34.0 // indirect
@@ -104,6 +103,7 @@ require (
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/crypto v0.45.0 // indirect
+	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
 	golang.org/x/net v0.47.0 // indirect
 	golang.org/x/sync v0.18.0 // indirect
 	golang.org/x/sys v0.38.0 // indirect
@@ -330,8 +330,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
 github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
 github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45 h1:TMUhlirMCH2zgJD+qClP5EP0yuFl1VrE4j+0fiRSuJU=
-github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
+github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6 h1:TeHfsRCdjbX30xS7Npcb+POQXd460+AjmXYmmTuxyBA=
+github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
 github.com/milvus-io/milvus/pkg/v2 v2.6.4-0.20251104142533-a2ce70d25256 h1:M2waty0w2k4YT2HHzJk3fx6EFPD4DKxNJatitIV+gGU=
 github.com/milvus-io/milvus/pkg/v2 v2.6.4-0.20251104142533-a2ce70d25256/go.mod h1:HT6Wxahwj/l8+i+D/C3iwDzCjDa36U9gyVw6CjjK4pE=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -912,6 +912,65 @@ func (_c *MilvusServiceServer_CheckHealth_Call) RunAndReturn(run func(context.Co
 	return _c
 }
 
+// ComputePhraseMatchSlop provides a mock function with given fields: _a0, _a1
+func (_m *MilvusServiceServer) ComputePhraseMatchSlop(_a0 context.Context, _a1 *milvuspb.ComputePhraseMatchSlopRequest) (*milvuspb.ComputePhraseMatchSlopResponse, error) {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for ComputePhraseMatchSlop")
+	}
+
+	var r0 *milvuspb.ComputePhraseMatchSlopResponse
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ComputePhraseMatchSlopRequest) (*milvuspb.ComputePhraseMatchSlopResponse, error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ComputePhraseMatchSlopRequest) *milvuspb.ComputePhraseMatchSlopResponse); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*milvuspb.ComputePhraseMatchSlopResponse)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.ComputePhraseMatchSlopRequest) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MilvusServiceServer_ComputePhraseMatchSlop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ComputePhraseMatchSlop'
+type MilvusServiceServer_ComputePhraseMatchSlop_Call struct {
+	*mock.Call
+}
+
+// ComputePhraseMatchSlop is a helper method to define mock.On call
+//   - _a0 context.Context
+//   - _a1 *milvuspb.ComputePhraseMatchSlopRequest
+func (_e *MilvusServiceServer_Expecter) ComputePhraseMatchSlop(_a0 interface{}, _a1 interface{}) *MilvusServiceServer_ComputePhraseMatchSlop_Call {
+	return &MilvusServiceServer_ComputePhraseMatchSlop_Call{Call: _e.mock.On("ComputePhraseMatchSlop", _a0, _a1)}
+}
+
+func (_c *MilvusServiceServer_ComputePhraseMatchSlop_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.ComputePhraseMatchSlopRequest)) *MilvusServiceServer_ComputePhraseMatchSlop_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(*milvuspb.ComputePhraseMatchSlopRequest))
+	})
+	return _c
+}
+
+func (_c *MilvusServiceServer_ComputePhraseMatchSlop_Call) Return(_a0 *milvuspb.ComputePhraseMatchSlopResponse, _a1 error) *MilvusServiceServer_ComputePhraseMatchSlop_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *MilvusServiceServer_ComputePhraseMatchSlop_Call) RunAndReturn(run func(context.Context, *milvuspb.ComputePhraseMatchSlopRequest) (*milvuspb.ComputePhraseMatchSlopResponse, error)) *MilvusServiceServer_ComputePhraseMatchSlop_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // Connect provides a mock function with given fields: _a0, _a1
 func (_m *MilvusServiceServer) Connect(_a0 context.Context, _a1 *milvuspb.ConnectRequest) (*milvuspb.ConnectResponse, error) {
 	ret := _m.Called(_a0, _a1)
@@ -1607,6 +1666,65 @@ func (_c *MilvusServiceServer_CreateRowPolicy_Call) RunAndReturn(run func(contex
 	return _c
 }
 
+// CreateSnapshot provides a mock function with given fields: _a0, _a1
+func (_m *MilvusServiceServer) CreateSnapshot(_a0 context.Context, _a1 *milvuspb.CreateSnapshotRequest) (*commonpb.Status, error) {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for CreateSnapshot")
+	}
+
+	var r0 *commonpb.Status
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.CreateSnapshotRequest) (*commonpb.Status, error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.CreateSnapshotRequest) *commonpb.Status); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*commonpb.Status)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.CreateSnapshotRequest) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MilvusServiceServer_CreateSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateSnapshot'
+type MilvusServiceServer_CreateSnapshot_Call struct {
+	*mock.Call
+}
+
+// CreateSnapshot is a helper method to define mock.On call
+//   - _a0 context.Context
+//   - _a1 *milvuspb.CreateSnapshotRequest
+func (_e *MilvusServiceServer_Expecter) CreateSnapshot(_a0 interface{}, _a1 interface{}) *MilvusServiceServer_CreateSnapshot_Call {
+	return &MilvusServiceServer_CreateSnapshot_Call{Call: _e.mock.On("CreateSnapshot", _a0, _a1)}
+}
+
+func (_c *MilvusServiceServer_CreateSnapshot_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.CreateSnapshotRequest)) *MilvusServiceServer_CreateSnapshot_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(*milvuspb.CreateSnapshotRequest))
+	})
+	return _c
+}
+
+func (_c *MilvusServiceServer_CreateSnapshot_Call) Return(_a0 *commonpb.Status, _a1 error) *MilvusServiceServer_CreateSnapshot_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *MilvusServiceServer_CreateSnapshot_Call) RunAndReturn(run func(context.Context, *milvuspb.CreateSnapshotRequest) (*commonpb.Status, error)) *MilvusServiceServer_CreateSnapshot_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // Delete provides a mock function with given fields: _a0, _a1
 func (_m *MilvusServiceServer) Delete(_a0 context.Context, _a1 *milvuspb.DeleteRequest) (*milvuspb.MutationResult, error) {
 	ret := _m.Called(_a0, _a1)
@@ -2138,6 +2256,65 @@ func (_c *MilvusServiceServer_DescribeSegmentIndexData_Call) RunAndReturn(run fu
 	return _c
 }
 
+// DescribeSnapshot provides a mock function with given fields: _a0, _a1
+func (_m *MilvusServiceServer) DescribeSnapshot(_a0 context.Context, _a1 *milvuspb.DescribeSnapshotRequest) (*milvuspb.DescribeSnapshotResponse, error) {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for DescribeSnapshot")
+	}
+
+	var r0 *milvuspb.DescribeSnapshotResponse
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.DescribeSnapshotRequest) (*milvuspb.DescribeSnapshotResponse, error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.DescribeSnapshotRequest) *milvuspb.DescribeSnapshotResponse); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*milvuspb.DescribeSnapshotResponse)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.DescribeSnapshotRequest) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MilvusServiceServer_DescribeSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DescribeSnapshot'
+type MilvusServiceServer_DescribeSnapshot_Call struct {
+	*mock.Call
+}
+
+// DescribeSnapshot is a helper method to define mock.On call
+//   - _a0 context.Context
+//   - _a1 *milvuspb.DescribeSnapshotRequest
+func (_e *MilvusServiceServer_Expecter) DescribeSnapshot(_a0 interface{}, _a1 interface{}) *MilvusServiceServer_DescribeSnapshot_Call {
+	return &MilvusServiceServer_DescribeSnapshot_Call{Call: _e.mock.On("DescribeSnapshot", _a0, _a1)}
+}
+
+func (_c *MilvusServiceServer_DescribeSnapshot_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.DescribeSnapshotRequest)) *MilvusServiceServer_DescribeSnapshot_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(*milvuspb.DescribeSnapshotRequest))
+	})
+	return _c
+}
+
+func (_c *MilvusServiceServer_DescribeSnapshot_Call) Return(_a0 *milvuspb.DescribeSnapshotResponse, _a1 error) *MilvusServiceServer_DescribeSnapshot_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *MilvusServiceServer_DescribeSnapshot_Call) RunAndReturn(run func(context.Context, *milvuspb.DescribeSnapshotRequest) (*milvuspb.DescribeSnapshotResponse, error)) *MilvusServiceServer_DescribeSnapshot_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // DropAlias provides a mock function with given fields: _a0, _a1
 func (_m *MilvusServiceServer) DropAlias(_a0 context.Context, _a1 *milvuspb.DropAliasRequest) (*commonpb.Status, error) {
 	ret := _m.Called(_a0, _a1)
@@ -2728,6 +2905,65 @@ func (_c *MilvusServiceServer_DropRowPolicy_Call) RunAndReturn(run func(context.
 	return _c
 }
 
+// DropSnapshot provides a mock function with given fields: _a0, _a1
+func (_m *MilvusServiceServer) DropSnapshot(_a0 context.Context, _a1 *milvuspb.DropSnapshotRequest) (*commonpb.Status, error) {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for DropSnapshot")
+	}
+
+	var r0 *commonpb.Status
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.DropSnapshotRequest) (*commonpb.Status, error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.DropSnapshotRequest) *commonpb.Status); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*commonpb.Status)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.DropSnapshotRequest) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MilvusServiceServer_DropSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropSnapshot'
+type MilvusServiceServer_DropSnapshot_Call struct {
+	*mock.Call
+}
+
+// DropSnapshot is a helper method to define mock.On call
+//   - _a0 context.Context
+//   - _a1 *milvuspb.DropSnapshotRequest
+func (_e *MilvusServiceServer_Expecter) DropSnapshot(_a0 interface{}, _a1 interface{}) *MilvusServiceServer_DropSnapshot_Call {
+	return &MilvusServiceServer_DropSnapshot_Call{Call: _e.mock.On("DropSnapshot", _a0, _a1)}
+}
+
+func (_c *MilvusServiceServer_DropSnapshot_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.DropSnapshotRequest)) *MilvusServiceServer_DropSnapshot_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(*milvuspb.DropSnapshotRequest))
+	})
+	return _c
+}
+
+func (_c *MilvusServiceServer_DropSnapshot_Call) Return(_a0 *commonpb.Status, _a1 error) *MilvusServiceServer_DropSnapshot_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *MilvusServiceServer_DropSnapshot_Call) RunAndReturn(run func(context.Context, *milvuspb.DropSnapshotRequest) (*commonpb.Status, error)) *MilvusServiceServer_DropSnapshot_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // Dummy provides a mock function with given fields: _a0, _a1
 func (_m *MilvusServiceServer) Dummy(_a0 context.Context, _a1 *milvuspb.DummyRequest) (*milvuspb.DummyResponse, error) {
 	ret := _m.Called(_a0, _a1)
@@ -3967,6 +4203,65 @@ func (_c *MilvusServiceServer_GetReplicateInfo_Call) RunAndReturn(run func(conte
 	return _c
 }
 
+// GetRestoreSnapshotState provides a mock function with given fields: _a0, _a1
+func (_m *MilvusServiceServer) GetRestoreSnapshotState(_a0 context.Context, _a1 *milvuspb.GetRestoreSnapshotStateRequest) (*milvuspb.GetRestoreSnapshotStateResponse, error) {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for GetRestoreSnapshotState")
+	}
+
+	var r0 *milvuspb.GetRestoreSnapshotStateResponse
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.GetRestoreSnapshotStateRequest) (*milvuspb.GetRestoreSnapshotStateResponse, error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.GetRestoreSnapshotStateRequest) *milvuspb.GetRestoreSnapshotStateResponse); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*milvuspb.GetRestoreSnapshotStateResponse)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.GetRestoreSnapshotStateRequest) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MilvusServiceServer_GetRestoreSnapshotState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRestoreSnapshotState'
+type MilvusServiceServer_GetRestoreSnapshotState_Call struct {
+	*mock.Call
+}
+
+// GetRestoreSnapshotState is a helper method to define mock.On call
+//   - _a0 context.Context
+//   - _a1 *milvuspb.GetRestoreSnapshotStateRequest
+func (_e *MilvusServiceServer_Expecter) GetRestoreSnapshotState(_a0 interface{}, _a1 interface{}) *MilvusServiceServer_GetRestoreSnapshotState_Call {
+	return &MilvusServiceServer_GetRestoreSnapshotState_Call{Call: _e.mock.On("GetRestoreSnapshotState", _a0, _a1)}
+}
+
+func (_c *MilvusServiceServer_GetRestoreSnapshotState_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.GetRestoreSnapshotStateRequest)) *MilvusServiceServer_GetRestoreSnapshotState_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(*milvuspb.GetRestoreSnapshotStateRequest))
+	})
+	return _c
+}
+
+func (_c *MilvusServiceServer_GetRestoreSnapshotState_Call) Return(_a0 *milvuspb.GetRestoreSnapshotStateResponse, _a1 error) *MilvusServiceServer_GetRestoreSnapshotState_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *MilvusServiceServer_GetRestoreSnapshotState_Call) RunAndReturn(run func(context.Context, *milvuspb.GetRestoreSnapshotStateRequest) (*milvuspb.GetRestoreSnapshotStateResponse, error)) *MilvusServiceServer_GetRestoreSnapshotState_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // GetUserTags provides a mock function with given fields: _a0, _a1
 func (_m *MilvusServiceServer) GetUserTags(_a0 context.Context, _a1 *milvuspb.GetUserTagsRequest) (*milvuspb.GetUserTagsResponse, error) {
 	ret := _m.Called(_a0, _a1)
@@ -4852,6 +5147,65 @@ func (_c *MilvusServiceServer_ListResourceGroups_Call) RunAndReturn(run func(con
 	return _c
 }
 
+// ListRestoreSnapshotJobs provides a mock function with given fields: _a0, _a1
+func (_m *MilvusServiceServer) ListRestoreSnapshotJobs(_a0 context.Context, _a1 *milvuspb.ListRestoreSnapshotJobsRequest) (*milvuspb.ListRestoreSnapshotJobsResponse, error) {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for ListRestoreSnapshotJobs")
+	}
+
+	var r0 *milvuspb.ListRestoreSnapshotJobsResponse
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ListRestoreSnapshotJobsRequest) (*milvuspb.ListRestoreSnapshotJobsResponse, error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ListRestoreSnapshotJobsRequest) *milvuspb.ListRestoreSnapshotJobsResponse); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*milvuspb.ListRestoreSnapshotJobsResponse)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.ListRestoreSnapshotJobsRequest) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MilvusServiceServer_ListRestoreSnapshotJobs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListRestoreSnapshotJobs'
+type MilvusServiceServer_ListRestoreSnapshotJobs_Call struct {
+	*mock.Call
+}
+
+// ListRestoreSnapshotJobs is a helper method to define mock.On call
+//   - _a0 context.Context
+//   - _a1 *milvuspb.ListRestoreSnapshotJobsRequest
+func (_e *MilvusServiceServer_Expecter) ListRestoreSnapshotJobs(_a0 interface{}, _a1 interface{}) *MilvusServiceServer_ListRestoreSnapshotJobs_Call {
+	return &MilvusServiceServer_ListRestoreSnapshotJobs_Call{Call: _e.mock.On("ListRestoreSnapshotJobs", _a0, _a1)}
+}
+
+func (_c *MilvusServiceServer_ListRestoreSnapshotJobs_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.ListRestoreSnapshotJobsRequest)) *MilvusServiceServer_ListRestoreSnapshotJobs_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(*milvuspb.ListRestoreSnapshotJobsRequest))
+	})
+	return _c
+}
+
+func (_c *MilvusServiceServer_ListRestoreSnapshotJobs_Call) Return(_a0 *milvuspb.ListRestoreSnapshotJobsResponse, _a1 error) *MilvusServiceServer_ListRestoreSnapshotJobs_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *MilvusServiceServer_ListRestoreSnapshotJobs_Call) RunAndReturn(run func(context.Context, *milvuspb.ListRestoreSnapshotJobsRequest) (*milvuspb.ListRestoreSnapshotJobsResponse, error)) *MilvusServiceServer_ListRestoreSnapshotJobs_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // ListRowPolicies provides a mock function with given fields: _a0, _a1
 func (_m *MilvusServiceServer) ListRowPolicies(_a0 context.Context, _a1 *milvuspb.ListRowPoliciesRequest) (*milvuspb.ListRowPoliciesResponse, error) {
 	ret := _m.Called(_a0, _a1)
@@ -4911,6 +5265,65 @@ func (_c *MilvusServiceServer_ListRowPolicies_Call) RunAndReturn(run func(contex
 	return _c
 }
 
+// ListSnapshots provides a mock function with given fields: _a0, _a1
+func (_m *MilvusServiceServer) ListSnapshots(_a0 context.Context, _a1 *milvuspb.ListSnapshotsRequest) (*milvuspb.ListSnapshotsResponse, error) {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for ListSnapshots")
+	}
+
+	var r0 *milvuspb.ListSnapshotsResponse
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ListSnapshotsRequest) (*milvuspb.ListSnapshotsResponse, error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ListSnapshotsRequest) *milvuspb.ListSnapshotsResponse); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*milvuspb.ListSnapshotsResponse)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.ListSnapshotsRequest) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MilvusServiceServer_ListSnapshots_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListSnapshots'
+type MilvusServiceServer_ListSnapshots_Call struct {
+	*mock.Call
+}
+
+// ListSnapshots is a helper method to define mock.On call
+//   - _a0 context.Context
+//   - _a1 *milvuspb.ListSnapshotsRequest
+func (_e *MilvusServiceServer_Expecter) ListSnapshots(_a0 interface{}, _a1 interface{}) *MilvusServiceServer_ListSnapshots_Call {
+	return &MilvusServiceServer_ListSnapshots_Call{Call: _e.mock.On("ListSnapshots", _a0, _a1)}
+}
+
+func (_c *MilvusServiceServer_ListSnapshots_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.ListSnapshotsRequest)) *MilvusServiceServer_ListSnapshots_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(*milvuspb.ListSnapshotsRequest))
+	})
+	return _c
+}
+
+func (_c *MilvusServiceServer_ListSnapshots_Call) Return(_a0 *milvuspb.ListSnapshotsResponse, _a1 error) *MilvusServiceServer_ListSnapshots_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *MilvusServiceServer_ListSnapshots_Call) RunAndReturn(run func(context.Context, *milvuspb.ListSnapshotsRequest) (*milvuspb.ListSnapshotsResponse, error)) *MilvusServiceServer_ListSnapshots_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // ListUsersWithTag provides a mock function with given fields: _a0, _a1
 func (_m *MilvusServiceServer) ListUsersWithTag(_a0 context.Context, _a1 *milvuspb.ListUsersWithTagRequest) (*milvuspb.ListUsersWithTagResponse, error) {
 	ret := _m.Called(_a0, _a1)
@@ -5914,6 +6327,65 @@ func (_c *MilvusServiceServer_RestoreRBAC_Call) RunAndReturn(run func(context.Co
 	return _c
 }
 
+// RestoreSnapshot provides a mock function with given fields: _a0, _a1
+func (_m *MilvusServiceServer) RestoreSnapshot(_a0 context.Context, _a1 *milvuspb.RestoreSnapshotRequest) (*milvuspb.RestoreSnapshotResponse, error) {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for RestoreSnapshot")
+	}
+
+	var r0 *milvuspb.RestoreSnapshotResponse
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.RestoreSnapshotRequest) (*milvuspb.RestoreSnapshotResponse, error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.RestoreSnapshotRequest) *milvuspb.RestoreSnapshotResponse); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*milvuspb.RestoreSnapshotResponse)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.RestoreSnapshotRequest) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MilvusServiceServer_RestoreSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RestoreSnapshot'
+type MilvusServiceServer_RestoreSnapshot_Call struct {
+	*mock.Call
+}
+
+// RestoreSnapshot is a helper method to define mock.On call
+//   - _a0 context.Context
+//   - _a1 *milvuspb.RestoreSnapshotRequest
+func (_e *MilvusServiceServer_Expecter) RestoreSnapshot(_a0 interface{}, _a1 interface{}) *MilvusServiceServer_RestoreSnapshot_Call {
+	return &MilvusServiceServer_RestoreSnapshot_Call{Call: _e.mock.On("RestoreSnapshot", _a0, _a1)}
+}
+
+func (_c *MilvusServiceServer_RestoreSnapshot_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.RestoreSnapshotRequest)) *MilvusServiceServer_RestoreSnapshot_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(*milvuspb.RestoreSnapshotRequest))
+	})
+	return _c
+}
+
+func (_c *MilvusServiceServer_RestoreSnapshot_Call) Return(_a0 *milvuspb.RestoreSnapshotResponse, _a1 error) *MilvusServiceServer_RestoreSnapshot_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *MilvusServiceServer_RestoreSnapshot_Call) RunAndReturn(run func(context.Context, *milvuspb.RestoreSnapshotRequest) (*milvuspb.RestoreSnapshotResponse, error)) *MilvusServiceServer_RestoreSnapshot_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // RunAnalyzer provides a mock function with given fields: _a0, _a1
 func (_m *MilvusServiceServer) RunAnalyzer(_a0 context.Context, _a1 *milvuspb.RunAnalyzerRequest) (*milvuspb.RunAnalyzerResponse, error) {
 	ret := _m.Called(_a0, _a1)
@@ -6445,6 +6917,65 @@ func (_c *MilvusServiceServer_TransferReplica_Call) RunAndReturn(run func(contex
 	return _c
 }
 
+// TruncateCollection provides a mock function with given fields: _a0, _a1
+func (_m *MilvusServiceServer) TruncateCollection(_a0 context.Context, _a1 *milvuspb.TruncateCollectionRequest) (*commonpb.Status, error) {
+	ret := _m.Called(_a0, _a1)
+
+	if len(ret) == 0 {
+		panic("no return value specified for TruncateCollection")
+	}
+
+	var r0 *commonpb.Status
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.TruncateCollectionRequest) (*commonpb.Status, error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.TruncateCollectionRequest) *commonpb.Status); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*commonpb.Status)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.TruncateCollectionRequest) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MilvusServiceServer_TruncateCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TruncateCollection'
+type MilvusServiceServer_TruncateCollection_Call struct {
+	*mock.Call
+}
+
+// TruncateCollection is a helper method to define mock.On call
+//   - _a0 context.Context
+//   - _a1 *milvuspb.TruncateCollectionRequest
+func (_e *MilvusServiceServer_Expecter) TruncateCollection(_a0 interface{}, _a1 interface{}) *MilvusServiceServer_TruncateCollection_Call {
+	return &MilvusServiceServer_TruncateCollection_Call{Call: _e.mock.On("TruncateCollection", _a0, _a1)}
+}
+
+func (_c *MilvusServiceServer_TruncateCollection_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.TruncateCollectionRequest)) *MilvusServiceServer_TruncateCollection_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(*milvuspb.TruncateCollectionRequest))
+	})
+	return _c
+}
+
+func (_c *MilvusServiceServer_TruncateCollection_Call) Return(_a0 *commonpb.Status, _a1 error) *MilvusServiceServer_TruncateCollection_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *MilvusServiceServer_TruncateCollection_Call) RunAndReturn(run func(context.Context, *milvuspb.TruncateCollectionRequest) (*commonpb.Status, error)) *MilvusServiceServer_TruncateCollection_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // UpdateCredential provides a mock function with given fields: _a0, _a1
 func (_m *MilvusServiceServer) UpdateCredential(_a0 context.Context, _a1 *milvuspb.UpdateCredentialRequest) (*commonpb.Status, error) {
 	ret := _m.Called(_a0, _a1)
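The nine blocks above are standard mockery-generated stubs for the new snapshot, phrase-match-slop, and truncate RPCs. A typical test would drive one of them through the Expecter API along these lines (a sketch; it assumes mockery's usual NewMilvusServiceServer constructor, which is outside this diff):

func TestListSnapshotsMock(t *testing.T) {
    // NewMilvusServiceServer is assumed here; mockery normally emits it
    // alongside the mock and registers expectation assertions on t.
    m := NewMilvusServiceServer(t)
    m.EXPECT().
        ListSnapshots(mock.Anything, mock.Anything).
        Return(&milvuspb.ListSnapshotsResponse{}, nil).
        Once()

    resp, err := m.ListSnapshots(context.Background(), &milvuspb.ListSnapshotsRequest{})
    assert.NoError(t, err)
    assert.NotNil(t, resp)
}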
go.mod (2 changes)

@@ -21,7 +21,7 @@ require (
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
 	github.com/klauspost/compress v1.18.0
 	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d
-	github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45
+	github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6
 	github.com/minio/minio-go/v7 v7.0.73
 	github.com/panjf2000/ants/v2 v2.11.3 // indirect
 	github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81 // indirect

go.sum (4 changes)

@@ -799,8 +799,8 @@ github.com/milvus-io/cgosymbolizer v0.0.0-20250318084424-114f4050c3a6 h1:YHMFI6L
 github.com/milvus-io/cgosymbolizer v0.0.0-20250318084424-114f4050c3a6/go.mod h1:DvXTE/K/RtHehxU8/GtDs4vFtfw64jJ3PaCnFri8CRg=
 github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b h1:TfeY0NxYxZzUfIfYe5qYDBzt4ZYRqzUjTR6CvUzjat8=
 github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b/go.mod h1:iwW+9cWfIzzDseEBCCeDSN5SD16Tidvy8cwQ7ZY8Qj4=
-github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45 h1:TMUhlirMCH2zgJD+qClP5EP0yuFl1VrE4j+0fiRSuJU=
-github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
+github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6 h1:TeHfsRCdjbX30xS7Npcb+POQXd460+AjmXYmmTuxyBA=
+github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
 github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
 github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
 github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
@@ -31,6 +31,7 @@ func RegisterDDLCallbacks(s *Server) {
 		Server: s,
 	}
 	ddlCallback.registerIndexCallbacks()
+	registry.RegisterFlushAllV2AckCallback(ddlCallback.flushAllV2AckCallback)
 }
 
 type DDLCallbacks struct {
internal/datacoord/ddl_callbacks_flushall.go (new file, 28 lines)

@@ -0,0 +1,28 @@
+// Licensed to the LF AI & Data foundation under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datacoord
+
+import (
+	"context"
+
+	"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
+)
+
+func (s *DDLCallbacks) flushAllV2AckCallback(ctx context.Context, result message.BroadcastResultFlushAllMessageV2) error {
+	// An empty callback to indicate it's a ddl or dcl operation.
+	return nil
+}
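The callback body is intentionally empty: registering it (the RegisterFlushAllV2AckCallback line above) is what marks FlushAll as a DDL/DCL-style broadcast, presumably so the broadcaster's ack machinery handles the message like other DDL messages. The test changes further below drive that ack path manually, roughly like this (condensed from the broadcaster mock in this diff):

// After the per-channel append results are assembled, invoke the registered
// ack callback, retrying until it succeeds.
retry.Do(context.Background(), func() error {
    return registry.CallMessageAckCallback(context.Background(), msg, results)
}, retry.AttemptAlways())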
@@ -1931,199 +1931,6 @@ func TestPostFlush(t *testing.T) {
 	})
 }
 
-func TestGetFlushAllState(t *testing.T) {
-	tests := []struct {
-		testName                 string
-		ChannelCPs               []Timestamp
-		FlushAllTs               Timestamp
-		ServerIsHealthy          bool
-		ListDatabaseFailed       bool
-		ShowCollectionFailed     bool
-		DescribeCollectionFailed bool
-		ExpectedSuccess          bool
-		ExpectedFlushed          bool
-	}{
-		{
-			"test FlushAll flushed",
-			[]Timestamp{100, 200},
-			99,
-			true, false, false, false, true, true,
-		},
-		{
-			"test FlushAll not flushed",
-			[]Timestamp{100, 200},
-			150,
-			true, false, false, false, true, false,
-		},
-		{
-			"test Sever is not healthy", nil, 0,
-			false, false, false, false, false, false,
-		},
-		{
-			"test ListDatabase failed", nil, 0,
-			true, true, false, false, false, false,
-		},
-		{
-			"test ShowCollections failed", nil, 0,
-			true, false, true, false, false, false,
-		},
-		{
-			"test DescribeCollection failed", nil, 0,
-			true, false, false, true, false, false,
-		},
-	}
-	for _, test := range tests {
-		t.Run(test.testName, func(t *testing.T) {
-			collection := UniqueID(0)
-			vchannels := []string{"mock-vchannel-0", "mock-vchannel-1"}
-
-			svr := &Server{}
-			if test.ServerIsHealthy {
-				svr.stateCode.Store(commonpb.StateCode_Healthy)
-			}
-			var err error
-			svr.meta = &meta{}
-			svr.mixCoord = mocks.NewMixCoord(t)
-			svr.broker = broker.NewCoordinatorBroker(svr.mixCoord)
-			if test.ListDatabaseFailed {
-				svr.mixCoord.(*mocks.MixCoord).EXPECT().ListDatabases(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, atr *milvuspb.ListDatabasesRequest) (*milvuspb.ListDatabasesResponse, error) {
-					return &milvuspb.ListDatabasesResponse{
-						Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError},
-					}, nil
-				}).Once()
-			} else {
-				svr.mixCoord.(*mocks.MixCoord).EXPECT().ListDatabases(mock.Anything, mock.Anything).
-					Return(&milvuspb.ListDatabasesResponse{
-						DbNames: []string{"db1"},
-						Status:  merr.Success(),
-					}, nil).Maybe()
-			}
-
-			if test.ShowCollectionFailed {
-				svr.mixCoord.(*mocks.MixCoord).EXPECT().ShowCollections(mock.Anything, mock.Anything).
-					Return(&milvuspb.ShowCollectionsResponse{
-						Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError},
-					}, nil).Maybe()
-			} else {
-				svr.mixCoord.(*mocks.MixCoord).EXPECT().ShowCollections(mock.Anything, mock.Anything).
-					Return(&milvuspb.ShowCollectionsResponse{
-						Status:        merr.Success(),
-						CollectionIds: []int64{collection},
-					}, nil).Maybe()
-			}
-
-			if test.DescribeCollectionFailed {
-				svr.mixCoord.(*mocks.MixCoord).EXPECT().DescribeCollectionInternal(mock.Anything, mock.Anything).
-					Return(&milvuspb.DescribeCollectionResponse{
-						Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError},
-					}, nil).Maybe()
-			} else {
-				svr.mixCoord.(*mocks.MixCoord).EXPECT().DescribeCollectionInternal(mock.Anything, mock.Anything).
-					Return(&milvuspb.DescribeCollectionResponse{
-						Status:              merr.Success(),
-						VirtualChannelNames: vchannels,
-					}, nil).Maybe()
-			}
-
-			svr.meta.channelCPs = newChannelCps()
-			for i, ts := range test.ChannelCPs {
-				channel := vchannels[i]
-				svr.meta.channelCPs.checkpoints[channel] = &msgpb.MsgPosition{
-					ChannelName: channel,
-					Timestamp:   ts,
-				}
-			}
-
-			resp, err := svr.GetFlushAllState(context.TODO(), &milvuspb.GetFlushAllStateRequest{FlushAllTs: test.FlushAllTs})
-			assert.NoError(t, err)
-			if test.ExpectedSuccess {
-				assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
-			} else if test.ServerIsHealthy {
-				assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
-			} else {
-				assert.ErrorIs(t, merr.Error(resp.GetStatus()), merr.ErrServiceNotReady)
-			}
-			assert.Equal(t, test.ExpectedFlushed, resp.GetFlushed())
-		})
-	}
-}
-
-func TestGetFlushAllStateWithDB(t *testing.T) {
-	tests := []struct {
-		testName        string
-		FlushAllTs      Timestamp
-		DbExist         bool
-		ExpectedSuccess bool
-		ExpectedFlushed bool
-	}{
-		{"test FlushAllWithDB, db exist", 99, true, true, true},
-		{"test FlushAllWithDB, db not exist", 99, false, false, false},
-	}
-	for _, test := range tests {
-		t.Run(test.testName, func(t *testing.T) {
-			collectionID := UniqueID(0)
-			dbName := "db"
-			collectionName := "collection"
-			vchannels := []string{"mock-vchannel-0", "mock-vchannel-1"}
-
-			svr := &Server{}
-			svr.stateCode.Store(commonpb.StateCode_Healthy)
-			var err error
-			svr.meta = &meta{}
-			svr.mixCoord = mocks.NewMixCoord(t)
-			svr.broker = broker.NewCoordinatorBroker(svr.mixCoord)
-
-			if test.DbExist {
-				svr.mixCoord.(*mocks.MixCoord).EXPECT().ListDatabases(mock.Anything, mock.Anything).
-					Return(&milvuspb.ListDatabasesResponse{
-						DbNames: []string{dbName},
-						Status:  merr.Success(),
-					}, nil).Maybe()
-			} else {
-				svr.mixCoord.(*mocks.MixCoord).EXPECT().ListDatabases(mock.Anything, mock.Anything).
-					Return(&milvuspb.ListDatabasesResponse{
-						DbNames: []string{},
-						Status:  merr.Success(),
-					}, nil).Maybe()
-			}
-
-			svr.mixCoord.(*mocks.MixCoord).EXPECT().ShowCollections(mock.Anything, mock.Anything).
-				Return(&milvuspb.ShowCollectionsResponse{
-					Status:        merr.Success(),
-					CollectionIds: []int64{collectionID},
-				}, nil).Maybe()
-
-			svr.mixCoord.(*mocks.MixCoord).EXPECT().DescribeCollectionInternal(mock.Anything, mock.Anything).
-				Return(&milvuspb.DescribeCollectionResponse{
-					Status:              merr.Success(),
-					VirtualChannelNames: vchannels,
-					CollectionID:        collectionID,
-					CollectionName:      collectionName,
-				}, nil).Maybe()
-
-			svr.meta.channelCPs = newChannelCps()
-			channelCPs := []Timestamp{100, 200}
-			for i, ts := range channelCPs {
-				channel := vchannels[i]
-				svr.meta.channelCPs.checkpoints[channel] = &msgpb.MsgPosition{
-					ChannelName: channel,
-					Timestamp:   ts,
-				}
-			}
-
-			var resp *milvuspb.GetFlushAllStateResponse
-			resp, err = svr.GetFlushAllState(context.TODO(), &milvuspb.GetFlushAllStateRequest{FlushAllTs: test.FlushAllTs, DbName: dbName})
-			assert.NoError(t, err)
-			if test.ExpectedSuccess {
-				assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
-			} else {
-				assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
-			}
-			assert.Equal(t, test.ExpectedFlushed, resp.GetFlushed())
-		})
-	}
-}
-
 func TestDataCoordServer_SetSegmentState(t *testing.T) {
 	t.Run("normal case", func(t *testing.T) {
 		svr := newTestServer(t)
@@ -21,21 +21,23 @@ import (
 	"fmt"
 	"math"
 	"strconv"
-	"sync"
 	"time"
 
 	"github.com/cockroachdb/errors"
 	"github.com/samber/lo"
 	"go.opentelemetry.io/otel"
 	"go.uber.org/zap"
-	"golang.org/x/sync/errgroup"
 
 	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
 	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
 	"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
 	"github.com/milvus-io/milvus/internal/coordinator/snmanager"
+	"github.com/milvus-io/milvus/internal/distributed/streaming"
 	"github.com/milvus-io/milvus/internal/metastore/kv/binlog"
 	"github.com/milvus-io/milvus/internal/storage"
+	"github.com/milvus-io/milvus/internal/streamingcoord/server/balancer/balance"
+	"github.com/milvus-io/milvus/internal/streamingcoord/server/balancer/channel"
+	"github.com/milvus-io/milvus/internal/streamingcoord/server/broadcaster/broadcast"
 	"github.com/milvus-io/milvus/internal/util/componentutil"
 	"github.com/milvus-io/milvus/internal/util/importutilv2"
 	"github.com/milvus-io/milvus/internal/util/segmentutil"
@@ -45,6 +47,7 @@ import (
 	"github.com/milvus-io/milvus/pkg/v2/metrics"
 	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
 	"github.com/milvus-io/milvus/pkg/v2/proto/internalpb"
+	"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
 	"github.com/milvus-io/milvus/pkg/v2/util/funcutil"
 	"github.com/milvus-io/milvus/pkg/v2/util/merr"
 	"github.com/milvus-io/milvus/pkg/v2/util/metricsinfo"
@@ -173,102 +176,78 @@ func (s *Server) flushCollection(ctx context.Context, collectionID UniqueID, flu
 	}, nil
 }
 
-func resolveCollectionsToFlush(ctx context.Context, s *Server, req *datapb.FlushAllRequest) ([]int64, error) {
-	collectionsToFlush := make([]int64, 0)
-	if len(req.GetFlushTargets()) > 0 {
-		// Use flush_targets from request
-		for _, target := range req.GetFlushTargets() {
-			collectionsToFlush = append(collectionsToFlush, target.GetCollectionIds()...)
-		}
-	} else if req.GetDbName() != "" {
-		// Backward compatibility: use deprecated db_name field
-		showColRsp, err := s.broker.ShowCollectionIDs(ctx, req.GetDbName())
-		if err != nil {
-			log.Warn("failed to ShowCollectionIDs", zap.String("db", req.GetDbName()), zap.Error(err))
-			return nil, err
-		}
-		for _, dbCollection := range showColRsp.GetDbCollections() {
-			collectionsToFlush = append(collectionsToFlush, dbCollection.GetCollectionIDs()...)
-		}
-	} else {
-		// Flush all databases
-		dbsResp, err := s.broker.ListDatabases(ctx)
-		if err != nil {
-			return nil, err
-		}
-		for _, dbName := range dbsResp.GetDbNames() {
-			showColRsp, err := s.broker.ShowCollectionIDs(ctx, dbName)
-			if err != nil {
-				log.Warn("failed to ShowCollectionIDs", zap.String("db", dbName), zap.Error(err))
-				return nil, err
-			}
-			for _, dbCollection := range showColRsp.GetDbCollections() {
-				collectionsToFlush = append(collectionsToFlush, dbCollection.GetCollectionIDs()...)
-			}
-		}
-	}
-
-	return collectionsToFlush, nil
-}
-
 func (s *Server) FlushAll(ctx context.Context, req *datapb.FlushAllRequest) (*datapb.FlushAllResponse, error) {
-	log := log.Ctx(ctx)
-	log.Info("receive flushAll request")
+	ctx, sp := otel.Tracer(typeutil.DataCoordRole).Start(ctx, "DataCoord-Flush")
+	defer sp.End()
+	log.Ctx(ctx).Info("receive FlushAll request")
 
 	if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
-		log.Info("server is not healthy", zap.Error(err), zap.Any("stateCode", s.GetStateCode()))
 		return &datapb.FlushAllResponse{
 			Status: merr.Status(err),
 		}, nil
 	}
 
-	// generate a timestamp timeOfSeal, all data before timeOfSeal is guaranteed to be sealed or flushed
-	ts, err := s.allocator.AllocTimestamp(ctx)
-	if err != nil {
-		log.Warn("unable to alloc timestamp", zap.Error(err))
-		return nil, err
-	}
-
-	// resolve collections to flush
-	collectionsToFlush, err := resolveCollectionsToFlush(ctx, s, req)
+	// Create a new broadcaster with exclusive cluster resource key.
+	broadcaster, err := broadcast.StartBroadcastWithResourceKeys(ctx, message.NewExclusiveClusterResourceKey())
 	if err != nil {
 		return &datapb.FlushAllResponse{
 			Status: merr.Status(err),
 		}, nil
 	}
+	defer broadcaster.Close()
 
-	var mu sync.Mutex
-	flushInfos := make([]*datapb.FlushResult, 0)
-	wg := errgroup.Group{}
-	// limit goroutine number to 100
-	wg.SetLimit(100)
-	for _, cid := range collectionsToFlush {
-		wg.Go(func() error {
-			flushResult, err := s.flushCollection(ctx, cid, ts, nil)
-			if err != nil {
-				log.Warn("failed to flush collection", zap.Int64("collectionID", cid), zap.Error(err))
-				return err
-			}
-			mu.Lock()
-			flushInfos = append(flushInfos, flushResult)
-			mu.Unlock()
-			return nil
-		})
-	}
-
-	err = wg.Wait()
+	// Get broadcast pchannels
+	balancer, err := balance.GetWithContext(ctx)
 	if err != nil {
 		return &datapb.FlushAllResponse{
 			Status: merr.Status(err),
 		}, nil
 	}
+	latestAssignment, err := balancer.GetLatestChannelAssignment()
+	if err != nil {
+		return &datapb.FlushAllResponse{
+			Status: merr.Status(err),
+		}, nil
+	}
+	controlChannel := streaming.WAL().ControlChannel()
+	pchannels := lo.MapToSlice(latestAssignment.PChannelView.Channels, func(_ channel.ChannelID, channel *channel.PChannelMeta) string {
+		return channel.Name()
+	})
+	broadcastPChannels := lo.Map(pchannels, func(pchannel string, _ int) string {
+		if funcutil.IsOnPhysicalChannel(controlChannel, pchannel) {
+			// return control channel if the control channel is on the pchannel.
+			return controlChannel
+		}
+		return pchannel
+	})
 
+	res, err := broadcaster.Broadcast(ctx, message.NewFlushAllMessageBuilderV2().
+		WithHeader(&message.FlushAllMessageHeader{}).
+		WithBody(&message.FlushAllMessageBody{}).
+		WithBroadcast(broadcastPChannels).
+		MustBuildBroadcast(),
+	)
+	if err != nil {
+		log.Ctx(ctx).Warn("broadcast FlushAllMessage fail", zap.Error(err))
+		return &datapb.FlushAllResponse{
+			Status: merr.Status(err),
+		}, nil
+	}
+	flushAllTss := make(map[string]uint64, len(res.AppendResults))
+	for appendChannel, result := range res.AppendResults {
+		// if is control channel, convert it to physical channel.
+		// it's ok to call ToPhysicalChannel even if it's a physical channel,
+		// so no need to check if it's a control channel here.
+		channel := funcutil.ToPhysicalChannel(appendChannel)
+		flushAllTss[channel] = result.TimeTick
+	}
+	log.Ctx(ctx).Info("FlushAll successfully", zap.Strings("broadcastedPChannels", broadcastPChannels), zap.Any("flushAllTss", flushAllTss))
 	return &datapb.FlushAllResponse{
-		Status:       merr.Success(),
-		FlushTs:      ts,
-		FlushResults: flushInfos,
+		Status:      merr.Success(),
+		FlushAllTss: flushAllTss,
+		ClusterInfo: &milvuspb.ClusterInfo{
+			ClusterId: Params.CommonCfg.ClusterID.GetValue(),
+			Cchannel:  controlChannel,
+			Pchannels: pchannels,
+		},
 	}, nil
 }
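One subtlety in the code above: the broadcast must reach every pchannel, but the pchannel that currently hosts the control channel is addressed via the control channel itself, and the append results are then folded back onto physical channel names with ToPhysicalChannel. A self-contained sketch of that mapping (the "@ctrl" suffix and helper names are assumptions for illustration, not the Milvus API):

package main

import (
    "fmt"
    "strings"
)

const ctrlSuffix = "@ctrl" // hypothetical marker for a control channel

func toPhysicalChannel(ch string) string { return strings.TrimSuffix(ch, ctrlSuffix) }

func isOnPhysicalChannel(ctrl, pchannel string) bool { return toPhysicalChannel(ctrl) == pchannel }

func main() {
    pchannels := []string{"dml_0", "dml_1"}
    ctrl := "dml_0" + ctrlSuffix

    // Broadcast targets: fold the hosting pchannel into the control channel.
    targets := make([]string, 0, len(pchannels))
    for _, p := range pchannels {
        if isOnPhysicalChannel(ctrl, p) {
            targets = append(targets, ctrl)
            continue
        }
        targets = append(targets, p)
    }

    // Map append results back onto physical channels; calling
    // toPhysicalChannel on a plain pchannel is a no-op.
    flushAllTss := map[string]uint64{}
    for i, ch := range targets {
        flushAllTss[toPhysicalChannel(ch)] = uint64(100 + i)
    }
    fmt.Println(targets, flushAllTss) // [dml_0@ctrl dml_1] map[dml_0:100 dml_1:101]
}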
@@ -1525,7 +1504,7 @@ func (s *Server) getChannelsByCollectionID(ctx context.Context, collectionID int
 
 // GetFlushAllState checks if all DML messages before `FlushAllTs` have been flushed.
 func (s *Server) GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushAllStateRequest) (*milvuspb.GetFlushAllStateResponse, error) {
-	log := log.Ctx(ctx)
+	log := log.Ctx(ctx).WithRateGroup("dc.GetFlushAllState", 1, 60)
 	if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
 		return &milvuspb.GetFlushAllStateResponse{
 			Status: merr.Status(err),
@@ -1533,10 +1512,13 @@ func (s *Server) GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushAll
 	}
 
 	resp := &milvuspb.GetFlushAllStateResponse{
-		Status:      merr.Success(),
-		FlushStates: make([]*milvuspb.FlushAllState, 0),
+		Status: merr.Success(),
 	}
 
+	// TODO: Introduce pchannel level flush checkpoint to
+	// check if the flush is complete.
+	// Rather than validate every vchannel checkpoint.
+
 	dbsRsp, err := s.broker.ListDatabases(ctx)
 	if err != nil {
 		log.Warn("failed to ListDatabases", zap.Error(err))
@@ -1544,53 +1526,10 @@ func (s *Server) GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushAll
 		return resp, nil
 	}
 
-	// Determine which databases to check
-	var targetDbs []string
-	if len(req.GetFlushTargets()) > 0 {
-		// Use flush_targets from request
-		for _, target := range req.GetFlushTargets() {
-			if target.GetDbName() != "" {
-				if !lo.Contains(dbsRsp.DbNames, target.GetDbName()) {
-					resp.Status = merr.Status(merr.WrapErrDatabaseNotFound(target.GetDbName()))
-					return resp, nil
-				}
-				targetDbs = append(targetDbs, target.GetDbName())
-			}
-		}
-	} else if req.GetDbName() != "" {
-		if !lo.Contains(dbsRsp.DbNames, req.GetDbName()) {
-			resp.Status = merr.Status(merr.WrapErrDatabaseNotFound(req.GetDbName()))
-			return resp, nil
-		}
-		// Backward compatibility: use deprecated db_name field
-		targetDbs = []string{req.GetDbName()}
-	} else {
-		// Check all databases
-		targetDbs = dbsRsp.DbNames
-	}
-
-	// Remove duplicates
-	targetDbs = lo.Uniq(targetDbs)
+	targetDbs := lo.Uniq(dbsRsp.DbNames)
 	allFlushed := true
+OUTER:
 	for _, dbName := range targetDbs {
-		flushState := &milvuspb.FlushAllState{
-			DbName:                dbName,
-			CollectionFlushStates: make(map[string]bool),
-		}
-
-		// Get collections to check for this database
-		var targetCollections []string
-		if len(req.GetFlushTargets()) > 0 {
-			// Check if specific collections are requested for this db
-			for _, target := range req.GetFlushTargets() {
-				if target.GetDbName() == dbName && len(target.GetCollectionNames()) > 0 {
-					targetCollections = target.GetCollectionNames()
-					break
-				}
-			}
-		}
-
 		showColRsp, err := s.broker.ShowCollections(ctx, dbName)
 		if err != nil {
 			log.Warn("failed to ShowCollections", zap.String("db", dbName), zap.Error(err))
@@ -1598,38 +1537,37 @@ func (s *Server) GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushAll
 			resp.Status = merr.Status(err)
 			return resp, nil
 		}
 
-		for idx, collectionID := range showColRsp.GetCollectionIds() {
-			collectionName := ""
-			if idx < len(showColRsp.GetCollectionNames()) {
-				collectionName = showColRsp.GetCollectionNames()[idx]
-			}
-
-			// If specific collections are requested, skip others
-			if len(targetCollections) > 0 && !lo.Contains(targetCollections, collectionName) {
-				continue
-			}
-
+		for _, collectionID := range showColRsp.GetCollectionIds() {
 			describeColRsp, err := s.broker.DescribeCollectionInternal(ctx, collectionID)
 			if err != nil {
-				log.Warn("failed to DescribeCollectionInternal",
-					zap.Int64("collectionID", collectionID), zap.Error(err))
+				log.Warn("failed to DescribeCollectionInternal", zap.Int64("collectionID", collectionID), zap.Error(err))
 				resp.Status = merr.Status(err)
 				return resp, nil
 			}
 
-			collectionFlushed := true
 			for _, channel := range describeColRsp.GetVirtualChannelNames() {
 				channelCP := s.meta.GetChannelCheckpoint(channel)
-				if channelCP == nil || channelCP.GetTimestamp() < req.GetFlushAllTs() {
-					collectionFlushed = false
+				pchannel := funcutil.ToPhysicalChannel(channel)
+				flushAllTs, ok := req.GetFlushAllTss()[pchannel]
+				if !ok || flushAllTs == 0 {
+					log.Warn("FlushAllTs not found for pchannel", zap.String("pchannel", pchannel), zap.Uint64("flushAllTs", flushAllTs))
+					resp.Status = merr.Status(merr.WrapErrParameterInvalidMsg("FlushAllTs not found for pchannel %s", pchannel))
+					return resp, nil
+				}
+				if channelCP == nil || channelCP.GetTimestamp() < flushAllTs {
 					allFlushed = false
-					break
+					log.RatedInfo(10, "channel unflushed",
+						zap.String("vchannel", channel),
+						zap.Uint64("flushAllTs", flushAllTs),
+						zap.Uint64("channelCP", channelCP.GetTimestamp()),
+					)
+					break OUTER
 				}
 			}
-			flushState.CollectionFlushStates[collectionName] = collectionFlushed
 		}
-
-		resp.FlushStates = append(resp.FlushStates, flushState)
 	}
 
+	if allFlushed {
+		log.Info("GetFlushAllState all flushed", zap.Any("flushAllTss", req.GetFlushAllTss()))
+	}
+
 	resp.Flushed = allFlushed
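GetFlushAllState now compares each vchannel checkpoint against the flush-all timetick recorded for that vchannel's physical channel, rather than against a single global FlushAllTs. A minimal sketch of that check (the vchannel-to-pchannel parsing rule here is an illustrative assumption; Milvus does this via funcutil.ToPhysicalChannel):

package main

import (
    "errors"
    "fmt"
    "strings"
)

func vchannelToPChannel(vchannel string) string {
    // Hypothetical rule: the pchannel is everything before the last "_suffix".
    if i := strings.LastIndex(vchannel, "_"); i > 0 {
        return vchannel[:i]
    }
    return vchannel
}

func checkFlushed(vchannelCPs map[string]uint64, flushAllTss map[string]uint64) (bool, error) {
    for vch, cp := range vchannelCPs {
        ts, ok := flushAllTss[vchannelToPChannel(vch)]
        if !ok || ts == 0 {
            return false, errors.New("FlushAllTs not found for pchannel " + vchannelToPChannel(vch))
        }
        if cp < ts {
            return false, nil // this channel still has unflushed data
        }
    }
    return true, nil
}

func main() {
    tss := map[string]uint64{"dml_0": 150}
    fmt.Println(checkFlushed(map[string]uint64{"dml_0_1v0": 100}, tss)) // false <nil>
    fmt.Println(checkFlushed(map[string]uint64{"dml_0_1v0": 200}, tss)) // true <nil>
}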
@ -24,26 +24,36 @@ import (
|
||||
"github.com/milvus-io/milvus/internal/coordinator/snmanager"
|
||||
"github.com/milvus-io/milvus/internal/datacoord/allocator"
|
||||
"github.com/milvus-io/milvus/internal/datacoord/broker"
|
||||
"github.com/milvus-io/milvus/internal/distributed/streaming"
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
"github.com/milvus-io/milvus/internal/metastore/mocks"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
mocks2 "github.com/milvus-io/milvus/internal/mocks"
|
||||
"github.com/milvus-io/milvus/internal/mocks/distributed/mock_streaming"
|
||||
"github.com/milvus-io/milvus/internal/mocks/streamingcoord/server/mock_balancer"
|
||||
"github.com/milvus-io/milvus/internal/mocks/streamingcoord/server/mock_broadcaster"
|
||||
"github.com/milvus-io/milvus/internal/storage"
|
||||
"github.com/milvus-io/milvus/internal/streamingcoord/server/balancer"
|
||||
"github.com/milvus-io/milvus/internal/streamingcoord/server/balancer/balance"
|
||||
"github.com/milvus-io/milvus/internal/streamingcoord/server/balancer/channel"
|
||||
"github.com/milvus-io/milvus/internal/streamingcoord/server/broadcaster/broadcast"
|
||||
"github.com/milvus-io/milvus/internal/streamingcoord/server/broadcaster/registry"
|
||||
"github.com/milvus-io/milvus/internal/tso"
|
||||
"github.com/milvus-io/milvus/internal/types"
|
||||
"github.com/milvus-io/milvus/pkg/v2/kv"
|
||||
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
|
||||
types2 "github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
|
||||
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/impls/rmq"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/funcutil"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/metautil"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/retry"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/tsoutil"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||
)
|
||||
|

@ -2107,418 +2117,72 @@ func TestServer_FlushAll(t *testing.T) {
assert.Error(t, merr.Error(resp.GetStatus()))
})

t.Run("allocator error", func(t *testing.T) {
t.Run("flush all successfully", func(t *testing.T) {
server := createTestFlushAllServer()
server.handler = NewNMockHandler(t)

// Mock allocator AllocTimestamp to return error
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(0), errors.New("alloc error")).Build()
defer mockAllocTimestamp.UnPatch()
// Mock WAL
wal := mock_streaming.NewMockWALAccesser(t)
wal.EXPECT().ControlChannel().Return(funcutil.GetControlChannel("by-dev-rootcoord-dml_0")).Maybe()
streaming.SetWALForTest(wal)

// Mock broadcaster
bapi := mock_broadcaster.NewMockBroadcastAPI(t)
bapi.EXPECT().Broadcast(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, msg message.BroadcastMutableMessage) (*types2.BroadcastAppendResult, error) {
results := make(map[string]*message.AppendResult)
for _, vchannel := range msg.BroadcastHeader().VChannels {
results[vchannel] = &message.AppendResult{
MessageID: rmq.NewRmqID(1),
TimeTick: tsoutil.ComposeTSByTime(time.Now(), 0),
LastConfirmedMessageID: rmq.NewRmqID(1),
}
}
retry.Do(context.Background(), func() error {
log.Info("broadcast message", log.FieldMessage(msg))
return registry.CallMessageAckCallback(context.Background(), msg, results)
}, retry.AttemptAlways())
return &types2.BroadcastAppendResult{
BroadcastID: 1,
AppendResults: lo.MapValues(results, func(result *message.AppendResult, vchannel string) *types2.AppendResult {
return &types2.AppendResult{
MessageID: result.MessageID,
TimeTick: result.TimeTick,
LastConfirmedMessageID: result.LastConfirmedMessageID,
}
}),
}, nil
})
bapi.EXPECT().Close().Return()

// Register mock broadcaster
mb := mock_broadcaster.NewMockBroadcaster(t)
mb.EXPECT().WithResourceKeys(mock.Anything, mock.Anything).Return(bapi, nil)
mb.EXPECT().Close().Return().Maybe()
broadcast.ResetBroadcaster()
broadcast.Register(mb)

// Register mock balancer
snmanager.ResetStreamingNodeManager()
b := mock_balancer.NewMockBalancer(t)
b.EXPECT().WatchChannelAssignments(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, callback balancer.WatchChannelAssignmentsCallback) error {
<-ctx.Done()
return ctx.Err()
}).Maybe()
b.EXPECT().GetLatestChannelAssignment().Return(&balancer.WatchChannelAssignmentsCallbackParam{
PChannelView: &channel.PChannelView{
Channels: map[channel.ChannelID]*channel.PChannelMeta{
{Name: "by-dev-1"}: channel.NewPChannelMeta("by-dev-1", types2.AccessModeRW),
},
},
}, nil)
b.EXPECT().WaitUntilWALbasedDDLReady(mock.Anything).Return(nil)
balance.Register(b)

req := &datapb.FlushAllRequest{}
resp, err := server.FlushAll(context.Background(), req)

assert.Error(t, err)
assert.Nil(t, resp)
})
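
The Broadcast mock above is doing two jobs: fabricating per-vchannel append results and then driving the registered ack callback, retrying until the callback has actually been registered by the server under test. A condensed sketch of that pattern (fakeAck is hypothetical; the registry and retry calls are the same ones used in the test):

// Sketch: ack a broadcast message with fabricated results, retrying until
// the message's ack callback has been registered.
fakeAck := func(ctx context.Context, msg message.BroadcastMutableMessage, results map[string]*message.AppendResult) error {
    return retry.Do(ctx, func() error {
        return registry.CallMessageAckCallback(ctx, msg, results)
    }, retry.AttemptAlways())
}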

t.Run("broker ListDatabases error", func(t *testing.T) {
server := createTestFlushAllServer()

// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()

// Mock broker ListDatabases to return error
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(nil, errors.New("list databases error")).Build()
defer mockListDatabases.UnPatch()

req := &datapb.FlushAllRequest{} // No specific targets, should list all databases

resp, err := server.FlushAll(context.Background(), req)

assert.NoError(t, err)
assert.Error(t, merr.Error(resp.GetStatus()))
})

t.Run("broker ShowCollectionIDs error", func(t *testing.T) {
server := createTestFlushAllServer()

// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()

// Mock broker ShowCollectionIDs to return error
mockShowCollectionIDs := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollectionIDs")).Return(nil, errors.New("broker error")).Build()
defer mockShowCollectionIDs.UnPatch()

req := &datapb.FlushAllRequest{
DbName: "test-db",
}

resp, err := server.FlushAll(context.Background(), req)

assert.NoError(t, err)
assert.Error(t, merr.Error(resp.GetStatus()))
})

t.Run("empty collections in database", func(t *testing.T) {
server := createTestFlushAllServer()

// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()

// Mock broker ShowCollectionIDs returns empty collections
mockShowCollectionIDs := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollectionIDs")).Return(&rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: "empty-db",
CollectionIDs: []int64{}, // Empty collections
},
},
}, nil).Build()
defer mockShowCollectionIDs.UnPatch()

req := &datapb.FlushAllRequest{
DbName: "empty-db",
}

resp, err := server.FlushAll(context.Background(), req)

assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, uint64(12345), resp.GetFlushTs())
assert.Equal(t, 0, len(resp.GetFlushResults()))
})

t.Run("flush specific database successfully", func(t *testing.T) {
server := createTestFlushAllServer()
server.handler = NewNMockHandler(t) // Initialize handler with testing.T

// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()

// Mock broker ShowCollectionIDs
mockShowCollectionIDs := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollectionIDs")).Return(&rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: "test-db",
CollectionIDs: []int64{100, 101},
},
},
}, nil).Build()
defer mockShowCollectionIDs.UnPatch()

// Add collections to server meta with collection names
server.meta.AddCollection(&collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "collection1",
},
VChannelNames: []string{"channel1"},
})
server.meta.AddCollection(&collectionInfo{
ID: 101,
Schema: &schemapb.CollectionSchema{
Name: "collection2",
},
VChannelNames: []string{"channel2"},
})

// Mock handler GetCollection to return collection info
mockGetCollection := mockey.Mock(mockey.GetMethod(server.handler, "GetCollection")).To(func(ctx context.Context, collectionID int64) (*collectionInfo, error) {
if collectionID == 100 {
return &collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "collection1",
},
}, nil
} else if collectionID == 101 {
return &collectionInfo{
ID: 101,
Schema: &schemapb.CollectionSchema{
Name: "collection2",
},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockGetCollection.UnPatch()

// Mock flushCollection to return success results
mockFlushCollection := mockey.Mock(mockey.GetMethod(server, "flushCollection")).To(func(ctx context.Context, collectionID int64, flushTs uint64, toFlushSegments []int64) (*datapb.FlushResult, error) {
var collectionName string
if collectionID == 100 {
collectionName = "collection1"
} else if collectionID == 101 {
collectionName = "collection2"
}

return &datapb.FlushResult{
CollectionID: collectionID,
DbName: "test-db",
CollectionName: collectionName,
SegmentIDs: []int64{1000 + collectionID, 2000 + collectionID},
FlushSegmentIDs: []int64{1000 + collectionID, 2000 + collectionID},
TimeOfSeal: 12300,
FlushTs: flushTs,
ChannelCps: make(map[string]*msgpb.MsgPosition),
}, nil
}).Build()
defer mockFlushCollection.UnPatch()

req := &datapb.FlushAllRequest{
DbName: "test-db",
}

resp, err := server.FlushAll(context.Background(), req)

assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, uint64(12345), resp.GetFlushTs())
assert.Equal(t, 2, len(resp.GetFlushResults()))

// Verify flush results
resultMap := make(map[int64]*datapb.FlushResult)
for _, result := range resp.GetFlushResults() {
resultMap[result.GetCollectionID()] = result
}

assert.Contains(t, resultMap, int64(100))
assert.Contains(t, resultMap, int64(101))
assert.Equal(t, "test-db", resultMap[100].GetDbName())
assert.Equal(t, "collection1", resultMap[100].GetCollectionName())
assert.Equal(t, "collection2", resultMap[101].GetCollectionName())
})
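
All of these cases lean on mockey's runtime patching rather than interface mocks, which is why even unexported methods like flushCollection can be stubbed. The pattern in isolation (a sketch; the patched method and return values are placeholders):

// Patch a method for the duration of a test, then restore it.
patch := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).
    Return(&milvuspb.ListDatabasesResponse{Status: merr.Success()}, nil).
    Build()
defer patch.UnPatch()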

t.Run("flush with specific flush targets successfully", func(t *testing.T) {
server := createTestFlushAllServer()
server.handler = NewNMockHandler(t) // Initialize handler with testing.T

// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()

// Mock broker ShowCollectionIDs
mockShowCollectionIDs := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollectionIDs")).Return(&rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: "test-db",
CollectionIDs: []int64{100, 101},
},
},
}, nil).Build()
defer mockShowCollectionIDs.UnPatch()

// Add collections to server meta with collection names
server.meta.AddCollection(&collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "target-collection",
},
VChannelNames: []string{"channel1"},
})
server.meta.AddCollection(&collectionInfo{
ID: 101,
Schema: &schemapb.CollectionSchema{
Name: "other-collection",
},
VChannelNames: []string{"channel2"},
})

// Mock handler GetCollection to return collection info
mockGetCollection := mockey.Mock(mockey.GetMethod(server.handler, "GetCollection")).To(func(ctx context.Context, collectionID int64) (*collectionInfo, error) {
if collectionID == 100 {
return &collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "target-collection",
},
}, nil
} else if collectionID == 101 {
return &collectionInfo{
ID: 101,
Schema: &schemapb.CollectionSchema{
Name: "other-collection",
},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockGetCollection.UnPatch()

// Mock flushCollection to return success result
mockFlushCollection := mockey.Mock(mockey.GetMethod(server, "flushCollection")).To(func(ctx context.Context, collectionID int64, flushTs uint64, toFlushSegments []int64) (*datapb.FlushResult, error) {
return &datapb.FlushResult{
CollectionID: collectionID,
DbName: "test-db",
CollectionName: "target-collection",
SegmentIDs: []int64{1100, 2100},
FlushSegmentIDs: []int64{1100, 2100},
TimeOfSeal: 12300,
FlushTs: flushTs,
ChannelCps: make(map[string]*msgpb.MsgPosition),
}, nil
}).Build()
defer mockFlushCollection.UnPatch()

req := &datapb.FlushAllRequest{
FlushTargets: []*datapb.FlushAllTarget{
{
DbName: "test-db",
CollectionIds: []int64{100},
},
},
}

resp, err := server.FlushAll(context.Background(), req)

assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, uint64(12345), resp.GetFlushTs())
assert.Equal(t, 1, len(resp.GetFlushResults()))

// Verify only the target collection was flushed
result := resp.GetFlushResults()[0]
assert.Equal(t, int64(100), result.GetCollectionID())
assert.Equal(t, "test-db", result.GetDbName())
assert.Equal(t, "target-collection", result.GetCollectionName())
assert.Equal(t, []int64{1100, 2100}, result.GetSegmentIDs())
assert.Equal(t, []int64{1100, 2100}, result.GetFlushSegmentIDs())
})

t.Run("flush all databases successfully", func(t *testing.T) {
server := createTestFlushAllServer()
server.handler = NewNMockHandler(t) // Initialize handler with testing.T

// Mock allocator AllocTimestamp
mockAllocTimestamp := mockey.Mock(mockey.GetMethod(server.allocator, "AllocTimestamp")).Return(uint64(12345), nil).Build()
defer mockAllocTimestamp.UnPatch()

// Mock broker ListDatabases
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"db1", "db2"},
}, nil).Build()
defer mockListDatabases.UnPatch()

// Mock broker ShowCollectionIDs for different databases
mockShowCollectionIDs := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollectionIDs")).To(func(ctx context.Context, dbNames ...string) (*rootcoordpb.ShowCollectionIDsResponse, error) {
if len(dbNames) == 0 {
return nil, errors.New("no database names provided")
}
dbName := dbNames[0] // Use the first database name
if dbName == "db1" {
return &rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: "db1",
CollectionIDs: []int64{100},
},
},
}, nil
}
if dbName == "db2" {
return &rootcoordpb.ShowCollectionIDsResponse{
Status: merr.Success(),
DbCollections: []*rootcoordpb.DBCollections{
{
DbName: "db2",
CollectionIDs: []int64{200},
},
},
}, nil
}
return nil, errors.New("unknown database")
}).Build()
defer mockShowCollectionIDs.UnPatch()

// Add collections to server meta with collection names
server.meta.AddCollection(&collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "collection1",
},
VChannelNames: []string{"channel1"},
})
server.meta.AddCollection(&collectionInfo{
ID: 200,
Schema: &schemapb.CollectionSchema{
Name: "collection2",
},
VChannelNames: []string{"channel2"},
})

// Mock handler GetCollection to return collection info
mockGetCollection := mockey.Mock(mockey.GetMethod(server.handler, "GetCollection")).To(func(ctx context.Context, collectionID int64) (*collectionInfo, error) {
if collectionID == 100 {
return &collectionInfo{
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "collection1",
},
}, nil
} else if collectionID == 200 {
return &collectionInfo{
ID: 200,
Schema: &schemapb.CollectionSchema{
Name: "collection2",
},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockGetCollection.UnPatch()

// Mock flushCollection for different collections
mockFlushCollection := mockey.Mock(mockey.GetMethod(server, "flushCollection")).To(func(ctx context.Context, collectionID int64, flushTs uint64, toFlushSegments []int64) (*datapb.FlushResult, error) {
var dbName, collectionName string
if collectionID == 100 {
dbName = "db1"
collectionName = "collection1"
} else if collectionID == 200 {
dbName = "db2"
collectionName = "collection2"
}

return &datapb.FlushResult{
CollectionID: collectionID,
DbName: dbName,
CollectionName: collectionName,
SegmentIDs: []int64{collectionID + 1000, collectionID + 2000},
FlushSegmentIDs: []int64{collectionID + 1000, collectionID + 2000},
TimeOfSeal: 12300,
FlushTs: flushTs,
ChannelCps: make(map[string]*msgpb.MsgPosition),
}, nil
}).Build()
defer mockFlushCollection.UnPatch()

req := &datapb.FlushAllRequest{} // No specific targets, flush all databases

resp, err := server.FlushAll(context.Background(), req)

assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, uint64(12345), resp.GetFlushTs())
assert.Equal(t, 2, len(resp.GetFlushResults()))

// Verify results from both databases
resultMap := make(map[string]*datapb.FlushResult)
for _, result := range resp.GetFlushResults() {
resultMap[result.GetDbName()] = result
}

assert.Contains(t, resultMap, "db1")
assert.Contains(t, resultMap, "db2")
assert.Equal(t, int64(100), resultMap["db1"].GetCollectionID())
assert.Equal(t, int64(200), resultMap["db2"].GetCollectionID())
})
}
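
End to end, a caller would issue FlushAll and then poll GetFlushAllState until the cluster reports flushed. A hedged sketch of that flow; the GetFlushAllTss accessor on the FlushAll response is an assumption inferred from the request type used below:

// Hypothetical caller-side flow: trigger FlushAll, then poll until flushed.
flushResp, err := server.FlushAll(ctx, &datapb.FlushAllRequest{})
if err != nil {
    return err
}
for {
    stateResp, err := server.GetFlushAllState(ctx, &milvuspb.GetFlushAllStateRequest{
        FlushAllTss: flushResp.GetFlushAllTss(), // assumed accessor
    })
    if err := merr.CheckRPCCall(stateResp, err); err != nil {
        return err
    }
    if stateResp.GetFlushed() {
        return nil
    }
    time.Sleep(100 * time.Millisecond)
}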

@ -2543,9 +2207,7 @@ func TestServer_GetFlushAllState(t *testing.T) {
server := &Server{}
server.stateCode.Store(commonpb.StateCode_Abnormal)

req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
}
req := &milvuspb.GetFlushAllStateRequest{}
resp, err := server.GetFlushAllState(context.Background(), req)

assert.NoError(t, err)
@ -2559,9 +2221,7 @@ func TestServer_GetFlushAllState(t *testing.T) {
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(nil, errors.New("list databases error")).Build()
defer mockListDatabases.UnPatch()

req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
}
req := &milvuspb.GetFlushAllStateRequest{}

resp, err := server.GetFlushAllState(context.Background(), req)

@ -2569,7 +2229,7 @@ func TestServer_GetFlushAllState(t *testing.T) {
assert.Error(t, merr.Error(resp.GetStatus()))
})

t.Run("check all databases", func(t *testing.T) {
t.Run("all flushed", func(t *testing.T) {
server := createTestGetFlushAllStateServer()

// Mock ListDatabases
@ -2622,71 +2282,19 @@ func TestServer_GetFlushAllState(t *testing.T) {
server.meta.channelCPs.checkpoints["channel2"] = &msgpb.MsgPosition{Timestamp: 15000}

req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345, // No specific targets, check all databases
FlushAllTss: map[string]uint64{
"channel1": 15000,
"channel2": 15000,
},
}

resp, err := server.GetFlushAllState(context.Background(), req)

assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 2, len(resp.GetFlushStates()))

// Check both databases are present
dbNames := make(map[string]bool)
for _, flushState := range resp.GetFlushStates() {
dbNames[flushState.GetDbName()] = true
}
assert.True(t, dbNames["db1"])
assert.True(t, dbNames["db2"])
assert.True(t, resp.GetFlushed()) // Overall flushed
assert.NoError(t, merr.CheckRPCCall(resp, err))
assert.True(t, resp.GetFlushed())
})

t.Run("channel checkpoint not found", func(t *testing.T) {
server := createTestGetFlushAllStateServer()

// Mock ListDatabases
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"test-db"},
}, nil).Build()
defer mockListDatabases.UnPatch()

// Mock ShowCollections
mockShowCollections := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollections")).Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{100},
CollectionNames: []string{"collection1"},
}, nil).Build()
defer mockShowCollections.UnPatch()

// Mock DescribeCollectionInternal
mockDescribeCollection := mockey.Mock(mockey.GetMethod(server.broker, "DescribeCollectionInternal")).Return(&milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel1"},
}, nil).Build()
defer mockDescribeCollection.UnPatch()

// No channel checkpoint set - should be considered not flushed

req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
DbName: "test-db",
}

resp, err := server.GetFlushAllState(context.Background(), req)

assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 1, len(resp.GetFlushStates()))

flushState := resp.GetFlushStates()[0]
assert.Equal(t, "test-db", flushState.GetDbName())
assert.Equal(t, 1, len(flushState.GetCollectionFlushStates()))
assert.False(t, flushState.GetCollectionFlushStates()["collection1"]) // Not flushed
assert.False(t, resp.GetFlushed()) // Overall not flushed
})

t.Run("channel checkpoint timestamp too low", func(t *testing.T) {
t.Run("not flushed, channel checkpoint too old", func(t *testing.T) {
server := createTestGetFlushAllStateServer()

// Mock ListDatabases
@ -2715,218 +2323,15 @@ func TestServer_GetFlushAllState(t *testing.T) {
server.meta.channelCPs.checkpoints["channel1"] = &msgpb.MsgPosition{Timestamp: 10000}

req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
DbName: "test-db",
}

resp, err := server.GetFlushAllState(context.Background(), req)

assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 1, len(resp.GetFlushStates()))

flushState := resp.GetFlushStates()[0]
assert.Equal(t, "test-db", flushState.GetDbName())
assert.Equal(t, 1, len(flushState.GetCollectionFlushStates()))
assert.False(t, flushState.GetCollectionFlushStates()["collection1"]) // Not flushed
assert.False(t, resp.GetFlushed()) // Overall not flushed
})

t.Run("specific database flushed successfully", func(t *testing.T) {
server := createTestGetFlushAllStateServer()

// Mock ListDatabases (called even when DbName is specified)
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"test-db"},
}, nil).Build()
defer mockListDatabases.UnPatch()

// Mock ShowCollections for specific database
mockShowCollections := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollections")).Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{100, 101},
CollectionNames: []string{"collection1", "collection2"},
}, nil).Build()
defer mockShowCollections.UnPatch()

// Mock DescribeCollectionInternal
mockDescribeCollection := mockey.Mock(mockey.GetMethod(server.broker, "DescribeCollectionInternal")).To(func(ctx context.Context, collectionID int64) (*milvuspb.DescribeCollectionResponse, error) {
if collectionID == 100 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel1"},
}, nil
}
if collectionID == 101 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel2"},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockDescribeCollection.UnPatch()

// Setup channel checkpoints - both flushed (timestamps higher than FlushAllTs)
server.meta.channelCPs.checkpoints["channel1"] = &msgpb.MsgPosition{Timestamp: 15000}
server.meta.channelCPs.checkpoints["channel2"] = &msgpb.MsgPosition{Timestamp: 16000}

req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
DbName: "test-db",
}

resp, err := server.GetFlushAllState(context.Background(), req)

assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 1, len(resp.GetFlushStates()))

flushState := resp.GetFlushStates()[0]
assert.Equal(t, "test-db", flushState.GetDbName())
assert.Equal(t, 2, len(flushState.GetCollectionFlushStates()))
assert.True(t, flushState.GetCollectionFlushStates()["collection1"]) // Flushed
assert.True(t, flushState.GetCollectionFlushStates()["collection2"]) // Flushed
assert.True(t, resp.GetFlushed()) // Overall flushed
})

t.Run("check with flush targets successfully", func(t *testing.T) {
server := createTestGetFlushAllStateServer()

// Mock ListDatabases (called even when FlushTargets are specified)
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"test-db"},
}, nil).Build()
defer mockListDatabases.UnPatch()

// Mock ShowCollections for specific database
mockShowCollections := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollections")).Return(&milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{100, 101},
CollectionNames: []string{"target-collection", "other-collection"},
}, nil).Build()
defer mockShowCollections.UnPatch()

// Mock DescribeCollectionInternal
mockDescribeCollection := mockey.Mock(mockey.GetMethod(server.broker, "DescribeCollectionInternal")).To(func(ctx context.Context, collectionID int64) (*milvuspb.DescribeCollectionResponse, error) {
if collectionID == 100 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel1"},
}, nil
}
if collectionID == 101 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel2"},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockDescribeCollection.UnPatch()

// Setup channel checkpoints - target collection flushed, other not checked
server.meta.channelCPs.checkpoints["channel1"] = &msgpb.MsgPosition{Timestamp: 15000}
server.meta.channelCPs.checkpoints["channel2"] = &msgpb.MsgPosition{Timestamp: 10000} // Won't be checked due to filtering

req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345,
FlushTargets: []*milvuspb.FlushAllTarget{
{
DbName: "test-db",
CollectionNames: []string{"target-collection"},
},
FlushAllTss: map[string]uint64{
"channel1": 15000,
},
}

resp, err := server.GetFlushAllState(context.Background(), req)

assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 1, len(resp.GetFlushStates()))

flushState := resp.GetFlushStates()[0]
assert.Equal(t, "test-db", flushState.GetDbName())
assert.Equal(t, 1, len(flushState.GetCollectionFlushStates())) // Only target collection checked
assert.True(t, flushState.GetCollectionFlushStates()["target-collection"]) // Flushed
assert.True(t, resp.GetFlushed()) // Overall flushed (only checking target collection)
})

t.Run("mixed flush states - partial success", func(t *testing.T) {
server := createTestGetFlushAllStateServer()

// Mock ListDatabases
mockListDatabases := mockey.Mock(mockey.GetMethod(server.broker, "ListDatabases")).Return(&milvuspb.ListDatabasesResponse{
Status: merr.Success(),
DbNames: []string{"db1", "db2"},
}, nil).Build()
defer mockListDatabases.UnPatch()

// Mock ShowCollections for different databases
mockShowCollections := mockey.Mock(mockey.GetMethod(server.broker, "ShowCollections")).To(func(ctx context.Context, dbName string) (*milvuspb.ShowCollectionsResponse, error) {
if dbName == "db1" {
return &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{100},
CollectionNames: []string{"collection1"},
}, nil
}
if dbName == "db2" {
return &milvuspb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIds: []int64{200},
CollectionNames: []string{"collection2"},
}, nil
}
return nil, errors.New("unknown db")
}).Build()
defer mockShowCollections.UnPatch()

// Mock DescribeCollectionInternal
mockDescribeCollection := mockey.Mock(mockey.GetMethod(server.broker, "DescribeCollectionInternal")).To(func(ctx context.Context, collectionID int64) (*milvuspb.DescribeCollectionResponse, error) {
if collectionID == 100 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel1"},
}, nil
}
if collectionID == 200 {
return &milvuspb.DescribeCollectionResponse{
Status: merr.Success(),
VirtualChannelNames: []string{"channel2"},
}, nil
}
return nil, errors.New("collection not found")
}).Build()
defer mockDescribeCollection.UnPatch()

// Setup channel checkpoints - db1 flushed, db2 not flushed
server.meta.channelCPs.checkpoints["channel1"] = &msgpb.MsgPosition{Timestamp: 15000} // Flushed
server.meta.channelCPs.checkpoints["channel2"] = &msgpb.MsgPosition{Timestamp: 10000} // Not flushed

req := &milvuspb.GetFlushAllStateRequest{
FlushAllTs: 12345, // Check all databases
}

resp, err := server.GetFlushAllState(context.Background(), req)

assert.NoError(t, err)
assert.NoError(t, merr.Error(resp.GetStatus()))
assert.Equal(t, 2, len(resp.GetFlushStates()))

// Verify mixed flush states
stateMap := make(map[string]*milvuspb.FlushAllState)
for _, state := range resp.GetFlushStates() {
stateMap[state.GetDbName()] = state
}

assert.Contains(t, stateMap, "db1")
assert.Contains(t, stateMap, "db2")
assert.True(t, stateMap["db1"].GetCollectionFlushStates()["collection1"]) // db1 flushed
assert.False(t, stateMap["db2"].GetCollectionFlushStates()["collection2"]) // db2 not flushed
assert.False(t, resp.GetFlushed()) // Overall not flushed due to db2
assert.NoError(t, merr.CheckRPCCall(resp, err))
assert.False(t, resp.GetFlushed())
})
}
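
The timestamps these tests compare are TSO values, so "flushed" simply means the channel checkpoint has advanced past the timestamp minted when FlushAll ran. A small illustration using tsoutil from the imports above (values are arbitrary):

// TSO timestamps are totally ordered; checkpoint >= flushAllTs means the
// channel has consumed everything written before the FlushAll point.
flushAllTs := tsoutil.ComposeTSByTime(time.Now(), 0)
checkpointTs := tsoutil.ComposeTSByTime(time.Now().Add(time.Second), 0)
flushed := checkpointTs >= flushAllTs // true
_ = flushed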

@ -274,6 +274,14 @@ func (ddn *ddNode) Operate(in []Msg) []Msg {
} else {
logger.Info("handle manual flush message success")
}
case commonpb.MsgType_FlushAll:
flushAllMsg := msg.(*adaptor.FlushAllMessageBody)
log.Info("receive flush all message",
zap.String("vchannel", ddn.Name()),
zap.Int32("msgType", int32(msg.Type())),
zap.Uint64("timetick", flushAllMsg.FlushAllMessage.TimeTick()),
)
ddn.msgHandler.HandleFlushAll(ddn.vChannelName, flushAllMsg.FlushAllMessage)
case commonpb.MsgType_AddCollectionField:
schemaMsg := msg.(*adaptor.SchemaChangeMessageBody)
logger := log.With(
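
A MsgHandler implementation behind this new case would plausibly seal everything buffered on the vchannel and record the message time tick as the flush watermark. A sketch under that assumption (flushAllHandler and the writebuffer package path are hypothetical; only the interface shapes come from this PR):

// Hypothetical handler: seal all growing segments, then set the flush ts.
type flushAllHandler struct {
    wbMgr writebuffer.BufferManager
}

func (h *flushAllHandler) HandleFlushAll(vchannel string, msg message.ImmutableFlushAllMessageV2) error {
    ctx := context.Background()
    if err := h.wbMgr.SealAllSegments(ctx, vchannel); err != nil {
        return err
    }
    return h.wbMgr.FlushChannel(ctx, vchannel, msg.TimeTick())
}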
@ -99,6 +99,7 @@ func TestFlowGraph_DDNode_OperateFlush(t *testing.T) {
h.EXPECT().HandleCreateSegment(mock.Anything, mock.Anything).Return(nil)
h.EXPECT().HandleFlush(mock.Anything).Return(nil)
h.EXPECT().HandleManualFlush(mock.Anything).Return(nil)
h.EXPECT().HandleFlushAll(mock.Anything, mock.Anything).Return(nil)

ddn := ddNode{
ctx: context.Background(),
@ -131,14 +132,24 @@ func TestFlowGraph_DDNode_OperateFlush(t *testing.T) {
assert.NoError(t, err)
immutableManualFlushMsg := manualFlushMsg.WithTimeTick(3).IntoImmutableMessage(mock_message.NewMockMessageID(t))

flushAllMsg, err := message.NewFlushAllMessageBuilderV2().
WithHeader(&message.FlushAllMessageHeader{}).
WithBody(&message.FlushAllMessageBody{}).
WithVChannel("v1").
BuildMutable()
assert.NoError(t, err)
immutableFlushAllMsg := flushAllMsg.WithTimeTick(4).IntoImmutableMessage(mock_message.NewMockMessageID(t))

msg1, err := adaptor.NewCreateSegmentMessageBody(immutableCreateSegmentMsg)
assert.NoError(t, err)
msg2, err := adaptor.NewFlushMessageBody(immutableFlushMsg)
assert.NoError(t, err)
msg3, err := adaptor.NewManualFlushMessageBody(immutableManualFlushMsg)
assert.NoError(t, err)
msg4, err := adaptor.NewFlushAllMessageBody(immutableFlushAllMsg)
assert.NoError(t, err)

tsMessages := []msgstream.TsMsg{msg1, msg2, msg3}
tsMessages := []msgstream.TsMsg{msg1, msg2, msg3, msg4}
var msgStreamMsg Msg = flowgraph.GenerateMsgStreamMsg(tsMessages, 0, 0, nil, nil)
outputMsgs := ddn.Operate([]Msg{msgStreamMsg})
assert.NotNil(t, outputMsgs)

@ -31,6 +31,8 @@ type MsgHandler interface {

HandleManualFlush(flushMsg message.ImmutableManualFlushMessageV2) error

HandleFlushAll(vchannel string, flushAllMsg message.ImmutableFlushAllMessageV2) error

HandleSchemaChange(ctx context.Context, schemaChangeMsg message.ImmutableSchemaChangeMessageV2) error

HandleAlterCollection(ctx context.Context, alterCollectionMsg message.ImmutableAlterCollectionMessageV2) error

@ -31,6 +31,8 @@ type BufferManager interface {
// SealSegments notifies the writeBuffer corresponding to the provided channel to seal segments,
// which causes those segments to start the flush procedure.
SealSegments(ctx context.Context, channel string, segmentIDs []int64) error
// SealAllSegments notifies writeBuffer to seal all segments.
SealAllSegments(ctx context.Context, channel string) error
// FlushChannel sets the flushTs of the provided write buffer.
FlushChannel(ctx context.Context, channel string, flushTs uint64) error
// RemoveChannel removes a write buffer from manager.
@ -199,6 +201,19 @@ func (m *bufferManager) SealSegments(ctx context.Context, channel string, segmen
return buf.SealSegments(ctx, segmentIDs)
}

// SealAllSegments seals all segments in the write buffer.
func (m *bufferManager) SealAllSegments(ctx context.Context, channel string) error {
buf, loaded := m.buffers.Get(channel)
if !loaded {
log.Ctx(ctx).Warn("write buffer not found",
zap.String("channel", channel))
return merr.WrapErrChannelNotFound(channel)
}

buf.SealAllSegments(ctx)
return nil
}
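
Callers would typically pair SealAllSegments with FlushChannel, mirroring the manual-flush path. A minimal usage sketch, assuming a BufferManager m, a vchannel name, and a FlushAll time tick ts:

// Seal everything currently growing on the channel, then let the flush
// policy sync data up to the FlushAll time tick.
if err := m.SealAllSegments(ctx, vchannel); err != nil {
    return err
}
if err := m.FlushChannel(ctx, vchannel, ts); err != nil {
    return err
}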

func (m *bufferManager) FlushChannel(ctx context.Context, channel string, flushTs uint64) error {
buf, loaded := m.buffers.Get(channel)
if !loaded {

@ -102,6 +102,19 @@ func (s *ManagerSuite) TestFlushSegments() {
err := manager.SealSegments(ctx, s.channelName, []int64{1})
s.NoError(err)
})

s.Run("seal all segments", func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

wb := NewMockWriteBuffer(s.T())
s.manager.buffers.Insert(s.channelName, wb)

wb.EXPECT().SealAllSegments(mock.Anything).Return()

err := manager.SealAllSegments(ctx, s.channelName)
s.NoError(err)
})
}

func (s *ManagerSuite) TestCreateNewGrowingSegment() {

@ -434,6 +434,53 @@ func (_c *MockBufferManager_RemoveChannel_Call) RunAndReturn(run func(string)) *
return _c
}

// SealAllSegments provides a mock function with given fields: ctx, channel
func (_m *MockBufferManager) SealAllSegments(ctx context.Context, channel string) error {
ret := _m.Called(ctx, channel)

if len(ret) == 0 {
panic("no return value specified for SealAllSegments")
}

var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, channel)
} else {
r0 = ret.Error(0)
}

return r0
}

// MockBufferManager_SealAllSegments_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SealAllSegments'
type MockBufferManager_SealAllSegments_Call struct {
*mock.Call
}

// SealAllSegments is a helper method to define mock.On call
// - ctx context.Context
// - channel string
func (_e *MockBufferManager_Expecter) SealAllSegments(ctx interface{}, channel interface{}) *MockBufferManager_SealAllSegments_Call {
return &MockBufferManager_SealAllSegments_Call{Call: _e.mock.On("SealAllSegments", ctx, channel)}
}

func (_c *MockBufferManager_SealAllSegments_Call) Run(run func(ctx context.Context, channel string)) *MockBufferManager_SealAllSegments_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(string))
})
return _c
}

func (_c *MockBufferManager_SealAllSegments_Call) Return(_a0 error) *MockBufferManager_SealAllSegments_Call {
_c.Call.Return(_a0)
return _c
}

func (_c *MockBufferManager_SealAllSegments_Call) RunAndReturn(run func(context.Context, string) error) *MockBufferManager_SealAllSegments_Call {
_c.Call.Return(run)
return _c
}

// SealSegments provides a mock function with given fields: ctx, channel, segmentIDs
func (_m *MockBufferManager) SealSegments(ctx context.Context, channel string, segmentIDs []int64) error {
ret := _m.Called(ctx, channel, segmentIDs)

@ -404,6 +404,39 @@ func (_c *MockWriteBuffer_MemorySize_Call) RunAndReturn(run func() int64) *MockW
return _c
}

// SealAllSegments provides a mock function with given fields: ctx
func (_m *MockWriteBuffer) SealAllSegments(ctx context.Context) {
_m.Called(ctx)
}

// MockWriteBuffer_SealAllSegments_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SealAllSegments'
type MockWriteBuffer_SealAllSegments_Call struct {
*mock.Call
}

// SealAllSegments is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockWriteBuffer_Expecter) SealAllSegments(ctx interface{}) *MockWriteBuffer_SealAllSegments_Call {
return &MockWriteBuffer_SealAllSegments_Call{Call: _e.mock.On("SealAllSegments", ctx)}
}

func (_c *MockWriteBuffer_SealAllSegments_Call) Run(run func(ctx context.Context)) *MockWriteBuffer_SealAllSegments_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context))
})
return _c
}

func (_c *MockWriteBuffer_SealAllSegments_Call) Return() *MockWriteBuffer_SealAllSegments_Call {
_c.Call.Return()
return _c
}

func (_c *MockWriteBuffer_SealAllSegments_Call) RunAndReturn(run func(context.Context)) *MockWriteBuffer_SealAllSegments_Call {
_c.Run(run)
return _c
}

// SealSegments provides a mock function with given fields: ctx, segmentIDs
func (_m *MockWriteBuffer) SealSegments(ctx context.Context, segmentIDs []int64) error {
ret := _m.Called(ctx, segmentIDs)

@ -47,6 +47,8 @@ type WriteBuffer interface {
GetFlushTimestamp() uint64
// SealSegments is the method to perform `Sync` operation with provided options.
SealSegments(ctx context.Context, segmentIDs []int64) error
// SealAllSegments seals all segments in the write buffer.
SealAllSegments(ctx context.Context)
// DropPartitions marks segments of the given partitions as Dropped.
DropPartitions(partitionIDs []int64)
// GetCheckpoint returns current channel checkpoint.
@ -186,6 +188,15 @@ func (wb *writeBufferBase) SealSegments(ctx context.Context, segmentIDs []int64)
return wb.sealSegments(ctx, segmentIDs)
}

func (wb *writeBufferBase) SealAllSegments(ctx context.Context) {
wb.mut.RLock()
defer wb.mut.RUnlock()

// mark all segments sealed if they were growing
wb.metaCache.UpdateSegments(metacache.UpdateState(commonpb.SegmentState_Sealed),
metacache.WithSegmentState(commonpb.SegmentState_Growing))
}
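
UpdateSegments takes an update action plus filter predicates, so the seal is a single metadata pass with no per-segment loop. The same call spelled out step by step (illustrative only; the API usage is copied from the method above):

// For every segment currently Growing, transition its cached state to
// Sealed; segments in any other state are left untouched.
action := metacache.UpdateState(commonpb.SegmentState_Sealed)
filter := metacache.WithSegmentState(commonpb.SegmentState_Growing)
wb.metaCache.UpdateSegments(action, filter)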

func (wb *writeBufferBase) DropPartitions(partitionIDs []int64) {
wb.mut.RLock()
defer wb.mut.RUnlock()

@ -83,6 +83,13 @@ func (s *WriteBufferSuite) TestFlushSegments() {
s.NoError(err)
}

func (s *WriteBufferSuite) TestSealAllSegments() {
s.metacache.EXPECT().UpdateSegments(mock.Anything, mock.Anything, mock.Anything).Return()
wb, err := NewWriteBuffer(s.channelName, s.metacache, s.syncMgr, WithIDAllocator(allocator.NewMockAllocator(s.T())))
s.NoError(err)
wb.SealAllSegments(context.Background())
}

func (s *WriteBufferSuite) TestGetCheckpoint() {
s.Run("use_consume_cp", func() {
s.wb.checkpoint = &msgpb.MsgPosition{

@ -162,6 +162,53 @@ func (_c *MockMsgHandler_HandleFlush_Call) RunAndReturn(run func(message.Immutab
return _c
}

// HandleFlushAll provides a mock function with given fields: vchannel, flushAllMsg
func (_m *MockMsgHandler) HandleFlushAll(vchannel string, flushAllMsg message.ImmutableFlushAllMessageV2) error {
ret := _m.Called(vchannel, flushAllMsg)

if len(ret) == 0 {
panic("no return value specified for HandleFlushAll")
}

var r0 error
if rf, ok := ret.Get(0).(func(string, message.ImmutableFlushAllMessageV2) error); ok {
r0 = rf(vchannel, flushAllMsg)
} else {
r0 = ret.Error(0)
}

return r0
}

// MockMsgHandler_HandleFlushAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HandleFlushAll'
type MockMsgHandler_HandleFlushAll_Call struct {
*mock.Call
}

// HandleFlushAll is a helper method to define mock.On call
// - vchannel string
// - flushAllMsg message.ImmutableFlushAllMessageV2
func (_e *MockMsgHandler_Expecter) HandleFlushAll(vchannel interface{}, flushAllMsg interface{}) *MockMsgHandler_HandleFlushAll_Call {
return &MockMsgHandler_HandleFlushAll_Call{Call: _e.mock.On("HandleFlushAll", vchannel, flushAllMsg)}
}

func (_c *MockMsgHandler_HandleFlushAll_Call) Run(run func(vchannel string, flushAllMsg message.ImmutableFlushAllMessageV2)) *MockMsgHandler_HandleFlushAll_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(string), args[1].(message.ImmutableFlushAllMessageV2))
})
return _c
}

func (_c *MockMsgHandler_HandleFlushAll_Call) Return(_a0 error) *MockMsgHandler_HandleFlushAll_Call {
_c.Call.Return(_a0)
return _c
}

func (_c *MockMsgHandler_HandleFlushAll_Call) RunAndReturn(run func(string, message.ImmutableFlushAllMessageV2) error) *MockMsgHandler_HandleFlushAll_Call {
_c.Call.Return(run)
return _c
}
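
In a test, the generated expecter reads as below (mirroring the ddNode test earlier in this diff; NewMockMsgHandler is the usual mockery constructor for this mock):

// Expect exactly one FlushAll for vchannel "v1" and make it succeed.
h := NewMockMsgHandler(t)
h.EXPECT().HandleFlushAll("v1", mock.Anything).Return(nil).Once()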

// HandleManualFlush provides a mock function with given fields: flushMsg
func (_m *MockMsgHandler) HandleManualFlush(flushMsg message.ImmutableManualFlushMessageV2) error {
ret := _m.Called(flushMsg)

@ -918,6 +918,65 @@ func (_c *MockProxy_CheckHealth_Call) RunAndReturn(run func(context.Context, *mi
return _c
}

// ComputePhraseMatchSlop provides a mock function with given fields: _a0, _a1
func (_m *MockProxy) ComputePhraseMatchSlop(_a0 context.Context, _a1 *milvuspb.ComputePhraseMatchSlopRequest) (*milvuspb.ComputePhraseMatchSlopResponse, error) {
ret := _m.Called(_a0, _a1)

if len(ret) == 0 {
panic("no return value specified for ComputePhraseMatchSlop")
}

var r0 *milvuspb.ComputePhraseMatchSlopResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ComputePhraseMatchSlopRequest) (*milvuspb.ComputePhraseMatchSlopResponse, error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ComputePhraseMatchSlopRequest) *milvuspb.ComputePhraseMatchSlopResponse); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*milvuspb.ComputePhraseMatchSlopResponse)
}
}

if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.ComputePhraseMatchSlopRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// MockProxy_ComputePhraseMatchSlop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ComputePhraseMatchSlop'
type MockProxy_ComputePhraseMatchSlop_Call struct {
*mock.Call
}

// ComputePhraseMatchSlop is a helper method to define mock.On call
// - _a0 context.Context
// - _a1 *milvuspb.ComputePhraseMatchSlopRequest
func (_e *MockProxy_Expecter) ComputePhraseMatchSlop(_a0 interface{}, _a1 interface{}) *MockProxy_ComputePhraseMatchSlop_Call {
return &MockProxy_ComputePhraseMatchSlop_Call{Call: _e.mock.On("ComputePhraseMatchSlop", _a0, _a1)}
}

func (_c *MockProxy_ComputePhraseMatchSlop_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.ComputePhraseMatchSlopRequest)) *MockProxy_ComputePhraseMatchSlop_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*milvuspb.ComputePhraseMatchSlopRequest))
})
return _c
}

func (_c *MockProxy_ComputePhraseMatchSlop_Call) Return(_a0 *milvuspb.ComputePhraseMatchSlopResponse, _a1 error) *MockProxy_ComputePhraseMatchSlop_Call {
_c.Call.Return(_a0, _a1)
return _c
}

func (_c *MockProxy_ComputePhraseMatchSlop_Call) RunAndReturn(run func(context.Context, *milvuspb.ComputePhraseMatchSlopRequest) (*milvuspb.ComputePhraseMatchSlopResponse, error)) *MockProxy_ComputePhraseMatchSlop_Call {
_c.Call.Return(run)
return _c
}
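
Usage of the generated proxy mock follows the same expecter pattern; a sketch (the response's Status field is an assumption about the proto, everything else follows the mockery convention):

// Wire a canned response on the generated proxy mock.
p := NewMockProxy(t)
p.EXPECT().ComputePhraseMatchSlop(mock.Anything, mock.Anything).
    Return(&milvuspb.ComputePhraseMatchSlopResponse{Status: merr.Success()}, nil)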

// Connect provides a mock function with given fields: _a0, _a1
func (_m *MockProxy) Connect(_a0 context.Context, _a1 *milvuspb.ConnectRequest) (*milvuspb.ConnectResponse, error) {
ret := _m.Called(_a0, _a1)
@ -1613,6 +1672,65 @@ func (_c *MockProxy_CreateRowPolicy_Call) RunAndReturn(run func(context.Context,
return _c
}

// CreateSnapshot provides a mock function with given fields: _a0, _a1
func (_m *MockProxy) CreateSnapshot(_a0 context.Context, _a1 *milvuspb.CreateSnapshotRequest) (*commonpb.Status, error) {
ret := _m.Called(_a0, _a1)

if len(ret) == 0 {
panic("no return value specified for CreateSnapshot")
}

var r0 *commonpb.Status
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.CreateSnapshotRequest) (*commonpb.Status, error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.CreateSnapshotRequest) *commonpb.Status); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*commonpb.Status)
}
}

if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.CreateSnapshotRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// MockProxy_CreateSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateSnapshot'
type MockProxy_CreateSnapshot_Call struct {
*mock.Call
}

// CreateSnapshot is a helper method to define mock.On call
// - _a0 context.Context
// - _a1 *milvuspb.CreateSnapshotRequest
func (_e *MockProxy_Expecter) CreateSnapshot(_a0 interface{}, _a1 interface{}) *MockProxy_CreateSnapshot_Call {
return &MockProxy_CreateSnapshot_Call{Call: _e.mock.On("CreateSnapshot", _a0, _a1)}
}

func (_c *MockProxy_CreateSnapshot_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.CreateSnapshotRequest)) *MockProxy_CreateSnapshot_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*milvuspb.CreateSnapshotRequest))
})
return _c
}

func (_c *MockProxy_CreateSnapshot_Call) Return(_a0 *commonpb.Status, _a1 error) *MockProxy_CreateSnapshot_Call {
_c.Call.Return(_a0, _a1)
return _c
}

func (_c *MockProxy_CreateSnapshot_Call) RunAndReturn(run func(context.Context, *milvuspb.CreateSnapshotRequest) (*commonpb.Status, error)) *MockProxy_CreateSnapshot_Call {
_c.Call.Return(run)
return _c
}

// Delete provides a mock function with given fields: _a0, _a1
func (_m *MockProxy) Delete(_a0 context.Context, _a1 *milvuspb.DeleteRequest) (*milvuspb.MutationResult, error) {
ret := _m.Called(_a0, _a1)
@ -2144,6 +2262,65 @@ func (_c *MockProxy_DescribeSegmentIndexData_Call) RunAndReturn(run func(context
return _c
}

// DescribeSnapshot provides a mock function with given fields: _a0, _a1
func (_m *MockProxy) DescribeSnapshot(_a0 context.Context, _a1 *milvuspb.DescribeSnapshotRequest) (*milvuspb.DescribeSnapshotResponse, error) {
ret := _m.Called(_a0, _a1)

if len(ret) == 0 {
panic("no return value specified for DescribeSnapshot")
}

var r0 *milvuspb.DescribeSnapshotResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.DescribeSnapshotRequest) (*milvuspb.DescribeSnapshotResponse, error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.DescribeSnapshotRequest) *milvuspb.DescribeSnapshotResponse); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*milvuspb.DescribeSnapshotResponse)
}
}

if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.DescribeSnapshotRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// MockProxy_DescribeSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DescribeSnapshot'
type MockProxy_DescribeSnapshot_Call struct {
*mock.Call
}

// DescribeSnapshot is a helper method to define mock.On call
// - _a0 context.Context
// - _a1 *milvuspb.DescribeSnapshotRequest
func (_e *MockProxy_Expecter) DescribeSnapshot(_a0 interface{}, _a1 interface{}) *MockProxy_DescribeSnapshot_Call {
return &MockProxy_DescribeSnapshot_Call{Call: _e.mock.On("DescribeSnapshot", _a0, _a1)}
}

func (_c *MockProxy_DescribeSnapshot_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.DescribeSnapshotRequest)) *MockProxy_DescribeSnapshot_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*milvuspb.DescribeSnapshotRequest))
})
return _c
}

func (_c *MockProxy_DescribeSnapshot_Call) Return(_a0 *milvuspb.DescribeSnapshotResponse, _a1 error) *MockProxy_DescribeSnapshot_Call {
_c.Call.Return(_a0, _a1)
return _c
}

func (_c *MockProxy_DescribeSnapshot_Call) RunAndReturn(run func(context.Context, *milvuspb.DescribeSnapshotRequest) (*milvuspb.DescribeSnapshotResponse, error)) *MockProxy_DescribeSnapshot_Call {
_c.Call.Return(run)
return _c
}

// DropAlias provides a mock function with given fields: _a0, _a1
func (_m *MockProxy) DropAlias(_a0 context.Context, _a1 *milvuspb.DropAliasRequest) (*commonpb.Status, error) {
ret := _m.Called(_a0, _a1)
@ -2734,6 +2911,65 @@ func (_c *MockProxy_DropRowPolicy_Call) RunAndReturn(run func(context.Context, *
return _c
}

// DropSnapshot provides a mock function with given fields: _a0, _a1
func (_m *MockProxy) DropSnapshot(_a0 context.Context, _a1 *milvuspb.DropSnapshotRequest) (*commonpb.Status, error) {
ret := _m.Called(_a0, _a1)

if len(ret) == 0 {
panic("no return value specified for DropSnapshot")
}

var r0 *commonpb.Status
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.DropSnapshotRequest) (*commonpb.Status, error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.DropSnapshotRequest) *commonpb.Status); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*commonpb.Status)
}
}

if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.DropSnapshotRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// MockProxy_DropSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropSnapshot'
type MockProxy_DropSnapshot_Call struct {
*mock.Call
}

// DropSnapshot is a helper method to define mock.On call
// - _a0 context.Context
// - _a1 *milvuspb.DropSnapshotRequest
func (_e *MockProxy_Expecter) DropSnapshot(_a0 interface{}, _a1 interface{}) *MockProxy_DropSnapshot_Call {
return &MockProxy_DropSnapshot_Call{Call: _e.mock.On("DropSnapshot", _a0, _a1)}
}

func (_c *MockProxy_DropSnapshot_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.DropSnapshotRequest)) *MockProxy_DropSnapshot_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*milvuspb.DropSnapshotRequest))
})
return _c
}

func (_c *MockProxy_DropSnapshot_Call) Return(_a0 *commonpb.Status, _a1 error) *MockProxy_DropSnapshot_Call {
_c.Call.Return(_a0, _a1)
return _c
}

func (_c *MockProxy_DropSnapshot_Call) RunAndReturn(run func(context.Context, *milvuspb.DropSnapshotRequest) (*commonpb.Status, error)) *MockProxy_DropSnapshot_Call {
_c.Call.Return(run)
return _c
}

// Dummy provides a mock function with given fields: _a0, _a1
func (_m *MockProxy) Dummy(_a0 context.Context, _a1 *milvuspb.DummyRequest) (*milvuspb.DummyResponse, error) {
ret := _m.Called(_a0, _a1)
@ -4311,6 +4547,65 @@ func (_c *MockProxy_GetReplicateInfo_Call) RunAndReturn(run func(context.Context
return _c
}

// GetRestoreSnapshotState provides a mock function with given fields: _a0, _a1
func (_m *MockProxy) GetRestoreSnapshotState(_a0 context.Context, _a1 *milvuspb.GetRestoreSnapshotStateRequest) (*milvuspb.GetRestoreSnapshotStateResponse, error) {
ret := _m.Called(_a0, _a1)

if len(ret) == 0 {
panic("no return value specified for GetRestoreSnapshotState")
}

var r0 *milvuspb.GetRestoreSnapshotStateResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.GetRestoreSnapshotStateRequest) (*milvuspb.GetRestoreSnapshotStateResponse, error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.GetRestoreSnapshotStateRequest) *milvuspb.GetRestoreSnapshotStateResponse); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*milvuspb.GetRestoreSnapshotStateResponse)
}
}

if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.GetRestoreSnapshotStateRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// MockProxy_GetRestoreSnapshotState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRestoreSnapshotState'
type MockProxy_GetRestoreSnapshotState_Call struct {
*mock.Call
}

// GetRestoreSnapshotState is a helper method to define mock.On call
// - _a0 context.Context
// - _a1 *milvuspb.GetRestoreSnapshotStateRequest
func (_e *MockProxy_Expecter) GetRestoreSnapshotState(_a0 interface{}, _a1 interface{}) *MockProxy_GetRestoreSnapshotState_Call {
return &MockProxy_GetRestoreSnapshotState_Call{Call: _e.mock.On("GetRestoreSnapshotState", _a0, _a1)}
}

func (_c *MockProxy_GetRestoreSnapshotState_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.GetRestoreSnapshotStateRequest)) *MockProxy_GetRestoreSnapshotState_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(*milvuspb.GetRestoreSnapshotStateRequest))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockProxy_GetRestoreSnapshotState_Call) Return(_a0 *milvuspb.GetRestoreSnapshotStateResponse, _a1 error) *MockProxy_GetRestoreSnapshotState_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockProxy_GetRestoreSnapshotState_Call) RunAndReturn(run func(context.Context, *milvuspb.GetRestoreSnapshotStateRequest) (*milvuspb.GetRestoreSnapshotStateResponse, error)) *MockProxy_GetRestoreSnapshotState_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetSegmentsInfo provides a mock function with given fields: _a0, _a1
|
||||
func (_m *MockProxy) GetSegmentsInfo(_a0 context.Context, _a1 *internalpb.GetSegmentsInfoRequest) (*internalpb.GetSegmentsInfoResponse, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
@ -5713,6 +6008,65 @@ func (_c *MockProxy_ListResourceGroups_Call) RunAndReturn(run func(context.Conte
|
||||
return _c
|
||||
}
|
||||
|
||||
// ListRestoreSnapshotJobs provides a mock function with given fields: _a0, _a1
|
||||
func (_m *MockProxy) ListRestoreSnapshotJobs(_a0 context.Context, _a1 *milvuspb.ListRestoreSnapshotJobsRequest) (*milvuspb.ListRestoreSnapshotJobsResponse, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for ListRestoreSnapshotJobs")
|
||||
}
|
||||
|
||||
var r0 *milvuspb.ListRestoreSnapshotJobsResponse
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ListRestoreSnapshotJobsRequest) (*milvuspb.ListRestoreSnapshotJobsResponse, error)); ok {
|
||||
return rf(_a0, _a1)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ListRestoreSnapshotJobsRequest) *milvuspb.ListRestoreSnapshotJobsResponse); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*milvuspb.ListRestoreSnapshotJobsResponse)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.ListRestoreSnapshotJobsRequest) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockProxy_ListRestoreSnapshotJobs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListRestoreSnapshotJobs'
|
||||
type MockProxy_ListRestoreSnapshotJobs_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// ListRestoreSnapshotJobs is a helper method to define mock.On call
|
||||
// - _a0 context.Context
|
||||
// - _a1 *milvuspb.ListRestoreSnapshotJobsRequest
|
||||
func (_e *MockProxy_Expecter) ListRestoreSnapshotJobs(_a0 interface{}, _a1 interface{}) *MockProxy_ListRestoreSnapshotJobs_Call {
|
||||
return &MockProxy_ListRestoreSnapshotJobs_Call{Call: _e.mock.On("ListRestoreSnapshotJobs", _a0, _a1)}
|
||||
}
|
||||
|
||||
func (_c *MockProxy_ListRestoreSnapshotJobs_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.ListRestoreSnapshotJobsRequest)) *MockProxy_ListRestoreSnapshotJobs_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(*milvuspb.ListRestoreSnapshotJobsRequest))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockProxy_ListRestoreSnapshotJobs_Call) Return(_a0 *milvuspb.ListRestoreSnapshotJobsResponse, _a1 error) *MockProxy_ListRestoreSnapshotJobs_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockProxy_ListRestoreSnapshotJobs_Call) RunAndReturn(run func(context.Context, *milvuspb.ListRestoreSnapshotJobsRequest) (*milvuspb.ListRestoreSnapshotJobsResponse, error)) *MockProxy_ListRestoreSnapshotJobs_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// ListRowPolicies provides a mock function with given fields: _a0, _a1
|
||||
func (_m *MockProxy) ListRowPolicies(_a0 context.Context, _a1 *milvuspb.ListRowPoliciesRequest) (*milvuspb.ListRowPoliciesResponse, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
@ -5772,6 +6126,65 @@ func (_c *MockProxy_ListRowPolicies_Call) RunAndReturn(run func(context.Context,
|
||||
return _c
|
||||
}
|
||||
|
||||
// ListSnapshots provides a mock function with given fields: _a0, _a1
|
||||
func (_m *MockProxy) ListSnapshots(_a0 context.Context, _a1 *milvuspb.ListSnapshotsRequest) (*milvuspb.ListSnapshotsResponse, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for ListSnapshots")
|
||||
}
|
||||
|
||||
var r0 *milvuspb.ListSnapshotsResponse
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ListSnapshotsRequest) (*milvuspb.ListSnapshotsResponse, error)); ok {
|
||||
return rf(_a0, _a1)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ListSnapshotsRequest) *milvuspb.ListSnapshotsResponse); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*milvuspb.ListSnapshotsResponse)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.ListSnapshotsRequest) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockProxy_ListSnapshots_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListSnapshots'
|
||||
type MockProxy_ListSnapshots_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// ListSnapshots is a helper method to define mock.On call
|
||||
// - _a0 context.Context
|
||||
// - _a1 *milvuspb.ListSnapshotsRequest
|
||||
func (_e *MockProxy_Expecter) ListSnapshots(_a0 interface{}, _a1 interface{}) *MockProxy_ListSnapshots_Call {
|
||||
return &MockProxy_ListSnapshots_Call{Call: _e.mock.On("ListSnapshots", _a0, _a1)}
|
||||
}
|
||||
|
||||
func (_c *MockProxy_ListSnapshots_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.ListSnapshotsRequest)) *MockProxy_ListSnapshots_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(*milvuspb.ListSnapshotsRequest))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockProxy_ListSnapshots_Call) Return(_a0 *milvuspb.ListSnapshotsResponse, _a1 error) *MockProxy_ListSnapshots_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockProxy_ListSnapshots_Call) RunAndReturn(run func(context.Context, *milvuspb.ListSnapshotsRequest) (*milvuspb.ListSnapshotsResponse, error)) *MockProxy_ListSnapshots_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// ListUsersWithTag provides a mock function with given fields: _a0, _a1
|
||||
func (_m *MockProxy) ListUsersWithTag(_a0 context.Context, _a1 *milvuspb.ListUsersWithTagRequest) (*milvuspb.ListUsersWithTagResponse, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
@ -6879,6 +7292,65 @@ func (_c *MockProxy_RestoreRBAC_Call) RunAndReturn(run func(context.Context, *mi
|
||||
return _c
|
||||
}
|
||||
|
||||
// RestoreSnapshot provides a mock function with given fields: _a0, _a1
|
||||
func (_m *MockProxy) RestoreSnapshot(_a0 context.Context, _a1 *milvuspb.RestoreSnapshotRequest) (*milvuspb.RestoreSnapshotResponse, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for RestoreSnapshot")
|
||||
}
|
||||
|
||||
var r0 *milvuspb.RestoreSnapshotResponse
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.RestoreSnapshotRequest) (*milvuspb.RestoreSnapshotResponse, error)); ok {
|
||||
return rf(_a0, _a1)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.RestoreSnapshotRequest) *milvuspb.RestoreSnapshotResponse); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*milvuspb.RestoreSnapshotResponse)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.RestoreSnapshotRequest) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockProxy_RestoreSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RestoreSnapshot'
|
||||
type MockProxy_RestoreSnapshot_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// RestoreSnapshot is a helper method to define mock.On call
|
||||
// - _a0 context.Context
|
||||
// - _a1 *milvuspb.RestoreSnapshotRequest
|
||||
func (_e *MockProxy_Expecter) RestoreSnapshot(_a0 interface{}, _a1 interface{}) *MockProxy_RestoreSnapshot_Call {
|
||||
return &MockProxy_RestoreSnapshot_Call{Call: _e.mock.On("RestoreSnapshot", _a0, _a1)}
|
||||
}
|
||||
|
||||
func (_c *MockProxy_RestoreSnapshot_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.RestoreSnapshotRequest)) *MockProxy_RestoreSnapshot_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(*milvuspb.RestoreSnapshotRequest))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockProxy_RestoreSnapshot_Call) Return(_a0 *milvuspb.RestoreSnapshotResponse, _a1 error) *MockProxy_RestoreSnapshot_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockProxy_RestoreSnapshot_Call) RunAndReturn(run func(context.Context, *milvuspb.RestoreSnapshotRequest) (*milvuspb.RestoreSnapshotResponse, error)) *MockProxy_RestoreSnapshot_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// RunAnalyzer provides a mock function with given fields: _a0, _a1
|
||||
func (_m *MockProxy) RunAnalyzer(_a0 context.Context, _a1 *milvuspb.RunAnalyzerRequest) (*milvuspb.RunAnalyzerResponse, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
@ -7658,6 +8130,65 @@ func (_c *MockProxy_TransferReplica_Call) RunAndReturn(run func(context.Context,
|
||||
return _c
|
||||
}
|
||||
|
||||
// TruncateCollection provides a mock function with given fields: _a0, _a1
|
||||
func (_m *MockProxy) TruncateCollection(_a0 context.Context, _a1 *milvuspb.TruncateCollectionRequest) (*commonpb.Status, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for TruncateCollection")
|
||||
}
|
||||
|
||||
var r0 *commonpb.Status
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.TruncateCollectionRequest) (*commonpb.Status, error)); ok {
|
||||
return rf(_a0, _a1)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.TruncateCollectionRequest) *commonpb.Status); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*commonpb.Status)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.TruncateCollectionRequest) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockProxy_TruncateCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TruncateCollection'
|
||||
type MockProxy_TruncateCollection_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// TruncateCollection is a helper method to define mock.On call
|
||||
// - _a0 context.Context
|
||||
// - _a1 *milvuspb.TruncateCollectionRequest
|
||||
func (_e *MockProxy_Expecter) TruncateCollection(_a0 interface{}, _a1 interface{}) *MockProxy_TruncateCollection_Call {
|
||||
return &MockProxy_TruncateCollection_Call{Call: _e.mock.On("TruncateCollection", _a0, _a1)}
|
||||
}
|
||||
|
||||
func (_c *MockProxy_TruncateCollection_Call) Run(run func(_a0 context.Context, _a1 *milvuspb.TruncateCollectionRequest)) *MockProxy_TruncateCollection_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(*milvuspb.TruncateCollectionRequest))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockProxy_TruncateCollection_Call) Return(_a0 *commonpb.Status, _a1 error) *MockProxy_TruncateCollection_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockProxy_TruncateCollection_Call) RunAndReturn(run func(context.Context, *milvuspb.TruncateCollectionRequest) (*commonpb.Status, error)) *MockProxy_TruncateCollection_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// UpdateCredential provides a mock function with given fields: _a0, _a1
|
||||
func (_m *MockProxy) UpdateCredential(_a0 context.Context, _a1 *milvuspb.UpdateCredentialRequest) (*commonpb.Status, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
@ -684,6 +684,64 @@ func (_c *MockShardManager_DropPartition_Call) RunAndReturn(run func(message.Imm
|
||||
return _c
|
||||
}
|
||||
|
||||
// FlushAllAndFenceSegmentAllocUntil provides a mock function with given fields: timetick
|
||||
func (_m *MockShardManager) FlushAllAndFenceSegmentAllocUntil(timetick uint64) ([]int64, error) {
|
||||
ret := _m.Called(timetick)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for FlushAllAndFenceSegmentAllocUntil")
|
||||
}
|
||||
|
||||
var r0 []int64
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(uint64) ([]int64, error)); ok {
|
||||
return rf(timetick)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(uint64) []int64); ok {
|
||||
r0 = rf(timetick)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]int64)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(uint64) error); ok {
|
||||
r1 = rf(timetick)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockShardManager_FlushAllAndFenceSegmentAllocUntil_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FlushAllAndFenceSegmentAllocUntil'
|
||||
type MockShardManager_FlushAllAndFenceSegmentAllocUntil_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// FlushAllAndFenceSegmentAllocUntil is a helper method to define mock.On call
|
||||
// - timetick uint64
|
||||
func (_e *MockShardManager_Expecter) FlushAllAndFenceSegmentAllocUntil(timetick interface{}) *MockShardManager_FlushAllAndFenceSegmentAllocUntil_Call {
|
||||
return &MockShardManager_FlushAllAndFenceSegmentAllocUntil_Call{Call: _e.mock.On("FlushAllAndFenceSegmentAllocUntil", timetick)}
|
||||
}
|
||||
|
||||
func (_c *MockShardManager_FlushAllAndFenceSegmentAllocUntil_Call) Run(run func(timetick uint64)) *MockShardManager_FlushAllAndFenceSegmentAllocUntil_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(uint64))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockShardManager_FlushAllAndFenceSegmentAllocUntil_Call) Return(_a0 []int64, _a1 error) *MockShardManager_FlushAllAndFenceSegmentAllocUntil_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockShardManager_FlushAllAndFenceSegmentAllocUntil_Call) RunAndReturn(run func(uint64) ([]int64, error)) *MockShardManager_FlushAllAndFenceSegmentAllocUntil_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
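
For orientation, here is a minimal caller-side sketch of this generated mock, assuming the standard mockery constructor NewMockShardManager(t) that ships alongside these generated types (not shown in this diff):

	// Sketch only: arming the expecter in a test and calling through it.
	sm := NewMockShardManager(t) // assumed mockery-generated constructor
	sm.EXPECT().
		FlushAllAndFenceSegmentAllocUntil(mock.Anything).
		Return([]int64{100, 101}, nil)

	segmentIDs, err := sm.FlushAllAndFenceSegmentAllocUntil(uint64(42))
	// segmentIDs == []int64{100, 101}, err == nil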

// FlushAndFenceSegmentAllocUntil provides a mock function with given fields: collectionID, timetick
func (_m *MockShardManager) FlushAndFenceSegmentAllocUntil(collectionID int64, timetick uint64) ([]int64, error) {
	ret := _m.Called(collectionID, timetick)

@ -71,7 +71,6 @@ import (
	"github.com/milvus-io/milvus/pkg/v2/util/requestutil"
	"github.com/milvus-io/milvus/pkg/v2/util/retry"
	"github.com/milvus-io/milvus/pkg/v2/util/timerecord"
	"github.com/milvus-io/milvus/pkg/v2/util/tsoutil"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

@ -4096,15 +4095,12 @@ func (node *Proxy) FlushAll(ctx context.Context, request *milvuspb.FlushAllReque
		Condition:       NewTaskCondition(ctx),
		FlushAllRequest: request,
		mixCoord:        node.mixCoord,
		chMgr:           node.chMgr,
	}

	method := "FlushAll"
	tr := timerecord.NewTimeRecorder(method)

	log := log.Ctx(ctx).With(
		zap.String("role", typeutil.ProxyRole),
		zap.String("db", request.DbName))
	log := log.Ctx(ctx).With(zap.String("role", typeutil.ProxyRole))

	log.Debug(rpcReceived(method))

@ -4129,11 +4125,8 @@ func (node *Proxy) FlushAll(ctx context.Context, request *milvuspb.FlushAllReque
		return resp, nil
	}

	log.Debug(
		rpcDone(method),
		zap.Uint64("FlushAllTs", ft.result.GetFlushAllTs()),
		zap.Uint64("BeginTs", ft.BeginTs()),
		zap.Uint64("EndTs", ft.EndTs()))
	log.Debug(rpcDone(method),
		zap.Any("FlushAllTss", ft.result.GetFlushAllTss()))

	metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	return ft.result, nil

@ -4803,9 +4796,7 @@ func (node *Proxy) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStat
func (node *Proxy) GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushAllStateRequest) (*milvuspb.GetFlushAllStateResponse, error) {
	ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-GetFlushAllState")
	defer sp.End()
	log := log.Ctx(ctx).With(zap.Uint64("FlushAllTs", req.GetFlushAllTs()),
		zap.Time("FlushAllTime", tsoutil.PhysicalTime(req.GetFlushAllTs())),
		zap.String("db", req.GetDbName()))
	log := log.Ctx(ctx).With(zap.Any("FlushAllTss", req.GetFlushAllTss()))
	log.Debug("receive GetFlushAllState request")

	var err error

@ -466,49 +466,8 @@ func createTestProxy() *Proxy {
	return node
}

func TestProxy_FlushAll_NoDatabase(t *testing.T) {
	mockey.PatchConvey("TestProxy_FlushAll_NoDatabase", t, func() {
		// Mock global meta cache methods
		globalMetaCache = &MetaCache{}
		mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
			return UniqueID(0), nil
		}).Build()
		mockey.Mock(globalMetaCache.RemoveDatabase).To(func(ctx context.Context, dbName string) error {
			return nil
		}).Build()

		// Mock paramtable initialization
		mockey.Mock(paramtable.Init).Return().Build()
		mockey.Mock((*paramtable.ComponentParam).Save).Return().Build()

		successStatus := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
		mockey.Mock((*grpcmixcoordclient.Client).ListDatabases).To(func(ctx context.Context, req *milvuspb.ListDatabasesRequest, opts ...grpc.CallOption) (*milvuspb.ListDatabasesResponse, error) {
			return &milvuspb.ListDatabasesResponse{Status: successStatus}, nil
		}).Build()
		mockey.Mock((*grpcmixcoordclient.Client).ShowCollections).To(func(ctx context.Context, req *milvuspb.ShowCollectionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowCollectionsResponse, error) {
			return &milvuspb.ShowCollectionsResponse{Status: successStatus}, nil
		}).Build()

		// Act: Execute test
		node := createTestProxy()
		defer node.sched.Close()

		mixcoord := &grpcmixcoordclient.Client{}
		node.mixCoord = mixcoord
		mockey.Mock((*grpcmixcoordclient.Client).FlushAll).To(func(ctx context.Context, req *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
			return &datapb.FlushAllResponse{Status: successStatus}, nil
		}).Build()

		resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{})

		// Assert: Verify results
		assert.NoError(t, err)
		assert.True(t, merr.Ok(resp.GetStatus()))
	})
}

func TestProxy_FlushAll_WithDefaultDatabase(t *testing.T) {
	mockey.PatchConvey("TestProxy_FlushAll_WithDefaultDatabase", t, func() {
func TestProxy_FlushAll_Success(t *testing.T) {
	mockey.PatchConvey("TestProxy_FlushAll_Success", t, func() {
		// Mock global meta cache methods
		globalMetaCache = &MetaCache{}
		mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
@ -541,7 +500,7 @@ func TestProxy_FlushAll_WithDefaultDatabase(t *testing.T) {
			return &datapb.FlushAllResponse{Status: successStatus}, nil
		}).Build()

		resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{DbName: "default"})
		resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{})

		// Assert: Verify results
		assert.NoError(t, err)
@ -549,43 +508,6 @@ func TestProxy_FlushAll_WithDefaultDatabase(t *testing.T) {
	})
}

func TestProxy_FlushAll_DatabaseNotExist(t *testing.T) {
	mockey.PatchConvey("TestProxy_FlushAll_DatabaseNotExist", t, func() {
		// Mock global meta cache methods
		globalMetaCache = &MetaCache{}
		mockey.Mock(globalMetaCache.GetCollectionID).To(func(ctx context.Context, dbName, collectionName string) (UniqueID, error) {
			return UniqueID(0), nil
		}).Build()
		mockey.Mock(globalMetaCache.RemoveDatabase).To(func(ctx context.Context, dbName string) error {
			return nil
		}).Build()

		// Mock paramtable initialization
		mockey.Mock(paramtable.Init).Return().Build()
		mockey.Mock((*paramtable.ComponentParam).Save).Return().Build()

		mockey.Mock((*grpcmixcoordclient.Client).ShowCollections).To(func(ctx context.Context, req *milvuspb.ShowCollectionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowCollectionsResponse, error) {
			return &milvuspb.ShowCollectionsResponse{Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_MetaFailed}}, nil
		}).Build()

		// Act: Execute test
		node := createTestProxy()
		defer node.sched.Close()

		mixcoord := &grpcmixcoordclient.Client{}
		node.mixCoord = mixcoord
		mockey.Mock((*grpcmixcoordclient.Client).FlushAll).To(func(ctx context.Context, req *datapb.FlushAllRequest, opts ...grpc.CallOption) (*datapb.FlushAllResponse, error) {
			return &datapb.FlushAllResponse{Status: merr.Success()}, nil
		}).Build()

		resp, err := node.FlushAll(context.Background(), &milvuspb.FlushAllRequest{DbName: "default2"})

		// Assert: Verify results
		assert.NoError(t, err)
		assert.NotEqual(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_MetaFailed)
	})
}

func TestProxy_FlushAll_ServerAbnormal(t *testing.T) {
	mockey.PatchConvey("TestProxy_FlushAll_ServerAbnormal", t, func() {
		// Mock global meta cache methods

@ -33,7 +33,6 @@ type flushAllTask struct {
	ctx      context.Context
	mixCoord types.MixCoordClient
	result   *milvuspb.FlushAllResponse
	chMgr    channelsMgr
}

func (t *flushAllTask) TraceCtx() context.Context {

@ -19,186 +19,25 @@ package proxy
import (
	"context"
	"fmt"
	"sync"

	"github.com/samber/lo"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
	"github.com/milvus-io/milvus/pkg/v2/log"
	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
	"github.com/milvus-io/milvus/pkg/v2/util/commonpbutil"
	"github.com/milvus-io/milvus/pkg/v2/util/merr"
	"github.com/milvus-io/milvus/pkg/v2/util/tsoutil"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

func (t *flushAllTask) Execute(ctx context.Context) error {
	flushTs := t.BeginTs()
	timeOfSeal, _ := tsoutil.ParseTS(flushTs)

	// Note: for now, flush sends a flush signal to the wal on the streamnode, then gets the flushed segment list from datacoord,
	// so we need to expand the flush collection names to make sure both sides see the same flushed collection list.
	targets, err := t.expandFlushCollectionNames(ctx)
	if err != nil {
		return err
	}

	// send flush signal to wal on streamnode
	onFlushSegmentMap, err := t.sendManualFlushAllToWal(ctx, targets, flushTs)
	if err != nil {
		return err
	}

	// get flush detail info from datacoord
	resp, err := t.mixCoord.FlushAll(ctx, &datapb.FlushAllRequest{
		Base:         commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_Flush)),
		DbName:       t.GetDbName(),
		FlushTargets: targets,
		Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_Flush)),
	})
	if err = merr.CheckRPCCall(resp, err); err != nil {
		return fmt.Errorf("failed to call flush all to data coordinator: %s", err.Error())
	}

	dbResultsMap := lo.GroupBy(resp.GetFlushResults(), func(result *datapb.FlushResult) string {
		return result.GetDbName()
	})
	results := make([]*milvuspb.FlushAllResult, 0)
	for dbName, dbResults := range dbResultsMap {
		results = append(results, &milvuspb.FlushAllResult{
			DbName: dbName,
			CollectionResults: lo.Map(dbResults, func(result *datapb.FlushResult, _ int) *milvuspb.FlushCollectionResult {
				onFlushSegmentIDs := onFlushSegmentMap[result.GetCollectionID()]
				// Remove the flushed segments from onFlushSegmentIDs
				flushedSegmentSet := typeutil.NewUniqueSet(result.GetFlushSegmentIDs()...)
				filteredSegments := make([]int64, 0, len(onFlushSegmentIDs))
				for _, id := range onFlushSegmentIDs {
					if !flushedSegmentSet.Contain(id) {
						filteredSegments = append(filteredSegments, id)
					}
				}
				onFlushSegmentIDs = filteredSegments
				return &milvuspb.FlushCollectionResult{
					CollectionName:  result.GetCollectionName(),
					SegmentIds:      &schemapb.LongArray{Data: onFlushSegmentIDs},
					FlushSegmentIds: &schemapb.LongArray{Data: result.GetFlushSegmentIDs()},
					SealTime:        timeOfSeal.Unix(),
					FlushTs:         flushTs,
					ChannelCps:      result.GetChannelCps(),
				}
			}),
		})
	}

	t.result = &milvuspb.FlushAllResponse{
		Status:       merr.Success(),
		FlushAllTs:   flushTs,
		FlushResults: results,
		Status:      merr.Success(),
		FlushAllTss: resp.GetFlushAllTss(),
		ClusterInfo: resp.GetClusterInfo(),
	}
	return nil
}

// todo: refine this by sending a single FlushAll message to wal
func (t *flushAllTask) sendManualFlushAllToWal(ctx context.Context, flushTargets []*datapb.FlushAllTarget, flushTs Timestamp) (map[int64][]int64, error) {
	wg := errgroup.Group{}
	// limit goroutine number to 100
	wg.SetLimit(100)

	var mu sync.Mutex
	results := make(map[int64][]int64)

	for _, target := range flushTargets {
		for _, coll := range target.CollectionIds {
			collID := coll
			wg.Go(func() error {
				vchannels, err := t.chMgr.getVChannels(collID)
				if err != nil {
					return err
				}

				onFlushSegmentIDs := make([]int64, 0)
				// Ask the streamingnode to flush segments.
				for _, vchannel := range vchannels {
					segmentIDs, err := sendManualFlushToWAL(ctx, collID, vchannel, flushTs)
					if err != nil {
						return err
					}
					onFlushSegmentIDs = append(onFlushSegmentIDs, segmentIDs...)
				}
				mu.Lock()
				results[collID] = onFlushSegmentIDs
				mu.Unlock()
				return nil
			})
		}
	}

	err := wg.Wait()
	if err != nil {
		return nil, err
	}

	return results, nil
}

func (t *flushAllTask) expandFlushCollectionNames(ctx context.Context) ([]*datapb.FlushAllTarget, error) {
	// Determine which databases and collections to flush
	targets := make([]*datapb.FlushAllTarget, 0)
	if len(t.GetFlushTargets()) > 0 {
		// Use flush_targets from request
		for _, target := range t.GetFlushTargets() {
			collectionIDs := make([]int64, 0)
			for _, collectionName := range target.GetCollectionNames() {
				collectionID, err := globalMetaCache.GetCollectionID(ctx, target.GetDbName(), collectionName)
				if err != nil {
					return nil, err
				}
				collectionIDs = append(collectionIDs, collectionID)
			}
			targets = append(targets, &datapb.FlushAllTarget{
				DbName:        target.GetDbName(),
				CollectionIds: collectionIDs,
			})
		}
	} else if t.GetDbName() != "" {
		// Backward compatibility: use deprecated db_name field
		targets = append(targets, &datapb.FlushAllTarget{
			DbName:        t.GetDbName(),
			CollectionIds: []int64{},
		})
	} else {
		// Flush all databases
		listResp, err := t.mixCoord.ListDatabases(ctx, &milvuspb.ListDatabasesRequest{
			Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ListDatabases)),
		})
		if err != nil {
			log.Info("flush all task by streaming service failed, list databases failed", zap.Error(err))
			return nil, err
		}
		for _, dbName := range listResp.GetDbNames() {
			targets = append(targets, &datapb.FlushAllTarget{
				DbName:        dbName,
				CollectionIds: []int64{},
			})
		}
	}

	// If CollectionIds is empty, it means flush all collections in this database
	for _, target := range targets {
		collectionIDs := target.GetCollectionIds()
		if len(collectionIDs) == 0 {
			showColRsp, err := t.mixCoord.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{
				Base:   commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections)),
				DbName: target.GetDbName(),
			})
			if err != nil {
				return nil, err
			}
			target.CollectionIds = showColRsp.GetCollectionIds()
		}
	}
	return targets, nil
}

@ -26,6 +26,7 @@ var (

	// Cluster
	RegisterAlterReplicateConfigV2AckCallback = registerMessageAckCallback[*message.AlterReplicateConfigMessageHeader, *message.AlterReplicateConfigMessageBody]
	RegisterFlushAllV2AckCallback             = registerMessageAckCallback[*message.FlushAllMessageHeader, *message.FlushAllMessageBody]

	// Collection
	RegisterAlterCollectionV2AckCallback = registerMessageAckCallback[*message.AlterCollectionMessageHeader, *message.AlterCollectionMessageBody]

@ -77,6 +78,7 @@ func resetMessageAckCallbacks() {

		// Cluster
		message.MessageTypeAlterReplicateConfigV2: syncutil.NewFuture[messageInnerAckCallback](),
		message.MessageTypeFlushAllV2:             syncutil.NewFuture[messageInnerAckCallback](),

		// Collection
		message.MessageTypeAlterCollectionV2: syncutil.NewFuture[messageInnerAckCallback](),

@ -114,7 +114,7 @@ func (impl *flusherComponents) WhenDropCollection(vchannel string) {
// HandleMessage handles the plain message.
func (impl *flusherComponents) HandleMessage(ctx context.Context, msg message.ImmutableMessage) error {
	vchannel := msg.VChannel()
	if vchannel == "" {
	if vchannel == "" || isBroadcastToAllMessage(msg.MessageType()) {
		return impl.broadcastToAllDataSyncService(ctx, msg)
	}
	if _, ok := impl.dataServices[vchannel]; !ok {

@ -108,6 +108,17 @@ func (impl *msgHandlerImpl) HandleManualFlush(flushMsg message.ImmutableManualFl
	return nil
}

func (impl *msgHandlerImpl) HandleFlushAll(vchannel string, flushAllMsg message.ImmutableFlushAllMessageV2) error {
	if err := impl.wbMgr.SealAllSegments(context.Background(), vchannel); err != nil {
		return errors.Wrap(err, "failed to seal all segments")
	}
	// Use FlushAllMsg's ts as flush ts.
	if err := impl.wbMgr.FlushChannel(context.Background(), vchannel, flushAllMsg.TimeTick()); err != nil {
		return errors.Wrap(err, "failed to flush channel")
	} // may be redundant.
	return nil
}
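
A FlushAll message is broadcast rather than bound to one vchannel, so the flusher is expected to invoke HandleFlushAll once per data sync service it owns. The loop below is an illustrative sketch of that fan-out, not the actual wiring; the impl.dataServices and impl.msgHandler names are assumptions borrowed from the surrounding components:

	// Illustration only: fan a broadcast FlushAll out to every vchannel.
	for vchannel := range impl.dataServices {
		if err := impl.msgHandler.HandleFlushAll(vchannel, flushAllMsg); err != nil {
			return errors.Wrap(err, "failed to handle flush all for "+vchannel)
		}
	}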

func (impl *msgHandlerImpl) HandleSchemaChange(ctx context.Context, msg message.ImmutableSchemaChangeMessageV2) error {
	return impl.wbMgr.SealSegments(context.Background(), msg.VChannel(), msg.Header().FlushedSegmentIds)
}

@ -102,6 +102,45 @@ func TestFlushMsgHandler_HandleManualFlush(t *testing.T) {
	assert.NoError(t, err)
}

func TestFlushMsgHandler_HandleFlushAll(t *testing.T) {
	vchannel := "ch-0"

	// test failed
	wbMgr := writebuffer.NewMockBufferManager(t)
	wbMgr.EXPECT().SealAllSegments(mock.Anything, mock.Anything).Return(errors.New("mock err"))
	wbMgr.EXPECT().FlushChannel(mock.Anything, mock.Anything, mock.Anything).Return(errors.New("mock err"))

	msg := message.NewFlushAllMessageBuilderV2().
		WithBroadcast([]string{vchannel}).
		WithHeader(&message.FlushAllMessageHeader{}).
		WithBody(&message.FlushAllMessageBody{}).
		WithProperties(map[string]string{
			"_tt": "1",
		}).
		MustBuildBroadcast().
		WithBroadcastID(1).
		SplitIntoMutableMessage()[0]

	handler := newMsgHandler(wbMgr)
	msgID := mock_message.NewMockMessageID(t)
	im, err := message.AsImmutableFlushAllMessageV2(msg.IntoImmutableMessage(msgID))
	assert.NoError(t, err)
	err = handler.HandleFlushAll(vchannel, im)
	assert.Error(t, err)

	wbMgr.EXPECT().SealAllSegments(mock.Anything, mock.Anything).Unset()
	wbMgr.EXPECT().SealAllSegments(mock.Anything, mock.Anything).Return(nil)
	err = handler.HandleFlushAll(vchannel, im)
	assert.Error(t, err)

	// test normal
	wbMgr.EXPECT().FlushChannel(mock.Anything, mock.Anything, mock.Anything).Unset()
	wbMgr.EXPECT().FlushChannel(mock.Anything, mock.Anything, mock.Anything).Return(nil)

	err = handler.HandleFlushAll(vchannel, im)
	assert.NoError(t, err)
}

func TestFlushMsgHandler_HandlSchemaChange(t *testing.T) {
	vchannel := "ch-0"

@ -18,6 +18,16 @@ import (

var defaultCollectionNotFoundTolerance = 10

var broadcastToAllMessageType = map[message.MessageType]struct{}{
	message.MessageTypeFlushAll: {},
}

// isBroadcastToAllMessage checks if the message needs to be broadcast to all data sync services.
func isBroadcastToAllMessage(msg message.MessageType) bool {
	_, ok := broadcastToAllMessageType[msg]
	return ok
}

// getRecoveryInfos gets the recovery info of the vchannels from datacoord
func (impl *WALFlusherImpl) getRecoveryInfos(ctx context.Context, vchannel []string) (map[string]*datapb.GetChannelRecoveryInfoResponse, message.MessageID, error) {
	futures := make([]*conc.Future[interface{}], 0, len(vchannel))

@ -215,7 +215,7 @@ func (impl *WALFlusherImpl) dispatch(msg message.ImmutableMessage) (err error) {
	}()

	// wal flusher will not handle the control channel message.
	if funcutil.IsControlChannel(msg.VChannel()) {
	if funcutil.IsControlChannel(msg.VChannel()) && !isBroadcastToAllMessage(msg.MessageType()) {
		return nil
	}

@ -43,6 +43,7 @@ func (impl *shardInterceptor) initOpTable() {
		message.MessageTypeAlterCollection: impl.handleAlterCollection,
		message.MessageTypeCreateSegment:   impl.handleCreateSegment,
		message.MessageTypeFlush:           impl.handleFlushSegment,
		message.MessageTypeFlushAll:        impl.handleFlushAllMessage,
	}
}

@ -297,5 +298,14 @@ func (impl *shardInterceptor) handleFlushSegment(ctx context.Context, msg messag
	return msgID, nil
}

// handleFlushAllMessage handles the flush all message.
func (impl *shardInterceptor) handleFlushAllMessage(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
	_, err := impl.shardManager.FlushAllAndFenceSegmentAllocUntil(msg.TimeTick())
	if err != nil {
		return nil, status.NewUnrecoverableError(err.Error())
	}
	return appendOp(ctx, msg)
}

// Close closes the segment interceptor.
func (impl *shardInterceptor) Close() {}

@ -44,6 +44,8 @@ type ShardManager interface {

	FlushAndFenceSegmentAllocUntil(collectionID int64, timetick uint64) ([]int64, error)

	FlushAllAndFenceSegmentAllocUntil(timetick uint64) ([]int64, error)

	AsyncFlushSegment(signal utils.SealSegmentSignal)

	Close()

@ -169,6 +169,30 @@ func (m *shardManagerImpl) FlushAndFenceSegmentAllocUntil(collectionID int64, ti
	logger := m.Logger().With(zap.Int64("collectionID", collectionID), zap.Uint64("timetick", timetick))
	m.mu.Lock()
	defer m.mu.Unlock()
	segmentIDs, err := m.flushAndFenceSegmentAllocUntil(collectionID, timetick)
	if err != nil {
		return nil, err
	}
	logger.Info("segments should be flushed when FlushAndFenceSegmentAllocUntil", zap.Int64s("segmentIDs", segmentIDs))
	return segmentIDs, nil
}

func (m *shardManagerImpl) FlushAllAndFenceSegmentAllocUntil(timetick uint64) ([]int64, error) {
	logger := m.Logger().With(zap.Uint64("timetick", timetick))
	m.mu.Lock()
	defer m.mu.Unlock()

	segmentIDs := make([]int64, 0)
	for collectionID := range m.collections {
		ids, _ := m.flushAndFenceSegmentAllocUntil(collectionID, timetick)
		segmentIDs = append(segmentIDs, ids...)
	}
	logger.Info("segments should be flushed when FlushAllAndFenceSegmentAllocUntil", zap.Int64s("segmentIDs", segmentIDs))
	return segmentIDs, nil
}
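
One design note on FlushAllAndFenceSegmentAllocUntil above: per-collection errors are intentionally discarded (ids, _ := ...), so a collection that disappears between snapshotting m.collections and fencing it (for example, a concurrent drop) is skipped rather than failing the cluster-wide flush. A minimal sketch of that skip-on-error accumulation, with hypothetical names:

	// Sketch of the skip-on-error pattern (fenceOne and collections are hypothetical).
	all := make([]int64, 0)
	for collectionID := range collections {
		ids, err := fenceOne(collectionID, timetick)
		if err != nil {
			continue // e.g. the collection was dropped concurrently; skip it
		}
		all = append(all, ids...)
	}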

func (m *shardManagerImpl) flushAndFenceSegmentAllocUntil(collectionID int64, timetick uint64) ([]int64, error) {
	logger := m.Logger().With(zap.Int64("collectionID", collectionID), zap.Uint64("timetick", timetick))

	if err := m.checkIfCollectionExists(collectionID); err != nil {
		logger.Warn("collection not found when FlushAndFenceSegmentAllocUntil", zap.Error(err))
@ -189,7 +213,6 @@ func (m *shardManagerImpl) FlushAndFenceSegmentAllocUntil(collectionID int64, ti
		newSealedSegments := pm.FlushAndFenceSegmentUntil(timetick)
		segmentIDs = append(segmentIDs, newSealedSegments...)
	}
	logger.Info("segments should be flushed when FlushAndFenceSegmentAllocUntil", zap.Int64s("segmentIDs", segmentIDs))
	return segmentIDs, nil
}

@ -22,7 +22,7 @@ require (
	github.com/jolestar/go-commons-pool/v2 v2.1.2
	github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12
	github.com/klauspost/compress v1.18.0
	github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45
	github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6
	github.com/minio/minio-go/v7 v7.0.73
	github.com/panjf2000/ants/v2 v2.11.3
	github.com/prometheus/client_golang v1.20.5

@ -621,8 +621,8 @@ github.com/milvus-io/cgosymbolizer v0.0.0-20250318084424-114f4050c3a6 h1:YHMFI6L
github.com/milvus-io/cgosymbolizer v0.0.0-20250318084424-114f4050c3a6/go.mod h1:DvXTE/K/RtHehxU8/GtDs4vFtfw64jJ3PaCnFri8CRg=
github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b h1:TfeY0NxYxZzUfIfYe5qYDBzt4ZYRqzUjTR6CvUzjat8=
github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b/go.mod h1:iwW+9cWfIzzDseEBCCeDSN5SD16Tidvy8cwQ7ZY8Qj4=
github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45 h1:TMUhlirMCH2zgJD+qClP5EP0yuFl1VrE4j+0fiRSuJU=
github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6 h1:TeHfsRCdjbX30xS7Npcb+POQXd460+AjmXYmmTuxyBA=
github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.73 h1:qr2vi96Qm7kZ4v7LLebjte+MQh621fFWnv93p12htEo=

@ -30,6 +30,7 @@ enum SegmentLevel {

service DataCoord {
  rpc Flush(FlushRequest) returns (FlushResponse) {}
  // FlushAll flushes all data in the cluster.
  rpc FlushAll(FlushAllRequest) returns(FlushAllResponse) {}

  rpc CreateExternalCollection(msg.CreateCollectionRequest) returns (CreateExternalCollectionResponse) {}

@ -181,13 +182,12 @@ message FlushResult {
}

message FlushAllRequest {
  common.MsgBase base = 1;
  string dbName = 2; // Deprecated: use flush_targets instead

  // List of specific databases and collections to flush
  repeated FlushAllTarget flush_targets = 3;
  common.MsgBase base = 1;
  string dbName = 2 [deprecated = true];
  repeated FlushAllTarget flush_targets = 3 [deprecated = true];
}

// Deprecated: FlushAll semantics changed to flushing the entire cluster.
// Specific collection to flush with database context
// This message allows targeting specific collections within a database for flush operations
message FlushAllTarget {

@ -199,11 +199,11 @@ message FlushAllTarget {
}

message FlushAllResponse {
  common.Status status = 1;
  uint64 flushTs = 2;

  // Detailed flush results for each target
  repeated FlushResult flush_results = 3;
  common.Status status = 1;
  uint64 flushTs = 2 [deprecated = true];
  repeated FlushResult flush_results = 3 [deprecated = true];
  map<string, uint64> flush_all_tss = 4; // pchannel -> FlushAllMsg's ts
  milvus.ClusterInfo cluster_info = 5;
}
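
With the response reshaped this way, a caller receives one fence timestamp per pchannel instead of a single global flushTs. A rough consumer sketch, assuming a connected milvuspb.MilvusServiceClient named client (the surrounding wiring is illustrative, not part of this PR):

	// Sketch: trigger a cluster-wide flush, then read the per-pchannel fence ts.
	resp, err := client.FlushAll(ctx, &milvuspb.FlushAllRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for pchannel, ts := range resp.GetFlushAllTss() {
		fmt.Printf("pchannel %s fenced at ts %d\n", pchannel, ts)
	}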

message FlushChannelsRequest {
@ -82,6 +82,7 @@ const (
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type DataCoordClient interface {
|
||||
Flush(ctx context.Context, in *FlushRequest, opts ...grpc.CallOption) (*FlushResponse, error)
|
||||
// FlushAll flushes all data in the cluster.
|
||||
FlushAll(ctx context.Context, in *FlushAllRequest, opts ...grpc.CallOption) (*FlushAllResponse, error)
|
||||
CreateExternalCollection(ctx context.Context, in *msgpb.CreateCollectionRequest, opts ...grpc.CallOption) (*CreateExternalCollectionResponse, error)
|
||||
// AllocSegment alloc a new growing segment, add it into segment meta.
|
||||
@ -617,6 +618,7 @@ func (c *dataCoordClient) ListFileResources(ctx context.Context, in *milvuspb.Li
|
||||
// for forward compatibility
|
||||
type DataCoordServer interface {
|
||||
Flush(context.Context, *FlushRequest) (*FlushResponse, error)
|
||||
// FlushAll flushes all data in the cluster.
|
||||
FlushAll(context.Context, *FlushAllRequest) (*FlushAllResponse, error)
|
||||
CreateExternalCollection(context.Context, *msgpb.CreateCollectionRequest) (*CreateExternalCollectionResponse, error)
|
||||
// AllocSegment alloc a new growing segment, add it into segment meta.
|
||||
|
||||
@ -58,6 +58,7 @@ enum MessageType {
|
||||
CreateIndex = 34;
|
||||
AlterIndex = 35;
|
||||
DropIndex = 36;
|
||||
FlushAll = 37;
|
||||
|
||||
// AlterReplicateConfig is used to alter the replicate configuration to the current cluster.
|
||||
// When the AlterReplicateConfig message is received, the replication topology is changed.
|
||||
@ -584,6 +585,10 @@ message ManualFlushExtraResponse {
|
||||
repeated int64 segment_ids = 1;
|
||||
}
|
||||
|
||||
message FlushAllMessageHeader {}
|
||||
|
||||
message FlushAllMessageBody {}
|
||||
|
||||
// TxnContext is the context of transaction.
|
||||
// It will be carried by every message in a transaction.
|
||||
message TxnContext {
|
||||
|
||||
@ -70,6 +70,7 @@ const (
|
||||
MessageType_CreateIndex MessageType = 34
|
||||
MessageType_AlterIndex MessageType = 35
|
||||
MessageType_DropIndex MessageType = 36
|
||||
MessageType_FlushAll MessageType = 37
|
||||
// AlterReplicateConfig is used to alter the replicate configuration to the current cluster.
|
||||
// When the AlterReplicateConfig message is received, the replication topology is changed.
|
||||
// Maybe some cluster give up the leader role, no any other message will be received from this cluster.
|
||||
@ -140,6 +141,7 @@ var (
|
||||
34: "CreateIndex",
|
||||
35: "AlterIndex",
|
||||
36: "DropIndex",
|
||||
37: "FlushAll",
|
||||
800: "AlterReplicateConfig",
|
||||
900: "BeginTxn",
|
||||
901: "CommitTxn",
|
||||
@ -184,6 +186,7 @@ var (
|
||||
"CreateIndex": 34,
|
||||
"AlterIndex": 35,
|
||||
"DropIndex": 36,
|
||||
"FlushAll": 37,
|
||||
"AlterReplicateConfig": 800,
|
||||
"BeginTxn": 900,
|
||||
"CommitTxn": 901,
|
||||
@ -4912,6 +4915,82 @@ func (x *ManualFlushExtraResponse) GetSegmentIds() []int64 {
|
||||
return nil
|
||||
}
|
||||
|
||||
type FlushAllMessageHeader struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *FlushAllMessageHeader) Reset() {
|
||||
*x = FlushAllMessageHeader{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_messages_proto_msgTypes[89]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FlushAllMessageHeader) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FlushAllMessageHeader) ProtoMessage() {}
|
||||
|
||||
func (x *FlushAllMessageHeader) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[89]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FlushAllMessageHeader.ProtoReflect.Descriptor instead.
|
||||
func (*FlushAllMessageHeader) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{89}
|
||||
}
|
||||
|
||||
type FlushAllMessageBody struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *FlushAllMessageBody) Reset() {
|
||||
*x = FlushAllMessageBody{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_messages_proto_msgTypes[90]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FlushAllMessageBody) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FlushAllMessageBody) ProtoMessage() {}
|
||||
|
||||
func (x *FlushAllMessageBody) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[90]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FlushAllMessageBody.ProtoReflect.Descriptor instead.
|
||||
func (*FlushAllMessageBody) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{90}
|
||||
}
|
||||
|
||||
// TxnContext is the context of transaction.
|
||||
// It will be carried by every message in a transaction.
|
||||
type TxnContext struct {
|
||||
@ -4930,7 +5009,7 @@ type TxnContext struct {
|
||||
func (x *TxnContext) Reset() {
|
||||
*x = TxnContext{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_messages_proto_msgTypes[89]
|
||||
mi := &file_messages_proto_msgTypes[91]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@ -4943,7 +5022,7 @@ func (x *TxnContext) String() string {
|
||||
func (*TxnContext) ProtoMessage() {}
|
||||
|
||||
func (x *TxnContext) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[89]
|
||||
mi := &file_messages_proto_msgTypes[91]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@ -4956,7 +5035,7 @@ func (x *TxnContext) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use TxnContext.ProtoReflect.Descriptor instead.
|
||||
func (*TxnContext) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{89}
|
||||
return file_messages_proto_rawDescGZIP(), []int{91}
}

func (x *TxnContext) GetTxnId() int64 {
@ -4986,7 +5065,7 @@ type RMQMessageLayout struct {
func (x *RMQMessageLayout) Reset() {
*x = RMQMessageLayout{}
if protoimpl.UnsafeEnabled {
mi := &file_messages_proto_msgTypes[90]
mi := &file_messages_proto_msgTypes[92]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -4999,7 +5078,7 @@ func (x *RMQMessageLayout) String() string {
func (*RMQMessageLayout) ProtoMessage() {}

func (x *RMQMessageLayout) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[90]
mi := &file_messages_proto_msgTypes[92]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -5012,7 +5091,7 @@ func (x *RMQMessageLayout) ProtoReflect() protoreflect.Message {

// Deprecated: Use RMQMessageLayout.ProtoReflect.Descriptor instead.
func (*RMQMessageLayout) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{90}
return file_messages_proto_rawDescGZIP(), []int{92}
}

func (x *RMQMessageLayout) GetPayload() []byte {
@ -5043,7 +5122,7 @@ type BroadcastHeader struct {
func (x *BroadcastHeader) Reset() {
*x = BroadcastHeader{}
if protoimpl.UnsafeEnabled {
mi := &file_messages_proto_msgTypes[91]
mi := &file_messages_proto_msgTypes[93]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -5056,7 +5135,7 @@ func (x *BroadcastHeader) String() string {
func (*BroadcastHeader) ProtoMessage() {}

func (x *BroadcastHeader) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[91]
mi := &file_messages_proto_msgTypes[93]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -5069,7 +5148,7 @@ func (x *BroadcastHeader) ProtoReflect() protoreflect.Message {

// Deprecated: Use BroadcastHeader.ProtoReflect.Descriptor instead.
func (*BroadcastHeader) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{91}
return file_messages_proto_rawDescGZIP(), []int{93}
}

func (x *BroadcastHeader) GetBroadcastId() uint64 {
@ -5109,7 +5188,7 @@ type ReplicateHeader struct {
func (x *ReplicateHeader) Reset() {
*x = ReplicateHeader{}
if protoimpl.UnsafeEnabled {
mi := &file_messages_proto_msgTypes[92]
mi := &file_messages_proto_msgTypes[94]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -5122,7 +5201,7 @@ func (x *ReplicateHeader) String() string {
func (*ReplicateHeader) ProtoMessage() {}

func (x *ReplicateHeader) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[92]
mi := &file_messages_proto_msgTypes[94]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -5135,7 +5214,7 @@ func (x *ReplicateHeader) ProtoReflect() protoreflect.Message {

// Deprecated: Use ReplicateHeader.ProtoReflect.Descriptor instead.
func (*ReplicateHeader) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{92}
return file_messages_proto_rawDescGZIP(), []int{94}
}

func (x *ReplicateHeader) GetClusterId() string {
@ -5189,7 +5268,7 @@ type ResourceKey struct {
func (x *ResourceKey) Reset() {
*x = ResourceKey{}
if protoimpl.UnsafeEnabled {
mi := &file_messages_proto_msgTypes[93]
mi := &file_messages_proto_msgTypes[95]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -5202,7 +5281,7 @@ func (x *ResourceKey) String() string {
func (*ResourceKey) ProtoMessage() {}

func (x *ResourceKey) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[93]
mi := &file_messages_proto_msgTypes[95]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -5215,7 +5294,7 @@ func (x *ResourceKey) ProtoReflect() protoreflect.Message {

// Deprecated: Use ResourceKey.ProtoReflect.Descriptor instead.
func (*ResourceKey) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{93}
return file_messages_proto_rawDescGZIP(), []int{95}
}

func (x *ResourceKey) GetDomain() ResourceDomain {
@ -5254,7 +5333,7 @@ type CipherHeader struct {
func (x *CipherHeader) Reset() {
*x = CipherHeader{}
if protoimpl.UnsafeEnabled {
mi := &file_messages_proto_msgTypes[94]
mi := &file_messages_proto_msgTypes[96]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -5267,7 +5346,7 @@ func (x *CipherHeader) String() string {
func (*CipherHeader) ProtoMessage() {}

func (x *CipherHeader) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[94]
mi := &file_messages_proto_msgTypes[96]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -5280,7 +5359,7 @@ func (x *CipherHeader) ProtoReflect() protoreflect.Message {

// Deprecated: Use CipherHeader.ProtoReflect.Descriptor instead.
func (*CipherHeader) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{94}
return file_messages_proto_rawDescGZIP(), []int{96}
}

func (x *CipherHeader) GetEzId() int64 {
@ -5855,7 +5934,10 @@ var file_messages_proto_rawDesc = []byte{
0x18, 0x4d, 0x61, 0x6e, 0x75, 0x61, 0x6c, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x45, 0x78, 0x74, 0x72,
0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x67,
0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x0a,
0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x73, 0x22, 0x5a, 0x0a, 0x0a, 0x54, 0x78,
0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x46, 0x6c,
0x75, 0x73, 0x68, 0x41, 0x6c, 0x6c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x65, 0x61,
0x64, 0x65, 0x72, 0x22, 0x15, 0x0a, 0x13, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x41, 0x6c, 0x6c, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x5a, 0x0a, 0x0a, 0x54, 0x78,
0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x78, 0x6e, 0x5f,
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x78, 0x6e, 0x49, 0x64, 0x12,
0x35, 0x0a, 0x16, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6c,
@ -5916,7 +5998,7 @@ var file_messages_proto_rawDesc = []byte{
0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x61, 0x66, 0x65, 0x4b, 0x65, 0x79, 0x12,
0x23, 0x0a, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73,
0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42,
0x79, 0x74, 0x65, 0x73, 0x2a, 0xf6, 0x05, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x79, 0x74, 0x65, 0x73, 0x2a, 0x84, 0x06, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10,
0x00, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x69, 0x6d, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x10, 0x01, 0x12,
0x0a, 0x0a, 0x06, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44,
@ -5958,36 +6040,37 @@ var file_messages_proto_rawDesc = []byte{
0x47, 0x72, 0x6f, 0x75, 0x70, 0x10, 0x21, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74,
0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x22, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x6c, 0x74, 0x65,
0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x23, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x72, 0x6f, 0x70,
0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x24, 0x12, 0x19, 0x0a, 0x14, 0x41, 0x6c, 0x74, 0x65, 0x72,
0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x10,
0xa0, 0x06, 0x12, 0x0d, 0x0a, 0x08, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x78, 0x6e, 0x10, 0x84,
0x07, 0x12, 0x0e, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x54, 0x78, 0x6e, 0x10, 0x85,
0x07, 0x12, 0x10, 0x0a, 0x0b, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x54, 0x78, 0x6e,
0x10, 0x86, 0x07, 0x12, 0x08, 0x0a, 0x03, 0x54, 0x78, 0x6e, 0x10, 0xe7, 0x07, 0x2a, 0x74, 0x0a,
0x08, 0x54, 0x78, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x78, 0x6e,
0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x78, 0x6e,
0x49, 0x6e, 0x46, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x78,
0x6e, 0x4f, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x54,
0x78, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x10, 0x03, 0x12, 0x11, 0x0a,
0x0d, 0x54, 0x78, 0x6e, 0x4f, 0x6e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x10, 0x04,
0x12, 0x11, 0x0a, 0x0d, 0x54, 0x78, 0x6e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x65,
0x64, 0x10, 0x05, 0x2a, 0xc2, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10,
0x00, 0x12, 0x21, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d,
0x61, 0x69, 0x6e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x49, 0x44, 0x10, 0x01,
0x1a, 0x02, 0x08, 0x01, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x4e, 0x61, 0x6d, 0x65, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x42, 0x4e, 0x61, 0x6d, 0x65, 0x10, 0x03,
0x12, 0x1b, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61,
0x69, 0x6e, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x10, 0x04, 0x12, 0x19, 0x0a,
0x15, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x43,
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x10, 0x7f, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2d, 0x69, 0x6f,
0x2f, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x76, 0x32, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x70, 0x62, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x24, 0x12, 0x0c, 0x0a, 0x08, 0x46, 0x6c, 0x75, 0x73, 0x68,
0x41, 0x6c, 0x6c, 0x10, 0x25, 0x12, 0x19, 0x0a, 0x14, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65,
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x10, 0xa0, 0x06,
0x12, 0x0d, 0x0a, 0x08, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x78, 0x6e, 0x10, 0x84, 0x07, 0x12,
0x0e, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x54, 0x78, 0x6e, 0x10, 0x85, 0x07, 0x12,
0x10, 0x0a, 0x0b, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x54, 0x78, 0x6e, 0x10, 0x86,
0x07, 0x12, 0x08, 0x0a, 0x03, 0x54, 0x78, 0x6e, 0x10, 0xe7, 0x07, 0x2a, 0x74, 0x0a, 0x08, 0x54,
0x78, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x78, 0x6e, 0x55, 0x6e,
0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x78, 0x6e, 0x49, 0x6e,
0x46, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x78, 0x6e, 0x4f,
0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x78, 0x6e,
0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x54,
0x78, 0x6e, 0x4f, 0x6e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x10, 0x04, 0x12, 0x11,
0x0a, 0x0d, 0x54, 0x78, 0x6e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x10,
0x05, 0x2a, 0xc2, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f,
0x6d, 0x61, 0x69, 0x6e, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12,
0x21, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69,
0x6e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x49, 0x44, 0x10, 0x01, 0x1a, 0x02,
0x08, 0x01, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f,
0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61,
0x6d, 0x65, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x42, 0x4e, 0x61, 0x6d, 0x65, 0x10, 0x03, 0x12, 0x1b,
0x0a, 0x17, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x10, 0x04, 0x12, 0x19, 0x0a, 0x15, 0x52,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6c, 0x75,
0x73, 0x74, 0x65, 0x72, 0x10, 0x7f, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2d, 0x69, 0x6f, 0x2f, 0x6d,
0x69, 0x6c, 0x76, 0x75, 0x73, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x70, 0x62, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
@ -6003,7 +6086,7 @@ func file_messages_proto_rawDescGZIP() []byte {
}

var file_messages_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 98)
var file_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 100)
var file_messages_proto_goTypes = []interface{}{
(MessageType)(0), // 0: milvus.proto.messages.MessageType
(TxnState)(0), // 1: milvus.proto.messages.TxnState
@ -6097,80 +6180,82 @@ var file_messages_proto_goTypes = []interface{}{
(*CacheExpiration)(nil), // 89: milvus.proto.messages.CacheExpiration
(*LegacyProxyCollectionMetaCache)(nil), // 90: milvus.proto.messages.LegacyProxyCollectionMetaCache
(*ManualFlushExtraResponse)(nil), // 91: milvus.proto.messages.ManualFlushExtraResponse
(*TxnContext)(nil), // 92: milvus.proto.messages.TxnContext
(*RMQMessageLayout)(nil), // 93: milvus.proto.messages.RMQMessageLayout
(*BroadcastHeader)(nil), // 94: milvus.proto.messages.BroadcastHeader
(*ReplicateHeader)(nil), // 95: milvus.proto.messages.ReplicateHeader
(*ResourceKey)(nil), // 96: milvus.proto.messages.ResourceKey
(*CipherHeader)(nil), // 97: milvus.proto.messages.CipherHeader
nil, // 98: milvus.proto.messages.Message.PropertiesEntry
nil, // 99: milvus.proto.messages.AlterResourceGroupMessageHeader.ResourceGroupConfigsEntry
nil, // 100: milvus.proto.messages.RMQMessageLayout.PropertiesEntry
(datapb.SegmentLevel)(0), // 101: milvus.proto.data.SegmentLevel
(*commonpb.ReplicateConfiguration)(nil), // 102: milvus.proto.common.ReplicateConfiguration
(*schemapb.CollectionSchema)(nil), // 103: milvus.proto.schema.CollectionSchema
(*fieldmaskpb.FieldMask)(nil), // 104: google.protobuf.FieldMask
(commonpb.ConsistencyLevel)(0), // 105: milvus.proto.common.ConsistencyLevel
(*commonpb.KeyValuePair)(nil), // 106: milvus.proto.common.KeyValuePair
(commonpb.LoadPriority)(0), // 107: milvus.proto.common.LoadPriority
(*milvuspb.UserEntity)(nil), // 108: milvus.proto.milvus.UserEntity
(*internalpb.CredentialInfo)(nil), // 109: milvus.proto.internal.CredentialInfo
(*milvuspb.RoleEntity)(nil), // 110: milvus.proto.milvus.RoleEntity
(*milvuspb.RBACMeta)(nil), // 111: milvus.proto.milvus.RBACMeta
(*milvuspb.GrantEntity)(nil), // 112: milvus.proto.milvus.GrantEntity
(*milvuspb.PrivilegeGroupInfo)(nil), // 113: milvus.proto.milvus.PrivilegeGroupInfo
(*indexpb.FieldIndex)(nil), // 114: milvus.proto.index.FieldIndex
(commonpb.MsgType)(0), // 115: milvus.proto.common.MsgType
(*commonpb.MessageID)(nil), // 116: milvus.proto.common.MessageID
(*rgpb.ResourceGroupConfig)(nil), // 117: milvus.proto.rg.ResourceGroupConfig
(*FlushAllMessageHeader)(nil), // 92: milvus.proto.messages.FlushAllMessageHeader
(*FlushAllMessageBody)(nil), // 93: milvus.proto.messages.FlushAllMessageBody
(*TxnContext)(nil), // 94: milvus.proto.messages.TxnContext
(*RMQMessageLayout)(nil), // 95: milvus.proto.messages.RMQMessageLayout
(*BroadcastHeader)(nil), // 96: milvus.proto.messages.BroadcastHeader
(*ReplicateHeader)(nil), // 97: milvus.proto.messages.ReplicateHeader
(*ResourceKey)(nil), // 98: milvus.proto.messages.ResourceKey
(*CipherHeader)(nil), // 99: milvus.proto.messages.CipherHeader
nil, // 100: milvus.proto.messages.Message.PropertiesEntry
nil, // 101: milvus.proto.messages.AlterResourceGroupMessageHeader.ResourceGroupConfigsEntry
nil, // 102: milvus.proto.messages.RMQMessageLayout.PropertiesEntry
(datapb.SegmentLevel)(0), // 103: milvus.proto.data.SegmentLevel
(*commonpb.ReplicateConfiguration)(nil), // 104: milvus.proto.common.ReplicateConfiguration
(*schemapb.CollectionSchema)(nil), // 105: milvus.proto.schema.CollectionSchema
(*fieldmaskpb.FieldMask)(nil), // 106: google.protobuf.FieldMask
(commonpb.ConsistencyLevel)(0), // 107: milvus.proto.common.ConsistencyLevel
(*commonpb.KeyValuePair)(nil), // 108: milvus.proto.common.KeyValuePair
(commonpb.LoadPriority)(0), // 109: milvus.proto.common.LoadPriority
(*milvuspb.UserEntity)(nil), // 110: milvus.proto.milvus.UserEntity
(*internalpb.CredentialInfo)(nil), // 111: milvus.proto.internal.CredentialInfo
(*milvuspb.RoleEntity)(nil), // 112: milvus.proto.milvus.RoleEntity
(*milvuspb.RBACMeta)(nil), // 113: milvus.proto.milvus.RBACMeta
(*milvuspb.GrantEntity)(nil), // 114: milvus.proto.milvus.GrantEntity
(*milvuspb.PrivilegeGroupInfo)(nil), // 115: milvus.proto.milvus.PrivilegeGroupInfo
(*indexpb.FieldIndex)(nil), // 116: milvus.proto.index.FieldIndex
(commonpb.MsgType)(0), // 117: milvus.proto.common.MsgType
(*commonpb.MessageID)(nil), // 118: milvus.proto.common.MessageID
(*rgpb.ResourceGroupConfig)(nil), // 119: milvus.proto.rg.ResourceGroupConfig
}
var file_messages_proto_depIdxs = []int32{
98, // 0: milvus.proto.messages.Message.properties:type_name -> milvus.proto.messages.Message.PropertiesEntry
100, // 0: milvus.proto.messages.Message.properties:type_name -> milvus.proto.messages.Message.PropertiesEntry
3, // 1: milvus.proto.messages.TxnMessageBody.messages:type_name -> milvus.proto.messages.Message
13, // 2: milvus.proto.messages.InsertMessageHeader.partitions:type_name -> milvus.proto.messages.PartitionSegmentAssignment
14, // 3: milvus.proto.messages.PartitionSegmentAssignment.segment_assignment:type_name -> milvus.proto.messages.SegmentAssignment
101, // 4: milvus.proto.messages.CreateSegmentMessageHeader.level:type_name -> milvus.proto.data.SegmentLevel
102, // 5: milvus.proto.messages.AlterReplicateConfigMessageHeader.replicate_configuration:type_name -> milvus.proto.common.ReplicateConfiguration
103, // 6: milvus.proto.messages.SchemaChangeMessageBody.schema:type_name -> milvus.proto.schema.CollectionSchema
104, // 7: milvus.proto.messages.AlterCollectionMessageHeader.update_mask:type_name -> google.protobuf.FieldMask
103, // 4: milvus.proto.messages.CreateSegmentMessageHeader.level:type_name -> milvus.proto.data.SegmentLevel
104, // 5: milvus.proto.messages.AlterReplicateConfigMessageHeader.replicate_configuration:type_name -> milvus.proto.common.ReplicateConfiguration
105, // 6: milvus.proto.messages.SchemaChangeMessageBody.schema:type_name -> milvus.proto.schema.CollectionSchema
106, // 7: milvus.proto.messages.AlterCollectionMessageHeader.update_mask:type_name -> google.protobuf.FieldMask
88, // 8: milvus.proto.messages.AlterCollectionMessageHeader.cache_expirations:type_name -> milvus.proto.messages.CacheExpirations
34, // 9: milvus.proto.messages.AlterCollectionMessageBody.updates:type_name -> milvus.proto.messages.AlterCollectionMessageUpdates
103, // 10: milvus.proto.messages.AlterCollectionMessageUpdates.schema:type_name -> milvus.proto.schema.CollectionSchema
105, // 11: milvus.proto.messages.AlterCollectionMessageUpdates.consistency_level:type_name -> milvus.proto.common.ConsistencyLevel
106, // 12: milvus.proto.messages.AlterCollectionMessageUpdates.properties:type_name -> milvus.proto.common.KeyValuePair
105, // 10: milvus.proto.messages.AlterCollectionMessageUpdates.schema:type_name -> milvus.proto.schema.CollectionSchema
107, // 11: milvus.proto.messages.AlterCollectionMessageUpdates.consistency_level:type_name -> milvus.proto.common.ConsistencyLevel
108, // 12: milvus.proto.messages.AlterCollectionMessageUpdates.properties:type_name -> milvus.proto.common.KeyValuePair
35, // 13: milvus.proto.messages.AlterCollectionMessageUpdates.alter_load_config:type_name -> milvus.proto.messages.AlterLoadConfigOfAlterCollection
38, // 14: milvus.proto.messages.AlterLoadConfigMessageHeader.load_fields:type_name -> milvus.proto.messages.LoadFieldConfig
39, // 15: milvus.proto.messages.AlterLoadConfigMessageHeader.replicas:type_name -> milvus.proto.messages.LoadReplicaConfig
107, // 16: milvus.proto.messages.LoadReplicaConfig.priority:type_name -> milvus.proto.common.LoadPriority
106, // 17: milvus.proto.messages.CreateDatabaseMessageBody.properties:type_name -> milvus.proto.common.KeyValuePair
106, // 18: milvus.proto.messages.AlterDatabaseMessageBody.properties:type_name -> milvus.proto.common.KeyValuePair
109, // 16: milvus.proto.messages.LoadReplicaConfig.priority:type_name -> milvus.proto.common.LoadPriority
108, // 17: milvus.proto.messages.CreateDatabaseMessageBody.properties:type_name -> milvus.proto.common.KeyValuePair
108, // 18: milvus.proto.messages.AlterDatabaseMessageBody.properties:type_name -> milvus.proto.common.KeyValuePair
46, // 19: milvus.proto.messages.AlterDatabaseMessageBody.alter_load_config:type_name -> milvus.proto.messages.AlterLoadConfigOfAlterDatabase
108, // 20: milvus.proto.messages.CreateUserMessageHeader.user_entity:type_name -> milvus.proto.milvus.UserEntity
109, // 21: milvus.proto.messages.CreateUserMessageBody.credential_info:type_name -> milvus.proto.internal.CredentialInfo
108, // 22: milvus.proto.messages.AlterUserMessageHeader.user_entity:type_name -> milvus.proto.milvus.UserEntity
109, // 23: milvus.proto.messages.AlterUserMessageBody.credential_info:type_name -> milvus.proto.internal.CredentialInfo
110, // 24: milvus.proto.messages.AlterRoleMessageHeader.role_entity:type_name -> milvus.proto.milvus.RoleEntity
108, // 25: milvus.proto.messages.RoleBinding.user_entity:type_name -> milvus.proto.milvus.UserEntity
110, // 26: milvus.proto.messages.RoleBinding.role_entity:type_name -> milvus.proto.milvus.RoleEntity
110, // 20: milvus.proto.messages.CreateUserMessageHeader.user_entity:type_name -> milvus.proto.milvus.UserEntity
111, // 21: milvus.proto.messages.CreateUserMessageBody.credential_info:type_name -> milvus.proto.internal.CredentialInfo
110, // 22: milvus.proto.messages.AlterUserMessageHeader.user_entity:type_name -> milvus.proto.milvus.UserEntity
111, // 23: milvus.proto.messages.AlterUserMessageBody.credential_info:type_name -> milvus.proto.internal.CredentialInfo
112, // 24: milvus.proto.messages.AlterRoleMessageHeader.role_entity:type_name -> milvus.proto.milvus.RoleEntity
110, // 25: milvus.proto.messages.RoleBinding.user_entity:type_name -> milvus.proto.milvus.UserEntity
112, // 26: milvus.proto.messages.RoleBinding.role_entity:type_name -> milvus.proto.milvus.RoleEntity
63, // 27: milvus.proto.messages.AlterUserRoleMessageHeader.role_binding:type_name -> milvus.proto.messages.RoleBinding
63, // 28: milvus.proto.messages.DropUserRoleMessageHeader.role_binding:type_name -> milvus.proto.messages.RoleBinding
111, // 29: milvus.proto.messages.RestoreRBACMessageBody.rbac_meta:type_name -> milvus.proto.milvus.RBACMeta
112, // 30: milvus.proto.messages.AlterPrivilegeMessageHeader.entity:type_name -> milvus.proto.milvus.GrantEntity
112, // 31: milvus.proto.messages.DropPrivilegeMessageHeader.entity:type_name -> milvus.proto.milvus.GrantEntity
113, // 32: milvus.proto.messages.AlterPrivilegeGroupMessageHeader.privilege_group_info:type_name -> milvus.proto.milvus.PrivilegeGroupInfo
113, // 33: milvus.proto.messages.DropPrivilegeGroupMessageHeader.privilege_group_info:type_name -> milvus.proto.milvus.PrivilegeGroupInfo
99, // 34: milvus.proto.messages.AlterResourceGroupMessageHeader.resource_group_configs:type_name -> milvus.proto.messages.AlterResourceGroupMessageHeader.ResourceGroupConfigsEntry
114, // 35: milvus.proto.messages.CreateIndexMessageBody.field_index:type_name -> milvus.proto.index.FieldIndex
114, // 36: milvus.proto.messages.AlterIndexMessageBody.field_indexes:type_name -> milvus.proto.index.FieldIndex
113, // 29: milvus.proto.messages.RestoreRBACMessageBody.rbac_meta:type_name -> milvus.proto.milvus.RBACMeta
114, // 30: milvus.proto.messages.AlterPrivilegeMessageHeader.entity:type_name -> milvus.proto.milvus.GrantEntity
114, // 31: milvus.proto.messages.DropPrivilegeMessageHeader.entity:type_name -> milvus.proto.milvus.GrantEntity
115, // 32: milvus.proto.messages.AlterPrivilegeGroupMessageHeader.privilege_group_info:type_name -> milvus.proto.milvus.PrivilegeGroupInfo
115, // 33: milvus.proto.messages.DropPrivilegeGroupMessageHeader.privilege_group_info:type_name -> milvus.proto.milvus.PrivilegeGroupInfo
101, // 34: milvus.proto.messages.AlterResourceGroupMessageHeader.resource_group_configs:type_name -> milvus.proto.messages.AlterResourceGroupMessageHeader.ResourceGroupConfigsEntry
116, // 35: milvus.proto.messages.CreateIndexMessageBody.field_index:type_name -> milvus.proto.index.FieldIndex
116, // 36: milvus.proto.messages.AlterIndexMessageBody.field_indexes:type_name -> milvus.proto.index.FieldIndex
89, // 37: milvus.proto.messages.CacheExpirations.cache_expirations:type_name -> milvus.proto.messages.CacheExpiration
90, // 38: milvus.proto.messages.CacheExpiration.legacy_proxy_collection_meta_cache:type_name -> milvus.proto.messages.LegacyProxyCollectionMetaCache
115, // 39: milvus.proto.messages.LegacyProxyCollectionMetaCache.msg_type:type_name -> milvus.proto.common.MsgType
100, // 40: milvus.proto.messages.RMQMessageLayout.properties:type_name -> milvus.proto.messages.RMQMessageLayout.PropertiesEntry
96, // 41: milvus.proto.messages.BroadcastHeader.Resource_keys:type_name -> milvus.proto.messages.ResourceKey
116, // 42: milvus.proto.messages.ReplicateHeader.message_id:type_name -> milvus.proto.common.MessageID
116, // 43: milvus.proto.messages.ReplicateHeader.last_confirmed_message_id:type_name -> milvus.proto.common.MessageID
117, // 39: milvus.proto.messages.LegacyProxyCollectionMetaCache.msg_type:type_name -> milvus.proto.common.MsgType
102, // 40: milvus.proto.messages.RMQMessageLayout.properties:type_name -> milvus.proto.messages.RMQMessageLayout.PropertiesEntry
98, // 41: milvus.proto.messages.BroadcastHeader.Resource_keys:type_name -> milvus.proto.messages.ResourceKey
118, // 42: milvus.proto.messages.ReplicateHeader.message_id:type_name -> milvus.proto.common.MessageID
118, // 43: milvus.proto.messages.ReplicateHeader.last_confirmed_message_id:type_name -> milvus.proto.common.MessageID
2, // 44: milvus.proto.messages.ResourceKey.domain:type_name -> milvus.proto.messages.ResourceDomain
117, // 45: milvus.proto.messages.AlterResourceGroupMessageHeader.ResourceGroupConfigsEntry.value:type_name -> milvus.proto.rg.ResourceGroupConfig
119, // 45: milvus.proto.messages.AlterResourceGroupMessageHeader.ResourceGroupConfigsEntry.value:type_name -> milvus.proto.rg.ResourceGroupConfig
46, // [46:46] is the sub-list for method output_type
46, // [46:46] is the sub-list for method input_type
46, // [46:46] is the sub-list for extension type_name
@ -7253,7 +7338,7 @@ func file_messages_proto_init() {
}
}
file_messages_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TxnContext); i {
switch v := v.(*FlushAllMessageHeader); i {
case 0:
return &v.state
case 1:
@ -7265,7 +7350,7 @@ func file_messages_proto_init() {
}
}
file_messages_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RMQMessageLayout); i {
switch v := v.(*FlushAllMessageBody); i {
case 0:
return &v.state
case 1:
@ -7277,7 +7362,7 @@ func file_messages_proto_init() {
}
}
file_messages_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*BroadcastHeader); i {
switch v := v.(*TxnContext); i {
case 0:
return &v.state
case 1:
@ -7289,7 +7374,7 @@ func file_messages_proto_init() {
}
}
file_messages_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReplicateHeader); i {
switch v := v.(*RMQMessageLayout); i {
case 0:
return &v.state
case 1:
@ -7301,7 +7386,7 @@ func file_messages_proto_init() {
}
}
file_messages_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ResourceKey); i {
switch v := v.(*BroadcastHeader); i {
case 0:
return &v.state
case 1:
@ -7313,6 +7398,30 @@ func file_messages_proto_init() {
}
}
file_messages_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReplicateHeader); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_messages_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ResourceKey); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_messages_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CipherHeader); i {
case 0:
return &v.state
@ -7334,7 +7443,7 @@ func file_messages_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_messages_proto_rawDesc,
NumEnums: 3,
NumMessages: 98,
NumMessages: 100,
NumExtensions: 0,
NumServices: 0,
},

@ -133,6 +133,8 @@ func fromMessageToTsMsgV2(msg message.ImmutableMessage) (msgstream.TsMsg, error)
tsMsg, err = NewFlushMessageBody(msg)
case message.MessageTypeManualFlush:
tsMsg, err = NewManualFlushMessageBody(msg)
case message.MessageTypeFlushAll:
tsMsg, err = NewFlushAllMessageBody(msg)
case message.MessageTypeCreateSegment:
tsMsg, err = NewCreateSegmentMessageBody(msg)
case message.MessageTypeSchemaChange:

@ -11,6 +11,7 @@ var messageTypeToCommonpbMsgType = map[message.MessageType]commonpb.MsgType{
message.MessageTypeDelete: commonpb.MsgType_Delete,
message.MessageTypeFlush: commonpb.MsgType_FlushSegment,
message.MessageTypeManualFlush: commonpb.MsgType_ManualFlush,
message.MessageTypeFlushAll: commonpb.MsgType_FlushAll,
message.MessageTypeCreateSegment: commonpb.MsgType_CreateSegment,
message.MessageTypeCreateCollection: commonpb.MsgType_CreateCollection,
message.MessageTypeDropCollection: commonpb.MsgType_DropCollection,

@ -122,6 +122,30 @@ func NewManualFlushMessageBody(msg message.ImmutableMessage) (msgstream.TsMsg, e
}, nil
}

type FlushAllMessageBody struct {
*tsMsgImpl
FlushAllMessage message.ImmutableFlushAllMessageV2
}

func NewFlushAllMessageBody(msg message.ImmutableMessage) (msgstream.TsMsg, error) {
flushAllMsg, err := message.AsImmutableFlushAllMessageV2(msg)
if err != nil {
return nil, err
}
return &FlushAllMessageBody{
tsMsgImpl: &tsMsgImpl{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: msg.TimeTick(),
EndTimestamp: msg.TimeTick(),
},
ts: msg.TimeTick(),
sz: msg.EstimateSize(),
msgType: MustGetCommonpbMsgTypeFromMessageType(msg.MessageType()),
},
FlushAllMessage: flushAllMsg,
}, nil
}

type SchemaChangeMessageBody struct {
*tsMsgImpl
SchemaChangeMessage message.ImmutableSchemaChangeMessageV2

@ -434,6 +434,16 @@
"MessageType": "MessageTypeDropIndex",
"Version": 2
}
},
{
"MessageSpecializedType": {
"HeaderType": "messagespb.FlushAllMessageHeader",
"BodyType": "messagespb.FlushAllMessageBody"
},
"MessageTypeWithVersion": {
"MessageType": "MessageTypeFlushAll",
"Version": 2
}
}
]
}
}
@ -64,6 +64,7 @@ const (
MessageTypeCreateIndex MessageType = MessageType(messagespb.MessageType_CreateIndex)
MessageTypeAlterIndex MessageType = MessageType(messagespb.MessageType_AlterIndex)
MessageTypeDropIndex MessageType = MessageType(messagespb.MessageType_DropIndex)
MessageTypeFlushAll MessageType = MessageType(messagespb.MessageType_FlushAll)
)

// Export extra message type
@ -164,6 +165,8 @@ type (
AlterIndexMessageBody = messagespb.AlterIndexMessageBody
DropIndexMessageHeader = messagespb.DropIndexMessageHeader
DropIndexMessageBody = messagespb.DropIndexMessageBody
FlushAllMessageHeader = messagespb.FlushAllMessageHeader
FlushAllMessageBody = messagespb.FlushAllMessageBody
)

// Type aliases for TimeTickMessageV1
@ -1826,6 +1829,47 @@ var MustAsBroadcastDropIndexMessageV2 = MustAsSpecializedBroadcastMessage[*DropI
// NewDropIndexMessageBuilderV2 creates a new message builder for DropIndexMessageV2
var NewDropIndexMessageBuilderV2 = newMutableMessageBuilder[*DropIndexMessageHeader, *DropIndexMessageBody]

// Type aliases for FlushAllMessageV2
type (
MutableFlushAllMessageV2 = specializedMutableMessage[*FlushAllMessageHeader, *FlushAllMessageBody]
ImmutableFlushAllMessageV2 = SpecializedImmutableMessage[*FlushAllMessageHeader, *FlushAllMessageBody]
BroadcastFlushAllMessageV2 = SpecializedBroadcastMessage[*FlushAllMessageHeader, *FlushAllMessageBody]
BroadcastResultFlushAllMessageV2 = BroadcastResult[*FlushAllMessageHeader, *FlushAllMessageBody]
)

// MessageTypeWithVersion for FlushAllMessageV2
var MessageTypeFlushAllV2 = MessageTypeWithVersion{
MessageType: MessageTypeFlushAll,
Version: VersionV2,
}

// MessageSpecializedType for FlushAllMessageV2
var SpecializedTypeFlushAllV2 = MessageSpecializedType{
BodyType: reflect.TypeOf((*FlushAllMessageBody)(nil)),
HeaderType: reflect.TypeOf((*FlushAllMessageHeader)(nil)),
}

// AsMutableFlushAllMessageV2 converts a BasicMessage to MutableFlushAllMessageV2
var AsMutableFlushAllMessageV2 = asSpecializedMutableMessage[*FlushAllMessageHeader, *FlushAllMessageBody]

// MustAsMutableFlushAllMessageV2 converts a BasicMessage to MutableFlushAllMessageV2, panics on error
var MustAsMutableFlushAllMessageV2 = mustAsSpecializedMutableMessage[*FlushAllMessageHeader, *FlushAllMessageBody]

// AsImmutableFlushAllMessageV2 converts an ImmutableMessage to ImmutableFlushAllMessageV2
var AsImmutableFlushAllMessageV2 = asSpecializedImmutableMessage[*FlushAllMessageHeader, *FlushAllMessageBody]

// MustAsImmutableFlushAllMessageV2 converts an ImmutableMessage to ImmutableFlushAllMessageV2, panics on error
var MustAsImmutableFlushAllMessageV2 = MustAsSpecializedImmutableMessage[*FlushAllMessageHeader, *FlushAllMessageBody]

// AsBroadcastFlushAllMessageV2 converts a BasicMessage to BroadcastFlushAllMessageV2
var AsBroadcastFlushAllMessageV2 = asSpecializedBroadcastMessage[*FlushAllMessageHeader, *FlushAllMessageBody]

// MustAsBroadcastFlushAllMessageV2 converts a BasicMessage to BroadcastFlushAllMessageV2, panics on error
var MustAsBroadcastFlushAllMessageV2 = MustAsSpecializedBroadcastMessage[*FlushAllMessageHeader, *FlushAllMessageBody]

// NewFlushAllMessageBuilderV2 creates a new message builder for FlushAllMessageV2
var NewFlushAllMessageBuilderV2 = newMutableMessageBuilder[*FlushAllMessageHeader, *FlushAllMessageBody]

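[Editor's note: an illustrative sketch, not part of this diff, of how consumer code outside the message package might narrow a generic message to the new FlushAll specialization. It mirrors the AsImmutableFlushAllMessageV2 call already used in NewFlushAllMessageBody above; only identifiers that appear in this PR are used.]

// asFlushAll returns the FlushAll view of msg, or false when msg is not a
// flush-all message or the conversion fails.
func asFlushAll(msg message.ImmutableMessage) (message.ImmutableFlushAllMessageV2, bool) {
	if msg.MessageType() != message.MessageTypeFlushAll {
		return nil, false // not a flush-all message; let other handlers run
	}
	flushAllMsg, err := message.AsImmutableFlushAllMessageV2(msg)
	if err != nil {
		return nil, false
	}
	return flushAllMsg, true
}
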
// messageTypeMap enforces the constraint that one header type can only be used for one message type.
var messageTypeMap = map[reflect.Type]MessageType{
reflect.TypeOf(&messagespb.AlterAliasMessageHeader{}): MessageTypeAlterAlias,
@ -1860,6 +1904,7 @@ var messageTypeMap = map[reflect.Type]MessageType{
reflect.TypeOf(&messagespb.DropRoleMessageHeader{}): MessageTypeDropRole,
reflect.TypeOf(&messagespb.DropUserMessageHeader{}): MessageTypeDropUser,
reflect.TypeOf(&messagespb.DropUserRoleMessageHeader{}): MessageTypeDropUserRole,
reflect.TypeOf(&messagespb.FlushAllMessageHeader{}): MessageTypeFlushAll,
reflect.TypeOf(&messagespb.FlushMessageHeader{}): MessageTypeFlush,
reflect.TypeOf(&messagespb.ImportMessageHeader{}): MessageTypeImport,
reflect.TypeOf(&messagespb.InsertMessageHeader{}): MessageTypeInsert,
@ -1921,6 +1966,7 @@ var messageTypeVersionSpecializedMap = map[MessageTypeWithVersion]MessageSpecial
MessageTypeDropRoleV2: SpecializedTypeDropRoleV2,
MessageTypeDropUserRoleV2: SpecializedTypeDropUserRoleV2,
MessageTypeDropUserV2: SpecializedTypeDropUserV2,
MessageTypeFlushAllV2: SpecializedTypeFlushAllV2,
MessageTypeFlushV2: SpecializedTypeFlushV2,
MessageTypeImportV1: SpecializedTypeImportV1,
MessageTypeInsertV1: SpecializedTypeInsertV1,
@ -1966,6 +2012,7 @@ var messageSpecializedTypeVersionMap = map[MessageSpecializedType]MessageTypeWit
SpecializedTypeDropRoleV2: MessageTypeDropRoleV2,
SpecializedTypeDropUserRoleV2: MessageTypeDropUserRoleV2,
SpecializedTypeDropUserV2: MessageTypeDropUserV2,
SpecializedTypeFlushAllV2: MessageTypeFlushAllV2,
SpecializedTypeFlushV2: MessageTypeFlushV2,
SpecializedTypeImportV1: MessageTypeImportV1,
SpecializedTypeInsertV1: MessageTypeInsertV1,

@ -34,6 +34,7 @@ var exclusiveRequiredMessageType = map[MessageType]struct{}{
MessageTypeCreatePartition: {},
MessageTypeDropPartition: {},
MessageTypeManualFlush: {},
MessageTypeFlushAll: {},
MessageTypeSchemaChange: {},
MessageTypeAlterReplicateConfig: {},
MessageTypeAlterCollection: {},

@ -54,7 +54,7 @@ require (
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect
github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45 // indirect
github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect

@ -332,8 +332,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45 h1:TMUhlirMCH2zgJD+qClP5EP0yuFl1VrE4j+0fiRSuJU=
github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251124145901-0b96e4c8af45/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6 h1:TeHfsRCdjbX30xS7Npcb+POQXd460+AjmXYmmTuxyBA=
github.com/milvus-io/milvus-proto/go-api/v2 v2.6.6-0.20251210030907-6087c9c0bad6/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs=
github.com/milvus-io/milvus/pkg/v2 v2.6.4-0.20251104142533-a2ce70d25256 h1:M2waty0w2k4YT2HHzJk3fx6EFPD4DKxNJatitIV+gGU=
github.com/milvus-io/milvus/pkg/v2 v2.6.4-0.20251104142533-a2ce70d25256/go.mod h1:HT6Wxahwj/l8+i+D/C3iwDzCjDa36U9gyVw6CjjK4pE=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=

163
tests/integration/flushall/flushall_test.go
Normal file
@ -0,0 +1,163 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package flushall

import (
"context"
"fmt"
"sync"
"testing"
"time"

"github.com/stretchr/testify/suite"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"

"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/pkg/v2/common"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/util/funcutil"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/tests/integration"
)

type FlushAllSuite struct {
integration.MiniClusterSuite
}

func (s *FlushAllSuite) WaitForFlushAll(ctx context.Context, flushTss map[string]uint64) {
flushed := func() bool {
resp, err := s.Cluster.MilvusClient.GetFlushAllState(ctx, &milvuspb.GetFlushAllStateRequest{
FlushAllTss: flushTss,
})
if err != nil {
return false
}
return resp.GetFlushed()
}
for !flushed() {
select {
case <-ctx.Done():
s.FailNow("failed to wait for flush until ctx done")
return
default:
time.Sleep(500 * time.Millisecond)
}
}
}

// TestFlushAll flushes every collection across all databases and validates the result.
func (s *FlushAllSuite) TestFlushAll() {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
c := s.Cluster

const (
dim = 8
rowNum = 100
dbCnt = 10
colCntPerDB = 10
)

collectionNames := make(map[string]string) // collection name -> db name
for i := 0; i < dbCnt; i++ {
// create db
dbName := fmt.Sprintf("TestFlushAll_db_%d_%s", i, funcutil.GenRandomStr())
status, err := c.MilvusClient.CreateDatabase(ctx, &milvuspb.CreateDatabaseRequest{
DbName: dbName,
})
s.NoError(merr.CheckRPCCall(status, err))

for j := 0; j < colCntPerDB; j++ {
collectionName := fmt.Sprintf("TestFlushAll_collection_%d_%d_%s", i, j, funcutil.GenRandomStr())
collectionNames[collectionName] = dbName
}
}

execFunc := func(collectionName string, dbName string) {
// create collection
schema := integration.ConstructSchemaOfVecDataType(collectionName, dim, true, schemapb.DataType_FloatVector)
marshaledSchema, err := proto.Marshal(schema)
s.NoError(err)

createCollectionStatus, err := c.MilvusClient.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
DbName: dbName,
CollectionName: collectionName,
Schema: marshaledSchema,
ShardsNum: common.DefaultShardsNum,
})
s.NoError(merr.CheckRPCCall(createCollectionStatus, err))

// insert data
fVecColumn := integration.NewFloatVectorFieldData(integration.FloatVecField, rowNum, dim)
hashKeys := integration.GenerateHashKeys(rowNum)
insertResult, err := c.MilvusClient.Insert(ctx, &milvuspb.InsertRequest{
DbName: dbName,
CollectionName: collectionName,
FieldsData: []*schemapb.FieldData{fVecColumn},
HashKeys: hashKeys,
NumRows: uint32(rowNum),
})
s.NoError(merr.CheckRPCCall(insertResult, err))
}

wg := sync.WaitGroup{}
for collectionName, dbName := range collectionNames {
wg.Add(1)
go func(collectionName string, dbName string) {
defer wg.Done()
execFunc(collectionName, dbName)
}(collectionName, dbName)
}
wg.Wait()

// flush all
flushAllResp, err := c.MilvusClient.FlushAll(ctx, &milvuspb.FlushAllRequest{})
s.NoError(merr.CheckRPCCall(flushAllResp, err))
log.Info("FlushAll succeed", zap.Any("flushAllTss", flushAllResp.GetFlushAllTss()))
s.WaitForFlushAll(ctx, flushAllResp.GetFlushAllTss())

// show and validate segments
for collectionName, dbName := range collectionNames {
resp, err := c.MilvusClient.GetPersistentSegmentInfo(ctx, &milvuspb.GetPersistentSegmentInfoRequest{
DbName: dbName,
CollectionName: collectionName,
})
s.NoError(merr.CheckRPCCall(resp, err))
s.Len(resp.GetInfos(), 1)
segment := resp.GetInfos()[0]
s.Equal(segment.GetState(), commonpb.SegmentState_Flushed)
s.Equal(segment.GetNumRows(), int64(rowNum))
}

// drop collections
for collectionName, dbName := range collectionNames {
status, err := c.MilvusClient.DropCollection(ctx, &milvuspb.DropCollectionRequest{
DbName: dbName,
CollectionName: collectionName,
})
s.NoError(merr.CheckRPCCall(status, err))
}

log.Info("TestFlushAll succeed")
}

func TestFlushAll(t *testing.T) {
suite.Run(t, new(FlushAllSuite))
}
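[Editor's note: assuming the usual Milvus integration-test environment is available, this new suite can be run with the standard Go test runner, e.g. `go test ./tests/integration/flushall/ -run TestFlushAll -v`.]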