enhance: use recovery+shardmanager, remove segment assignment interceptor (#41824)

issue: #41544

- add a lock interceptor into the WAL.
- use the recovery storage and shard manager to replace the original
  segment assignment implementation (a rough sketch of the idea follows this list).
- remove the now-redundant implementation and its unit tests.
- remove the redundant proto definitions.
- use 2 streaming nodes in the e2e tests.
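
For reference, here is a minimal, hypothetical sketch of how a lock interceptor and a shard manager can cooperate on the WAL append path. None of these types are the real Milvus interfaces; they only illustrate the shape of the change: appends on a pchannel are serialized by the lock interceptor, and insert messages are routed to a segment chosen by the shard manager instead of by a dedicated segment-assignment interceptor.

```go
// Hypothetical stand-in types, not the Milvus API.
package main

import (
	"context"
	"fmt"
	"sync"
)

type message struct {
	typ  string
	rows int
}

// shardManager is a simplified stand-in for the real ShardManager interface.
type shardManager interface {
	AssignSegment(rows int) (segmentID int64, err error)
}

type appendFunc func(ctx context.Context, msg message) error

// lockInterceptor serializes appends on a single pchannel.
type lockInterceptor struct {
	mu sync.Mutex
}

func (l *lockInterceptor) DoAppend(ctx context.Context, msg message, next appendFunc) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return next(ctx, msg)
}

// shardInterceptor asks the shard manager for a segment before appending inserts.
type shardInterceptor struct {
	sm shardManager
}

func (s *shardInterceptor) DoAppend(ctx context.Context, msg message, next appendFunc) error {
	if msg.typ == "Insert" {
		segID, err := s.sm.AssignSegment(msg.rows)
		if err != nil {
			return err
		}
		fmt.Println("insert assigned to segment", segID)
	}
	return next(ctx, msg)
}

// growOnlyShardManager hands out monotonically increasing segment IDs.
type growOnlyShardManager struct{ next int64 }

func (g *growOnlyShardManager) AssignSegment(rows int) (int64, error) {
	g.next++
	return g.next, nil
}

func main() {
	lock := &lockInterceptor{}
	shard := &shardInterceptor{sm: &growOnlyShardManager{}}
	appendToWAL := func(ctx context.Context, msg message) error {
		fmt.Println("appended", msg.typ, "to the wal")
		return nil
	}
	// Chain: lock -> shard assignment -> underlying wal append.
	err := lock.DoAppend(context.Background(), message{typ: "Insert", rows: 128},
		func(ctx context.Context, msg message) error {
			return shard.DoAppend(ctx, msg, appendToWAL)
		})
	fmt.Println("append error:", err)
}
```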

---------

Signed-off-by: chyezh <chyezh@outlook.com>
Authored by Zhen Ye on 2025-05-14 23:00:23 +08:00; committed by GitHub.
parent 2d0ae3a709
commit 0a465bb5b7
90 changed files with 2356 additions and 4245 deletions

go.mod (2 changed lines)

@ -243,7 +243,7 @@ require (
github.com/x448/float16 v0.8.4 // indirect
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zilliztech/woodpecker v0.0.0-20250427123625-654f0175eff0 // indirect
github.com/zilliztech/woodpecker v0.0.0-20250514005855-9467e66ea2bc // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect
go.etcd.io/etcd/client/v2 v2.305.5 // indirect

go.sum (4 changed lines)

@ -1066,8 +1066,8 @@ github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
github.com/zilliztech/woodpecker v0.0.0-20250427123625-654f0175eff0 h1:6B7IUyTRarQVTvusRS0bs6aJn3tUTVTIVqPEOj5IQHM=
github.com/zilliztech/woodpecker v0.0.0-20250427123625-654f0175eff0/go.mod h1:MLt2hsMXd5bVOykwZyWXYHsy9kN4C2gQEaCrID5rM1w=
github.com/zilliztech/woodpecker v0.0.0-20250514005855-9467e66ea2bc h1:9KEOCnDt//GAimP3Z3Qh08VwPY7H9AOOjHx9C9ckMSQ=
github.com/zilliztech/woodpecker v0.0.0-20250514005855-9467e66ea2bc/go.mod h1:MLt2hsMXd5bVOykwZyWXYHsy9kN4C2gQEaCrID5rM1w=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=

View File

@ -52,12 +52,18 @@ packages:
InterceptorWithReady:
InterceptorWithMetrics:
InterceptorBuilder:
github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/shards:
interfaces:
ShardManager:
github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/utils:
interfaces:
SealOperator:
github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/timetick/inspector:
interfaces:
TimeTickSyncOperator:
github.com/milvus-io/milvus/internal/streamingnode/server/wal/recovery:
interfaces:
RecoveryStorage:
github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/wab:
interfaces:
ROWriteAheadBuffer:

View File

@ -133,6 +133,8 @@ func (rc *resumableConsumerImpl) createNewConsumer(opts *handler.ConsumerOptions
backoff.InitialInterval = 100 * time.Millisecond
backoff.MaxInterval = 10 * time.Second
backoff.MaxElapsedTime = 0
backoff.Reset()
for {
// Create a new consumer.
// a underlying stream consumer life time should be equal to the resumable producer.
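
This hunk (and the identical ones in the resumable producer and handler client below) adds a `Reset()` call before the retry loop. As context, here is a small standalone sketch of that retry pattern, assuming the `backoff` value is a `github.com/cenkalti/backoff/v4` ExponentialBackOff (the field names match): `Reset()` puts the current interval back to `InitialInterval` and restarts the elapsed-time clock before the loop starts pulling intervals from `NextBackOff()`.

```go
// Sketch only, assuming github.com/cenkalti/backoff/v4.
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func createWithRetry(create func() error) {
	bo := backoff.NewExponentialBackOff()
	bo.InitialInterval = 100 * time.Millisecond
	bo.MaxInterval = 10 * time.Second
	bo.MaxElapsedTime = 0 // never stop retrying based on elapsed time
	bo.Reset()            // start from InitialInterval with a fresh elapsed-time clock
	for {
		if err := create(); err == nil {
			return
		}
		// Back off before the next attempt; intervals grow up to MaxInterval.
		time.Sleep(bo.NextBackOff())
	}
}

func main() {
	attempts := 0
	createWithRetry(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println("succeeded after", attempts, "attempts")
}
```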

View File

@ -154,6 +154,8 @@ func (p *ResumableProducer) createNewProducer() (producer.Producer, error) {
backoff.InitialInterval = 100 * time.Millisecond
backoff.MaxInterval = 10 * time.Second
backoff.MaxElapsedTime = 0
backoff.Reset()
for {
// Create a new producer.
// a underlying stream producer life time should be equal to the resumable producer.

View File

@ -4,6 +4,8 @@ import (
"context"
"sync"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/util/streamingutil/status"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
@ -109,6 +111,7 @@ func (u *walAccesserImpl) appendToVChannel(ctx context.Context, vchannel string,
// if the transaction is expired,
// there may be wal is transferred to another streaming node,
// retry it with new transaction.
u.Logger().Warn("transaction expired, retrying", zap.String("vchannel", vchannel), zap.Error(err))
continue
}
return resp

View File

@ -14,6 +14,7 @@ import (
"github.com/milvus-io/milvus/internal/util/streamingutil"
"github.com/milvus-io/milvus/internal/util/streamingutil/status"
"github.com/milvus-io/milvus/internal/util/streamingutil/util"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/util/conc"
@ -33,7 +34,7 @@ func newWALAccesser(c *clientv3.Client) *walAccesserImpl {
// streaming service is enabled, create the handler client for the streaming service.
handlerClient = handler.NewHandlerClient(streamingCoordClient.Assignment())
}
return &walAccesserImpl{
w := &walAccesserImpl{
lifetime: typeutil.NewLifetime(),
streamingCoordClient: streamingCoordClient,
handlerClient: handlerClient,
@ -44,10 +45,13 @@ func newWALAccesser(c *clientv3.Client) *walAccesserImpl {
appendExecutionPool: conc.NewPool[struct{}](0),
dispatchExecutionPool: conc.NewPool[struct{}](0),
}
w.SetLogger(log.With(log.FieldComponent("wal-accesser")))
return w
}
// walAccesserImpl is the implementation of WALAccesser.
type walAccesserImpl struct {
log.Binder
lifetime *typeutil.Lifetime
// All services

View File

@ -1,259 +0,0 @@
// Code generated by mockery v2.53.3. DO NOT EDIT.
package mock_inspector
import (
context "context"
mock "github.com/stretchr/testify/mock"
stats "github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
types "github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
)
// MockSealOperator is an autogenerated mock type for the SealOperator type
type MockSealOperator struct {
mock.Mock
}
type MockSealOperator_Expecter struct {
mock *mock.Mock
}
func (_m *MockSealOperator) EXPECT() *MockSealOperator_Expecter {
return &MockSealOperator_Expecter{mock: &_m.Mock}
}
// Channel provides a mock function with no fields
func (_m *MockSealOperator) Channel() types.PChannelInfo {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Channel")
}
var r0 types.PChannelInfo
if rf, ok := ret.Get(0).(func() types.PChannelInfo); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(types.PChannelInfo)
}
return r0
}
// MockSealOperator_Channel_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Channel'
type MockSealOperator_Channel_Call struct {
*mock.Call
}
// Channel is a helper method to define mock.On call
func (_e *MockSealOperator_Expecter) Channel() *MockSealOperator_Channel_Call {
return &MockSealOperator_Channel_Call{Call: _e.mock.On("Channel")}
}
func (_c *MockSealOperator_Channel_Call) Run(run func()) *MockSealOperator_Channel_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockSealOperator_Channel_Call) Return(_a0 types.PChannelInfo) *MockSealOperator_Channel_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockSealOperator_Channel_Call) RunAndReturn(run func() types.PChannelInfo) *MockSealOperator_Channel_Call {
_c.Call.Return(run)
return _c
}
// IsNoWaitSeal provides a mock function with no fields
func (_m *MockSealOperator) IsNoWaitSeal() bool {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for IsNoWaitSeal")
}
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// MockSealOperator_IsNoWaitSeal_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsNoWaitSeal'
type MockSealOperator_IsNoWaitSeal_Call struct {
*mock.Call
}
// IsNoWaitSeal is a helper method to define mock.On call
func (_e *MockSealOperator_Expecter) IsNoWaitSeal() *MockSealOperator_IsNoWaitSeal_Call {
return &MockSealOperator_IsNoWaitSeal_Call{Call: _e.mock.On("IsNoWaitSeal")}
}
func (_c *MockSealOperator_IsNoWaitSeal_Call) Run(run func()) *MockSealOperator_IsNoWaitSeal_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockSealOperator_IsNoWaitSeal_Call) Return(_a0 bool) *MockSealOperator_IsNoWaitSeal_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockSealOperator_IsNoWaitSeal_Call) RunAndReturn(run func() bool) *MockSealOperator_IsNoWaitSeal_Call {
_c.Call.Return(run)
return _c
}
// MustSealSegments provides a mock function with given fields: ctx, infos
func (_m *MockSealOperator) MustSealSegments(ctx context.Context, infos ...stats.SegmentBelongs) {
_va := make([]interface{}, len(infos))
for _i := range infos {
_va[_i] = infos[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx)
_ca = append(_ca, _va...)
_m.Called(_ca...)
}
// MockSealOperator_MustSealSegments_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MustSealSegments'
type MockSealOperator_MustSealSegments_Call struct {
*mock.Call
}
// MustSealSegments is a helper method to define mock.On call
// - ctx context.Context
// - infos ...stats.SegmentBelongs
func (_e *MockSealOperator_Expecter) MustSealSegments(ctx interface{}, infos ...interface{}) *MockSealOperator_MustSealSegments_Call {
return &MockSealOperator_MustSealSegments_Call{Call: _e.mock.On("MustSealSegments",
append([]interface{}{ctx}, infos...)...)}
}
func (_c *MockSealOperator_MustSealSegments_Call) Run(run func(ctx context.Context, infos ...stats.SegmentBelongs)) *MockSealOperator_MustSealSegments_Call {
_c.Call.Run(func(args mock.Arguments) {
variadicArgs := make([]stats.SegmentBelongs, len(args)-1)
for i, a := range args[1:] {
if a != nil {
variadicArgs[i] = a.(stats.SegmentBelongs)
}
}
run(args[0].(context.Context), variadicArgs...)
})
return _c
}
func (_c *MockSealOperator_MustSealSegments_Call) Return() *MockSealOperator_MustSealSegments_Call {
_c.Call.Return()
return _c
}
func (_c *MockSealOperator_MustSealSegments_Call) RunAndReturn(run func(context.Context, ...stats.SegmentBelongs)) *MockSealOperator_MustSealSegments_Call {
_c.Run(run)
return _c
}
// TryToSealSegments provides a mock function with given fields: ctx, infos
func (_m *MockSealOperator) TryToSealSegments(ctx context.Context, infos ...stats.SegmentBelongs) {
_va := make([]interface{}, len(infos))
for _i := range infos {
_va[_i] = infos[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx)
_ca = append(_ca, _va...)
_m.Called(_ca...)
}
// MockSealOperator_TryToSealSegments_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TryToSealSegments'
type MockSealOperator_TryToSealSegments_Call struct {
*mock.Call
}
// TryToSealSegments is a helper method to define mock.On call
// - ctx context.Context
// - infos ...stats.SegmentBelongs
func (_e *MockSealOperator_Expecter) TryToSealSegments(ctx interface{}, infos ...interface{}) *MockSealOperator_TryToSealSegments_Call {
return &MockSealOperator_TryToSealSegments_Call{Call: _e.mock.On("TryToSealSegments",
append([]interface{}{ctx}, infos...)...)}
}
func (_c *MockSealOperator_TryToSealSegments_Call) Run(run func(ctx context.Context, infos ...stats.SegmentBelongs)) *MockSealOperator_TryToSealSegments_Call {
_c.Call.Run(func(args mock.Arguments) {
variadicArgs := make([]stats.SegmentBelongs, len(args)-1)
for i, a := range args[1:] {
if a != nil {
variadicArgs[i] = a.(stats.SegmentBelongs)
}
}
run(args[0].(context.Context), variadicArgs...)
})
return _c
}
func (_c *MockSealOperator_TryToSealSegments_Call) Return() *MockSealOperator_TryToSealSegments_Call {
_c.Call.Return()
return _c
}
func (_c *MockSealOperator_TryToSealSegments_Call) RunAndReturn(run func(context.Context, ...stats.SegmentBelongs)) *MockSealOperator_TryToSealSegments_Call {
_c.Run(run)
return _c
}
// TryToSealWaitedSegment provides a mock function with given fields: ctx
func (_m *MockSealOperator) TryToSealWaitedSegment(ctx context.Context) {
_m.Called(ctx)
}
// MockSealOperator_TryToSealWaitedSegment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TryToSealWaitedSegment'
type MockSealOperator_TryToSealWaitedSegment_Call struct {
*mock.Call
}
// TryToSealWaitedSegment is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockSealOperator_Expecter) TryToSealWaitedSegment(ctx interface{}) *MockSealOperator_TryToSealWaitedSegment_Call {
return &MockSealOperator_TryToSealWaitedSegment_Call{Call: _e.mock.On("TryToSealWaitedSegment", ctx)}
}
func (_c *MockSealOperator_TryToSealWaitedSegment_Call) Run(run func(ctx context.Context)) *MockSealOperator_TryToSealWaitedSegment_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context))
})
return _c
}
func (_c *MockSealOperator_TryToSealWaitedSegment_Call) Return() *MockSealOperator_TryToSealWaitedSegment_Call {
_c.Call.Return()
return _c
}
func (_c *MockSealOperator_TryToSealWaitedSegment_Call) RunAndReturn(run func(context.Context)) *MockSealOperator_TryToSealWaitedSegment_Call {
_c.Run(run)
return _c
}
// NewMockSealOperator creates a new instance of MockSealOperator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockSealOperator(t interface {
mock.TestingT
Cleanup(func())
}) *MockSealOperator {
mock := &MockSealOperator{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -0,0 +1,855 @@
// Code generated by mockery v2.53.3. DO NOT EDIT.
package mock_shards
import (
log "github.com/milvus-io/milvus/pkg/v2/log"
message "github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
mock "github.com/stretchr/testify/mock"
shards "github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/shards"
types "github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
utils "github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/utils"
)
// MockShardManager is an autogenerated mock type for the ShardManager type
type MockShardManager struct {
mock.Mock
}
type MockShardManager_Expecter struct {
mock *mock.Mock
}
func (_m *MockShardManager) EXPECT() *MockShardManager_Expecter {
return &MockShardManager_Expecter{mock: &_m.Mock}
}
// AssignSegment provides a mock function with given fields: req
func (_m *MockShardManager) AssignSegment(req *shards.AssignSegmentRequest) (*shards.AssignSegmentResult, error) {
ret := _m.Called(req)
if len(ret) == 0 {
panic("no return value specified for AssignSegment")
}
var r0 *shards.AssignSegmentResult
var r1 error
if rf, ok := ret.Get(0).(func(*shards.AssignSegmentRequest) (*shards.AssignSegmentResult, error)); ok {
return rf(req)
}
if rf, ok := ret.Get(0).(func(*shards.AssignSegmentRequest) *shards.AssignSegmentResult); ok {
r0 = rf(req)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*shards.AssignSegmentResult)
}
}
if rf, ok := ret.Get(1).(func(*shards.AssignSegmentRequest) error); ok {
r1 = rf(req)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockShardManager_AssignSegment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AssignSegment'
type MockShardManager_AssignSegment_Call struct {
*mock.Call
}
// AssignSegment is a helper method to define mock.On call
// - req *shards.AssignSegmentRequest
func (_e *MockShardManager_Expecter) AssignSegment(req interface{}) *MockShardManager_AssignSegment_Call {
return &MockShardManager_AssignSegment_Call{Call: _e.mock.On("AssignSegment", req)}
}
func (_c *MockShardManager_AssignSegment_Call) Run(run func(req *shards.AssignSegmentRequest)) *MockShardManager_AssignSegment_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(*shards.AssignSegmentRequest))
})
return _c
}
func (_c *MockShardManager_AssignSegment_Call) Return(_a0 *shards.AssignSegmentResult, _a1 error) *MockShardManager_AssignSegment_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockShardManager_AssignSegment_Call) RunAndReturn(run func(*shards.AssignSegmentRequest) (*shards.AssignSegmentResult, error)) *MockShardManager_AssignSegment_Call {
_c.Call.Return(run)
return _c
}
// AsyncFlushSegment provides a mock function with given fields: signal
func (_m *MockShardManager) AsyncFlushSegment(signal utils.SealSegmentSignal) {
_m.Called(signal)
}
// MockShardManager_AsyncFlushSegment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AsyncFlushSegment'
type MockShardManager_AsyncFlushSegment_Call struct {
*mock.Call
}
// AsyncFlushSegment is a helper method to define mock.On call
// - signal utils.SealSegmentSignal
func (_e *MockShardManager_Expecter) AsyncFlushSegment(signal interface{}) *MockShardManager_AsyncFlushSegment_Call {
return &MockShardManager_AsyncFlushSegment_Call{Call: _e.mock.On("AsyncFlushSegment", signal)}
}
func (_c *MockShardManager_AsyncFlushSegment_Call) Run(run func(signal utils.SealSegmentSignal)) *MockShardManager_AsyncFlushSegment_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(utils.SealSegmentSignal))
})
return _c
}
func (_c *MockShardManager_AsyncFlushSegment_Call) Return() *MockShardManager_AsyncFlushSegment_Call {
_c.Call.Return()
return _c
}
func (_c *MockShardManager_AsyncFlushSegment_Call) RunAndReturn(run func(utils.SealSegmentSignal)) *MockShardManager_AsyncFlushSegment_Call {
_c.Run(run)
return _c
}
// Channel provides a mock function with no fields
func (_m *MockShardManager) Channel() types.PChannelInfo {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Channel")
}
var r0 types.PChannelInfo
if rf, ok := ret.Get(0).(func() types.PChannelInfo); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(types.PChannelInfo)
}
return r0
}
// MockShardManager_Channel_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Channel'
type MockShardManager_Channel_Call struct {
*mock.Call
}
// Channel is a helper method to define mock.On call
func (_e *MockShardManager_Expecter) Channel() *MockShardManager_Channel_Call {
return &MockShardManager_Channel_Call{Call: _e.mock.On("Channel")}
}
func (_c *MockShardManager_Channel_Call) Run(run func()) *MockShardManager_Channel_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockShardManager_Channel_Call) Return(_a0 types.PChannelInfo) *MockShardManager_Channel_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockShardManager_Channel_Call) RunAndReturn(run func() types.PChannelInfo) *MockShardManager_Channel_Call {
_c.Call.Return(run)
return _c
}
// CheckIfCollectionCanBeCreated provides a mock function with given fields: collectionID
func (_m *MockShardManager) CheckIfCollectionCanBeCreated(collectionID int64) error {
ret := _m.Called(collectionID)
if len(ret) == 0 {
panic("no return value specified for CheckIfCollectionCanBeCreated")
}
var r0 error
if rf, ok := ret.Get(0).(func(int64) error); ok {
r0 = rf(collectionID)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockShardManager_CheckIfCollectionCanBeCreated_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckIfCollectionCanBeCreated'
type MockShardManager_CheckIfCollectionCanBeCreated_Call struct {
*mock.Call
}
// CheckIfCollectionCanBeCreated is a helper method to define mock.On call
// - collectionID int64
func (_e *MockShardManager_Expecter) CheckIfCollectionCanBeCreated(collectionID interface{}) *MockShardManager_CheckIfCollectionCanBeCreated_Call {
return &MockShardManager_CheckIfCollectionCanBeCreated_Call{Call: _e.mock.On("CheckIfCollectionCanBeCreated", collectionID)}
}
func (_c *MockShardManager_CheckIfCollectionCanBeCreated_Call) Run(run func(collectionID int64)) *MockShardManager_CheckIfCollectionCanBeCreated_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(int64))
})
return _c
}
func (_c *MockShardManager_CheckIfCollectionCanBeCreated_Call) Return(_a0 error) *MockShardManager_CheckIfCollectionCanBeCreated_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockShardManager_CheckIfCollectionCanBeCreated_Call) RunAndReturn(run func(int64) error) *MockShardManager_CheckIfCollectionCanBeCreated_Call {
_c.Call.Return(run)
return _c
}
// CheckIfCollectionExists provides a mock function with given fields: collectionID
func (_m *MockShardManager) CheckIfCollectionExists(collectionID int64) error {
ret := _m.Called(collectionID)
if len(ret) == 0 {
panic("no return value specified for CheckIfCollectionExists")
}
var r0 error
if rf, ok := ret.Get(0).(func(int64) error); ok {
r0 = rf(collectionID)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockShardManager_CheckIfCollectionExists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckIfCollectionExists'
type MockShardManager_CheckIfCollectionExists_Call struct {
*mock.Call
}
// CheckIfCollectionExists is a helper method to define mock.On call
// - collectionID int64
func (_e *MockShardManager_Expecter) CheckIfCollectionExists(collectionID interface{}) *MockShardManager_CheckIfCollectionExists_Call {
return &MockShardManager_CheckIfCollectionExists_Call{Call: _e.mock.On("CheckIfCollectionExists", collectionID)}
}
func (_c *MockShardManager_CheckIfCollectionExists_Call) Run(run func(collectionID int64)) *MockShardManager_CheckIfCollectionExists_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(int64))
})
return _c
}
func (_c *MockShardManager_CheckIfCollectionExists_Call) Return(_a0 error) *MockShardManager_CheckIfCollectionExists_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockShardManager_CheckIfCollectionExists_Call) RunAndReturn(run func(int64) error) *MockShardManager_CheckIfCollectionExists_Call {
_c.Call.Return(run)
return _c
}
// CheckIfPartitionCanBeCreated provides a mock function with given fields: collectionID, partitionID
func (_m *MockShardManager) CheckIfPartitionCanBeCreated(collectionID int64, partitionID int64) error {
ret := _m.Called(collectionID, partitionID)
if len(ret) == 0 {
panic("no return value specified for CheckIfPartitionCanBeCreated")
}
var r0 error
if rf, ok := ret.Get(0).(func(int64, int64) error); ok {
r0 = rf(collectionID, partitionID)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockShardManager_CheckIfPartitionCanBeCreated_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckIfPartitionCanBeCreated'
type MockShardManager_CheckIfPartitionCanBeCreated_Call struct {
*mock.Call
}
// CheckIfPartitionCanBeCreated is a helper method to define mock.On call
// - collectionID int64
// - partitionID int64
func (_e *MockShardManager_Expecter) CheckIfPartitionCanBeCreated(collectionID interface{}, partitionID interface{}) *MockShardManager_CheckIfPartitionCanBeCreated_Call {
return &MockShardManager_CheckIfPartitionCanBeCreated_Call{Call: _e.mock.On("CheckIfPartitionCanBeCreated", collectionID, partitionID)}
}
func (_c *MockShardManager_CheckIfPartitionCanBeCreated_Call) Run(run func(collectionID int64, partitionID int64)) *MockShardManager_CheckIfPartitionCanBeCreated_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(int64), args[1].(int64))
})
return _c
}
func (_c *MockShardManager_CheckIfPartitionCanBeCreated_Call) Return(_a0 error) *MockShardManager_CheckIfPartitionCanBeCreated_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockShardManager_CheckIfPartitionCanBeCreated_Call) RunAndReturn(run func(int64, int64) error) *MockShardManager_CheckIfPartitionCanBeCreated_Call {
_c.Call.Return(run)
return _c
}
// CheckIfPartitionExists provides a mock function with given fields: collectionID, partitionID
func (_m *MockShardManager) CheckIfPartitionExists(collectionID int64, partitionID int64) error {
ret := _m.Called(collectionID, partitionID)
if len(ret) == 0 {
panic("no return value specified for CheckIfPartitionExists")
}
var r0 error
if rf, ok := ret.Get(0).(func(int64, int64) error); ok {
r0 = rf(collectionID, partitionID)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockShardManager_CheckIfPartitionExists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckIfPartitionExists'
type MockShardManager_CheckIfPartitionExists_Call struct {
*mock.Call
}
// CheckIfPartitionExists is a helper method to define mock.On call
// - collectionID int64
// - partitionID int64
func (_e *MockShardManager_Expecter) CheckIfPartitionExists(collectionID interface{}, partitionID interface{}) *MockShardManager_CheckIfPartitionExists_Call {
return &MockShardManager_CheckIfPartitionExists_Call{Call: _e.mock.On("CheckIfPartitionExists", collectionID, partitionID)}
}
func (_c *MockShardManager_CheckIfPartitionExists_Call) Run(run func(collectionID int64, partitionID int64)) *MockShardManager_CheckIfPartitionExists_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(int64), args[1].(int64))
})
return _c
}
func (_c *MockShardManager_CheckIfPartitionExists_Call) Return(_a0 error) *MockShardManager_CheckIfPartitionExists_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockShardManager_CheckIfPartitionExists_Call) RunAndReturn(run func(int64, int64) error) *MockShardManager_CheckIfPartitionExists_Call {
_c.Call.Return(run)
return _c
}
// CheckIfSegmentCanBeCreated provides a mock function with given fields: collectionID, partitionID, segmentID
func (_m *MockShardManager) CheckIfSegmentCanBeCreated(collectionID int64, partitionID int64, segmentID int64) error {
ret := _m.Called(collectionID, partitionID, segmentID)
if len(ret) == 0 {
panic("no return value specified for CheckIfSegmentCanBeCreated")
}
var r0 error
if rf, ok := ret.Get(0).(func(int64, int64, int64) error); ok {
r0 = rf(collectionID, partitionID, segmentID)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockShardManager_CheckIfSegmentCanBeCreated_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckIfSegmentCanBeCreated'
type MockShardManager_CheckIfSegmentCanBeCreated_Call struct {
*mock.Call
}
// CheckIfSegmentCanBeCreated is a helper method to define mock.On call
// - collectionID int64
// - partitionID int64
// - segmentID int64
func (_e *MockShardManager_Expecter) CheckIfSegmentCanBeCreated(collectionID interface{}, partitionID interface{}, segmentID interface{}) *MockShardManager_CheckIfSegmentCanBeCreated_Call {
return &MockShardManager_CheckIfSegmentCanBeCreated_Call{Call: _e.mock.On("CheckIfSegmentCanBeCreated", collectionID, partitionID, segmentID)}
}
func (_c *MockShardManager_CheckIfSegmentCanBeCreated_Call) Run(run func(collectionID int64, partitionID int64, segmentID int64)) *MockShardManager_CheckIfSegmentCanBeCreated_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(int64), args[1].(int64), args[2].(int64))
})
return _c
}
func (_c *MockShardManager_CheckIfSegmentCanBeCreated_Call) Return(_a0 error) *MockShardManager_CheckIfSegmentCanBeCreated_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockShardManager_CheckIfSegmentCanBeCreated_Call) RunAndReturn(run func(int64, int64, int64) error) *MockShardManager_CheckIfSegmentCanBeCreated_Call {
_c.Call.Return(run)
return _c
}
// CheckIfSegmentCanBeFlushed provides a mock function with given fields: collecionID, partitionID, segmentID
func (_m *MockShardManager) CheckIfSegmentCanBeFlushed(collecionID int64, partitionID int64, segmentID int64) error {
ret := _m.Called(collecionID, partitionID, segmentID)
if len(ret) == 0 {
panic("no return value specified for CheckIfSegmentCanBeFlushed")
}
var r0 error
if rf, ok := ret.Get(0).(func(int64, int64, int64) error); ok {
r0 = rf(collecionID, partitionID, segmentID)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockShardManager_CheckIfSegmentCanBeFlushed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckIfSegmentCanBeFlushed'
type MockShardManager_CheckIfSegmentCanBeFlushed_Call struct {
*mock.Call
}
// CheckIfSegmentCanBeFlushed is a helper method to define mock.On call
// - collecionID int64
// - partitionID int64
// - segmentID int64
func (_e *MockShardManager_Expecter) CheckIfSegmentCanBeFlushed(collecionID interface{}, partitionID interface{}, segmentID interface{}) *MockShardManager_CheckIfSegmentCanBeFlushed_Call {
return &MockShardManager_CheckIfSegmentCanBeFlushed_Call{Call: _e.mock.On("CheckIfSegmentCanBeFlushed", collecionID, partitionID, segmentID)}
}
func (_c *MockShardManager_CheckIfSegmentCanBeFlushed_Call) Run(run func(collecionID int64, partitionID int64, segmentID int64)) *MockShardManager_CheckIfSegmentCanBeFlushed_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(int64), args[1].(int64), args[2].(int64))
})
return _c
}
func (_c *MockShardManager_CheckIfSegmentCanBeFlushed_Call) Return(_a0 error) *MockShardManager_CheckIfSegmentCanBeFlushed_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockShardManager_CheckIfSegmentCanBeFlushed_Call) RunAndReturn(run func(int64, int64, int64) error) *MockShardManager_CheckIfSegmentCanBeFlushed_Call {
_c.Call.Return(run)
return _c
}
// Close provides a mock function with no fields
func (_m *MockShardManager) Close() {
_m.Called()
}
// MockShardManager_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
type MockShardManager_Close_Call struct {
*mock.Call
}
// Close is a helper method to define mock.On call
func (_e *MockShardManager_Expecter) Close() *MockShardManager_Close_Call {
return &MockShardManager_Close_Call{Call: _e.mock.On("Close")}
}
func (_c *MockShardManager_Close_Call) Run(run func()) *MockShardManager_Close_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockShardManager_Close_Call) Return() *MockShardManager_Close_Call {
_c.Call.Return()
return _c
}
func (_c *MockShardManager_Close_Call) RunAndReturn(run func()) *MockShardManager_Close_Call {
_c.Run(run)
return _c
}
// CreateCollection provides a mock function with given fields: msg
func (_m *MockShardManager) CreateCollection(msg message.ImmutableCreateCollectionMessageV1) {
_m.Called(msg)
}
// MockShardManager_CreateCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateCollection'
type MockShardManager_CreateCollection_Call struct {
*mock.Call
}
// CreateCollection is a helper method to define mock.On call
// - msg message.ImmutableCreateCollectionMessageV1
func (_e *MockShardManager_Expecter) CreateCollection(msg interface{}) *MockShardManager_CreateCollection_Call {
return &MockShardManager_CreateCollection_Call{Call: _e.mock.On("CreateCollection", msg)}
}
func (_c *MockShardManager_CreateCollection_Call) Run(run func(msg message.ImmutableCreateCollectionMessageV1)) *MockShardManager_CreateCollection_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(message.ImmutableCreateCollectionMessageV1))
})
return _c
}
func (_c *MockShardManager_CreateCollection_Call) Return() *MockShardManager_CreateCollection_Call {
_c.Call.Return()
return _c
}
func (_c *MockShardManager_CreateCollection_Call) RunAndReturn(run func(message.ImmutableCreateCollectionMessageV1)) *MockShardManager_CreateCollection_Call {
_c.Run(run)
return _c
}
// CreatePartition provides a mock function with given fields: msg
func (_m *MockShardManager) CreatePartition(msg message.ImmutableCreatePartitionMessageV1) {
_m.Called(msg)
}
// MockShardManager_CreatePartition_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreatePartition'
type MockShardManager_CreatePartition_Call struct {
*mock.Call
}
// CreatePartition is a helper method to define mock.On call
// - msg message.ImmutableCreatePartitionMessageV1
func (_e *MockShardManager_Expecter) CreatePartition(msg interface{}) *MockShardManager_CreatePartition_Call {
return &MockShardManager_CreatePartition_Call{Call: _e.mock.On("CreatePartition", msg)}
}
func (_c *MockShardManager_CreatePartition_Call) Run(run func(msg message.ImmutableCreatePartitionMessageV1)) *MockShardManager_CreatePartition_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(message.ImmutableCreatePartitionMessageV1))
})
return _c
}
func (_c *MockShardManager_CreatePartition_Call) Return() *MockShardManager_CreatePartition_Call {
_c.Call.Return()
return _c
}
func (_c *MockShardManager_CreatePartition_Call) RunAndReturn(run func(message.ImmutableCreatePartitionMessageV1)) *MockShardManager_CreatePartition_Call {
_c.Run(run)
return _c
}
// CreateSegment provides a mock function with given fields: msg
func (_m *MockShardManager) CreateSegment(msg message.ImmutableCreateSegmentMessageV2) {
_m.Called(msg)
}
// MockShardManager_CreateSegment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateSegment'
type MockShardManager_CreateSegment_Call struct {
*mock.Call
}
// CreateSegment is a helper method to define mock.On call
// - msg message.ImmutableCreateSegmentMessageV2
func (_e *MockShardManager_Expecter) CreateSegment(msg interface{}) *MockShardManager_CreateSegment_Call {
return &MockShardManager_CreateSegment_Call{Call: _e.mock.On("CreateSegment", msg)}
}
func (_c *MockShardManager_CreateSegment_Call) Run(run func(msg message.ImmutableCreateSegmentMessageV2)) *MockShardManager_CreateSegment_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(message.ImmutableCreateSegmentMessageV2))
})
return _c
}
func (_c *MockShardManager_CreateSegment_Call) Return() *MockShardManager_CreateSegment_Call {
_c.Call.Return()
return _c
}
func (_c *MockShardManager_CreateSegment_Call) RunAndReturn(run func(message.ImmutableCreateSegmentMessageV2)) *MockShardManager_CreateSegment_Call {
_c.Run(run)
return _c
}
// DropCollection provides a mock function with given fields: msg
func (_m *MockShardManager) DropCollection(msg message.ImmutableDropCollectionMessageV1) {
_m.Called(msg)
}
// MockShardManager_DropCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropCollection'
type MockShardManager_DropCollection_Call struct {
*mock.Call
}
// DropCollection is a helper method to define mock.On call
// - msg message.ImmutableDropCollectionMessageV1
func (_e *MockShardManager_Expecter) DropCollection(msg interface{}) *MockShardManager_DropCollection_Call {
return &MockShardManager_DropCollection_Call{Call: _e.mock.On("DropCollection", msg)}
}
func (_c *MockShardManager_DropCollection_Call) Run(run func(msg message.ImmutableDropCollectionMessageV1)) *MockShardManager_DropCollection_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(message.ImmutableDropCollectionMessageV1))
})
return _c
}
func (_c *MockShardManager_DropCollection_Call) Return() *MockShardManager_DropCollection_Call {
_c.Call.Return()
return _c
}
func (_c *MockShardManager_DropCollection_Call) RunAndReturn(run func(message.ImmutableDropCollectionMessageV1)) *MockShardManager_DropCollection_Call {
_c.Run(run)
return _c
}
// DropPartition provides a mock function with given fields: msg
func (_m *MockShardManager) DropPartition(msg message.ImmutableDropPartitionMessageV1) {
_m.Called(msg)
}
// MockShardManager_DropPartition_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropPartition'
type MockShardManager_DropPartition_Call struct {
*mock.Call
}
// DropPartition is a helper method to define mock.On call
// - msg message.ImmutableDropPartitionMessageV1
func (_e *MockShardManager_Expecter) DropPartition(msg interface{}) *MockShardManager_DropPartition_Call {
return &MockShardManager_DropPartition_Call{Call: _e.mock.On("DropPartition", msg)}
}
func (_c *MockShardManager_DropPartition_Call) Run(run func(msg message.ImmutableDropPartitionMessageV1)) *MockShardManager_DropPartition_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(message.ImmutableDropPartitionMessageV1))
})
return _c
}
func (_c *MockShardManager_DropPartition_Call) Return() *MockShardManager_DropPartition_Call {
_c.Call.Return()
return _c
}
func (_c *MockShardManager_DropPartition_Call) RunAndReturn(run func(message.ImmutableDropPartitionMessageV1)) *MockShardManager_DropPartition_Call {
_c.Run(run)
return _c
}
// FlushAndFenceSegmentAllocUntil provides a mock function with given fields: collectionID, timetick
func (_m *MockShardManager) FlushAndFenceSegmentAllocUntil(collectionID int64, timetick uint64) ([]int64, error) {
ret := _m.Called(collectionID, timetick)
if len(ret) == 0 {
panic("no return value specified for FlushAndFenceSegmentAllocUntil")
}
var r0 []int64
var r1 error
if rf, ok := ret.Get(0).(func(int64, uint64) ([]int64, error)); ok {
return rf(collectionID, timetick)
}
if rf, ok := ret.Get(0).(func(int64, uint64) []int64); ok {
r0 = rf(collectionID, timetick)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]int64)
}
}
if rf, ok := ret.Get(1).(func(int64, uint64) error); ok {
r1 = rf(collectionID, timetick)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockShardManager_FlushAndFenceSegmentAllocUntil_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FlushAndFenceSegmentAllocUntil'
type MockShardManager_FlushAndFenceSegmentAllocUntil_Call struct {
*mock.Call
}
// FlushAndFenceSegmentAllocUntil is a helper method to define mock.On call
// - collectionID int64
// - timetick uint64
func (_e *MockShardManager_Expecter) FlushAndFenceSegmentAllocUntil(collectionID interface{}, timetick interface{}) *MockShardManager_FlushAndFenceSegmentAllocUntil_Call {
return &MockShardManager_FlushAndFenceSegmentAllocUntil_Call{Call: _e.mock.On("FlushAndFenceSegmentAllocUntil", collectionID, timetick)}
}
func (_c *MockShardManager_FlushAndFenceSegmentAllocUntil_Call) Run(run func(collectionID int64, timetick uint64)) *MockShardManager_FlushAndFenceSegmentAllocUntil_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(int64), args[1].(uint64))
})
return _c
}
func (_c *MockShardManager_FlushAndFenceSegmentAllocUntil_Call) Return(_a0 []int64, _a1 error) *MockShardManager_FlushAndFenceSegmentAllocUntil_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockShardManager_FlushAndFenceSegmentAllocUntil_Call) RunAndReturn(run func(int64, uint64) ([]int64, error)) *MockShardManager_FlushAndFenceSegmentAllocUntil_Call {
_c.Call.Return(run)
return _c
}
// FlushSegment provides a mock function with given fields: msg
func (_m *MockShardManager) FlushSegment(msg message.ImmutableFlushMessageV2) {
_m.Called(msg)
}
// MockShardManager_FlushSegment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FlushSegment'
type MockShardManager_FlushSegment_Call struct {
*mock.Call
}
// FlushSegment is a helper method to define mock.On call
// - msg message.ImmutableFlushMessageV2
func (_e *MockShardManager_Expecter) FlushSegment(msg interface{}) *MockShardManager_FlushSegment_Call {
return &MockShardManager_FlushSegment_Call{Call: _e.mock.On("FlushSegment", msg)}
}
func (_c *MockShardManager_FlushSegment_Call) Run(run func(msg message.ImmutableFlushMessageV2)) *MockShardManager_FlushSegment_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(message.ImmutableFlushMessageV2))
})
return _c
}
func (_c *MockShardManager_FlushSegment_Call) Return() *MockShardManager_FlushSegment_Call {
_c.Call.Return()
return _c
}
func (_c *MockShardManager_FlushSegment_Call) RunAndReturn(run func(message.ImmutableFlushMessageV2)) *MockShardManager_FlushSegment_Call {
_c.Run(run)
return _c
}
// Logger provides a mock function with no fields
func (_m *MockShardManager) Logger() *log.MLogger {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Logger")
}
var r0 *log.MLogger
if rf, ok := ret.Get(0).(func() *log.MLogger); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*log.MLogger)
}
}
return r0
}
// MockShardManager_Logger_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Logger'
type MockShardManager_Logger_Call struct {
*mock.Call
}
// Logger is a helper method to define mock.On call
func (_e *MockShardManager_Expecter) Logger() *MockShardManager_Logger_Call {
return &MockShardManager_Logger_Call{Call: _e.mock.On("Logger")}
}
func (_c *MockShardManager_Logger_Call) Run(run func()) *MockShardManager_Logger_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockShardManager_Logger_Call) Return(_a0 *log.MLogger) *MockShardManager_Logger_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockShardManager_Logger_Call) RunAndReturn(run func() *log.MLogger) *MockShardManager_Logger_Call {
_c.Call.Return(run)
return _c
}
// WaitUntilGrowingSegmentReady provides a mock function with given fields: collectionID, partitonID
func (_m *MockShardManager) WaitUntilGrowingSegmentReady(collectionID int64, partitonID int64) (<-chan struct{}, error) {
ret := _m.Called(collectionID, partitonID)
if len(ret) == 0 {
panic("no return value specified for WaitUntilGrowingSegmentReady")
}
var r0 <-chan struct{}
var r1 error
if rf, ok := ret.Get(0).(func(int64, int64) (<-chan struct{}, error)); ok {
return rf(collectionID, partitonID)
}
if rf, ok := ret.Get(0).(func(int64, int64) <-chan struct{}); ok {
r0 = rf(collectionID, partitonID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan struct{})
}
}
if rf, ok := ret.Get(1).(func(int64, int64) error); ok {
r1 = rf(collectionID, partitonID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockShardManager_WaitUntilGrowingSegmentReady_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitUntilGrowingSegmentReady'
type MockShardManager_WaitUntilGrowingSegmentReady_Call struct {
*mock.Call
}
// WaitUntilGrowingSegmentReady is a helper method to define mock.On call
// - collectionID int64
// - partitonID int64
func (_e *MockShardManager_Expecter) WaitUntilGrowingSegmentReady(collectionID interface{}, partitonID interface{}) *MockShardManager_WaitUntilGrowingSegmentReady_Call {
return &MockShardManager_WaitUntilGrowingSegmentReady_Call{Call: _e.mock.On("WaitUntilGrowingSegmentReady", collectionID, partitonID)}
}
func (_c *MockShardManager_WaitUntilGrowingSegmentReady_Call) Run(run func(collectionID int64, partitonID int64)) *MockShardManager_WaitUntilGrowingSegmentReady_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(int64), args[1].(int64))
})
return _c
}
func (_c *MockShardManager_WaitUntilGrowingSegmentReady_Call) Return(_a0 <-chan struct{}, _a1 error) *MockShardManager_WaitUntilGrowingSegmentReady_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockShardManager_WaitUntilGrowingSegmentReady_Call) RunAndReturn(run func(int64, int64) (<-chan struct{}, error)) *MockShardManager_WaitUntilGrowingSegmentReady_Call {
_c.Call.Return(run)
return _c
}
// NewMockShardManager creates a new instance of MockShardManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockShardManager(t interface {
mock.TestingT
Cleanup(func())
}) *MockShardManager {
mock := &MockShardManager{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -0,0 +1,100 @@
// Code generated by mockery v2.53.3. DO NOT EDIT.
package mock_recovery
import (
message "github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
mock "github.com/stretchr/testify/mock"
)
// MockRecoveryStorage is an autogenerated mock type for the RecoveryStorage type
type MockRecoveryStorage struct {
mock.Mock
}
type MockRecoveryStorage_Expecter struct {
mock *mock.Mock
}
func (_m *MockRecoveryStorage) EXPECT() *MockRecoveryStorage_Expecter {
return &MockRecoveryStorage_Expecter{mock: &_m.Mock}
}
// Close provides a mock function with no fields
func (_m *MockRecoveryStorage) Close() {
_m.Called()
}
// MockRecoveryStorage_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
type MockRecoveryStorage_Close_Call struct {
*mock.Call
}
// Close is a helper method to define mock.On call
func (_e *MockRecoveryStorage_Expecter) Close() *MockRecoveryStorage_Close_Call {
return &MockRecoveryStorage_Close_Call{Call: _e.mock.On("Close")}
}
func (_c *MockRecoveryStorage_Close_Call) Run(run func()) *MockRecoveryStorage_Close_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockRecoveryStorage_Close_Call) Return() *MockRecoveryStorage_Close_Call {
_c.Call.Return()
return _c
}
func (_c *MockRecoveryStorage_Close_Call) RunAndReturn(run func()) *MockRecoveryStorage_Close_Call {
_c.Run(run)
return _c
}
// ObserveMessage provides a mock function with given fields: msg
func (_m *MockRecoveryStorage) ObserveMessage(msg message.ImmutableMessage) {
_m.Called(msg)
}
// MockRecoveryStorage_ObserveMessage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObserveMessage'
type MockRecoveryStorage_ObserveMessage_Call struct {
*mock.Call
}
// ObserveMessage is a helper method to define mock.On call
// - msg message.ImmutableMessage
func (_e *MockRecoveryStorage_Expecter) ObserveMessage(msg interface{}) *MockRecoveryStorage_ObserveMessage_Call {
return &MockRecoveryStorage_ObserveMessage_Call{Call: _e.mock.On("ObserveMessage", msg)}
}
func (_c *MockRecoveryStorage_ObserveMessage_Call) Run(run func(msg message.ImmutableMessage)) *MockRecoveryStorage_ObserveMessage_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(message.ImmutableMessage))
})
return _c
}
func (_c *MockRecoveryStorage_ObserveMessage_Call) Return() *MockRecoveryStorage_ObserveMessage_Call {
_c.Call.Return()
return _c
}
func (_c *MockRecoveryStorage_ObserveMessage_Call) RunAndReturn(run func(message.ImmutableMessage)) *MockRecoveryStorage_ObserveMessage_Call {
_c.Run(run)
return _c
}
// NewMockRecoveryStorage creates a new instance of MockRecoveryStorage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockRecoveryStorage(t interface {
mock.TestingT
Cleanup(func())
}) *MockRecoveryStorage {
mock := &MockRecoveryStorage{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -158,6 +158,8 @@ func (hc *handlerClientImpl) createHandlerAfterStreamingNodeReady(ctx context.Co
backoff.InitialInterval = 100 * time.Millisecond
backoff.MaxInterval = 10 * time.Second
backoff.MaxElapsedTime = 0
backoff.Reset()
for {
assign := hc.watcher.Get(ctx, pchannel)
if assign != nil {

View File

@ -15,7 +15,7 @@ import (
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/stats"
"github.com/milvus-io/milvus/internal/util/idalloc"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/mq/msgstream"
@ -84,7 +84,7 @@ func (impl *flusherComponents) WhenCreateCollection(createCollectionMsg message.
}
if tt, ok := t.(*syncmgr.SyncTask); ok {
insertLogs, _, _, _ := tt.Binlogs()
resource.Resource().SegmentAssignStatsManager().UpdateOnSync(tt.SegmentID(), stats.SyncOperationMetrics{
resource.Resource().SegmentStatsManager().UpdateOnSync(tt.SegmentID(), stats.SyncOperationMetrics{
BinLogCounterIncr: 1,
BinLogFileCounterIncr: uint64(len(insertLogs)),
})
@ -258,7 +258,7 @@ func (impl *flusherComponents) buildDataSyncService(ctx context.Context, recover
}
if tt, ok := t.(*syncmgr.SyncTask); ok {
insertLogs, _, _, _ := tt.Binlogs()
resource.Resource().SegmentAssignStatsManager().UpdateOnSync(tt.SegmentID(), stats.SyncOperationMetrics{
resource.Resource().SegmentStatsManager().UpdateOnSync(tt.SegmentID(), stats.SyncOperationMetrics{
BinLogCounterIncr: 1,
BinLogFileCounterIncr: uint64(len(insertLogs)),
})

View File

@ -4,25 +4,35 @@ import (
"context"
"github.com/cockroachdb/errors"
"github.com/samber/lo"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/flushcommon/broker"
"github.com/milvus-io/milvus/internal/flushcommon/util"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/recovery"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message/adaptor"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/options"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
)
var errChannelLifetimeUnrecoverable = errors.New("channel lifetime unrecoverable")
// RecoverWALFlusherParam is the parameter for building wal flusher.
type RecoverWALFlusherParam struct {
ChannelInfo types.PChannelInfo
WAL *syncutil.Future[wal.WAL]
RecoverySnapshot *recovery.RecoverySnapshot
RecoveryStorage recovery.RecoveryStorage
}
// RecoverWALFlusher recovers the wal flusher.
func RecoverWALFlusher(param *interceptors.InterceptorBuildParam) *WALFlusherImpl {
func RecoverWALFlusher(param *RecoverWALFlusherParam) *WALFlusherImpl {
flusher := &WALFlusherImpl{
notifier: syncutil.NewAsyncTaskNotifier[struct{}](),
wal: param.WAL,
@ -30,8 +40,9 @@ func RecoverWALFlusher(param *interceptors.InterceptorBuildParam) *WALFlusherImp
log.FieldComponent("flusher"),
zap.String("pchannel", param.ChannelInfo.String())),
metrics: newFlusherMetrics(param.ChannelInfo),
rs: param.RecoveryStorage,
}
go flusher.Execute()
go flusher.Execute(param.RecoverySnapshot)
return flusher
}
@ -41,10 +52,11 @@ type WALFlusherImpl struct {
flusherComponents *flusherComponents
logger *log.MLogger
metrics *flusherMetrics
rs recovery.RecoveryStorage
}
// Execute starts the wal flusher.
func (impl *WALFlusherImpl) Execute() (err error) {
func (impl *WALFlusherImpl) Execute(recoverSnapshot *recovery.RecoverySnapshot) (err error) {
defer func() {
impl.notifier.Finish(struct{}{})
if err == nil {
@ -66,7 +78,7 @@ func (impl *WALFlusherImpl) Execute() (err error) {
impl.logger.Info("wal ready for flusher recovery")
var checkpoint message.MessageID
impl.flusherComponents, checkpoint, err = impl.buildFlusherComponents(impl.notifier.Context(), l)
impl.flusherComponents, checkpoint, err = impl.buildFlusherComponents(impl.notifier.Context(), l, recoverSnapshot)
if err != nil {
return errors.Wrap(err, "when build flusher components")
}
@ -104,17 +116,18 @@ func (impl *WALFlusherImpl) Execute() (err error) {
func (impl *WALFlusherImpl) Close() {
impl.notifier.Cancel()
impl.notifier.BlockUntilFinish()
impl.logger.Info("wal flusher start to close the recovery storage...")
impl.rs.Close()
impl.logger.Info("recovery storage closed")
impl.metrics.Close()
}
// buildFlusherComponents builds the components of the flusher.
func (impl *WALFlusherImpl) buildFlusherComponents(ctx context.Context, l wal.WAL) (*flusherComponents, message.MessageID, error) {
func (impl *WALFlusherImpl) buildFlusherComponents(ctx context.Context, l wal.WAL, snapshot *recovery.RecoverySnapshot) (*flusherComponents, message.MessageID, error) {
// Get all existed vchannels of the pchannel.
vchannels, err := impl.getVchannels(ctx, l.Channel().Name)
if err != nil {
impl.logger.Warn("get vchannels failed", zap.Error(err))
return nil, nil, err
}
vchannels := lo.Keys(snapshot.VChannels)
impl.logger.Info("fetch vchannel done", zap.Int("vchannelNum", len(vchannels)))
// Get all the recovery info of the recoverable vchannels.
@ -177,6 +190,10 @@ func (impl *WALFlusherImpl) generateScanner(ctx context.Context, l wal.WAL, chec
// dispatch dispatches the message to the related handler for flusher components.
func (impl *WALFlusherImpl) dispatch(msg message.ImmutableMessage) error {
// TODO: We will merge the flusher into recovery storage in future.
// Currently, flusher works as a separate component.
defer impl.rs.ObserveMessage(msg)
// Do the data sync service management here.
switch msg.MessageType() {
case message.MessageTypeCreateCollection:
@ -189,7 +206,9 @@ func (impl *WALFlusherImpl) dispatch(msg message.ImmutableMessage) error {
case message.MessageTypeDropCollection:
// defer to remove the data sync service from the components.
// TODO: Current drop collection message will be handled by the underlying data sync service.
defer impl.flusherComponents.WhenDropCollection(msg.VChannel())
defer func() {
impl.flusherComponents.WhenDropCollection(msg.VChannel())
}()
}
return impl.flusherComponents.HandleMessage(impl.notifier.Context(), msg)
}
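
To make the new wiring easier to follow, here is a minimal, hypothetical sketch (simplified stand-in types, not the Milvus ones) of the dispatch pattern above: every message scanned from the WAL is handed to the flusher components and, via defer, always recorded into the recovery storage, so the recovery state tracks exactly what the flusher has consumed.

```go
// Simplified stand-in types; the real code uses message.ImmutableMessage
// and the RecoveryStorage interface added in this PR.
package main

import "fmt"

type message struct {
	typ      string
	vchannel string
}

type recoveryStorage interface {
	ObserveMessage(msg message)
	Close()
}

type flusher struct {
	rs recoveryStorage
}

func (f *flusher) dispatch(msg message) error {
	// Observe the message into recovery storage no matter how handling ends,
	// mirroring the `defer impl.rs.ObserveMessage(msg)` above.
	defer f.rs.ObserveMessage(msg)

	switch msg.typ {
	case "CreateCollection":
		fmt.Println("start data sync service for", msg.vchannel)
	case "DropCollection":
		// Deferred so the drop message itself still reaches the existing
		// data sync service before that service is removed.
		defer fmt.Println("remove data sync service for", msg.vchannel)
	}
	fmt.Println("handle", msg.typ, "on", msg.vchannel)
	return nil
}

// printRS is a toy recovery storage that just logs what it observes.
type printRS struct{}

func (printRS) ObserveMessage(msg message) { fmt.Println("recovery storage observed", msg.typ) }
func (printRS) Close()                     {}

func main() {
	f := &flusher{rs: printRS{}}
	_ = f.dispatch(message{typ: "CreateCollection", vchannel: "vchannel-1"})
	_ = f.dispatch(message{typ: "DropCollection", vchannel: "vchannel-1"})
}
```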

View File

@ -16,14 +16,15 @@ import (
"github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/internal/mocks/mock_storage"
"github.com/milvus-io/milvus/internal/mocks/streamingnode/server/mock_wal"
"github.com/milvus-io/milvus/internal/mocks/streamingnode/server/wal/mock_recovery"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/recovery"
internaltypes "github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/streamingutil"
"github.com/milvus-io/milvus/pkg/v2/common"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"
"github.com/milvus-io/milvus/pkg/v2/proto/streamingpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/impls/rmq"
@ -44,21 +45,6 @@ func TestWALFlusher(t *testing.T) {
defer streamingutil.UnsetStreamingServiceEnabled()
mixcoord := newMockMixcoord(t, false)
mixcoord.EXPECT().GetPChannelInfo(mock.Anything, mock.Anything).Return(&rootcoordpb.GetPChannelInfoResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Collections: []*rootcoordpb.CollectionInfoOnPChannel{
{
CollectionId: 100,
Vchannel: "vchannel-1",
},
{
CollectionId: 100,
Vchannel: "vchannel-2",
},
},
}, nil)
mixcoord.EXPECT().AllocSegment(mock.Anything, mock.Anything).Return(&datapb.AllocSegmentResponse{
Status: merr.Status(nil),
}, nil)
@ -67,14 +53,33 @@ func TestWALFlusher(t *testing.T) {
}, nil)
fMixcoord := syncutil.NewFuture[internaltypes.MixCoordClient]()
fMixcoord.Set(mixcoord)
rs := mock_recovery.NewMockRecoveryStorage(t)
rs.EXPECT().ObserveMessage(mock.Anything).Return()
rs.EXPECT().Close().Return()
resource.InitForTest(
t,
resource.OptMixCoordClient(fMixcoord),
resource.OptChunkManager(mock_storage.NewMockChunkManager(t)),
)
l := newMockWAL(t, false)
param := &interceptors.InterceptorBuildParam{
param := &RecoverWALFlusherParam{
ChannelInfo: l.Channel(),
WAL: syncutil.NewFuture[wal.WAL](),
RecoverySnapshot: &recovery.RecoverySnapshot{
VChannels: map[string]*streamingpb.VChannelMeta{
"vchannel-1": {
CollectionInfo: &streamingpb.CollectionInfoOfVChannel{
CollectionId: 100,
},
},
"vchannel-2": {
CollectionInfo: &streamingpb.CollectionInfoOfVChannel{
CollectionId: 100,
},
},
},
},
RecoveryStorage: rs,
}
param.WAL.Set(l)
flusher := RecoverWALFlusher(param)

View File

@ -9,8 +9,7 @@ import (
"github.com/milvus-io/milvus/internal/flushcommon/writebuffer"
"github.com/milvus-io/milvus/internal/metastore"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
shardstats "github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/stats"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/stats"
tinspector "github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/timetick/inspector"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/vchantempstore"
"github.com/milvus-io/milvus/internal/types"
@ -68,8 +67,7 @@ func Apply(opts ...optResourceInit) {
// Done finish all initialization of resources.
func Done() {
r.segmentStatsManager = shardstats.NewStatsManager()
r.segmentAssignStatsManager = stats.NewStatsManager()
r.segmentStatsManager = stats.NewStatsManager()
r.timeTickInspector = tinspector.NewTimeTickSyncInspector()
r.syncMgr = syncmgr.NewSyncManager(r.chunkManager)
r.wbMgr = writebuffer.NewManager(r.syncMgr)
@ -78,7 +76,7 @@ func Done() {
assertNotNil(r.TSOAllocator())
assertNotNil(r.MixCoordClient())
assertNotNil(r.StreamingNodeCatalog())
assertNotNil(r.SegmentAssignStatsManager())
assertNotNil(r.SegmentStatsManager())
assertNotNil(r.TimeTickInspector())
assertNotNil(r.SyncManager())
assertNotNil(r.WriteBufferManager())
@ -105,8 +103,7 @@ type resourceImpl struct {
chunkManager storage.ChunkManager
mixCoordClient *syncutil.Future[types.MixCoordClient]
streamingNodeCatalog metastore.StreamingNodeCataLog
segmentAssignStatsManager *stats.StatsManager
segmentStatsManager *shardstats.StatsManager
segmentStatsManager *stats.StatsManager
timeTickInspector tinspector.TimeTickSyncInspector
vchannelTempStorage *vchantempstore.VChannelTempStorage
@ -155,15 +152,10 @@ func (r *resourceImpl) StreamingNodeCatalog() metastore.StreamingNodeCataLog {
return r.streamingNodeCatalog
}
func (r *resourceImpl) SegmentStatsManager() *shardstats.StatsManager {
func (r *resourceImpl) SegmentStatsManager() *stats.StatsManager {
return r.segmentStatsManager
}
// SegmentAssignStatManager returns the segment assign stats manager.
func (r *resourceImpl) SegmentAssignStatsManager() *stats.StatsManager {
return r.segmentAssignStatsManager
}
func (r *resourceImpl) TimeTickInspector() tinspector.TimeTickSyncInspector {
return r.timeTickInspector
}

View File

@ -8,8 +8,7 @@ import (
"github.com/milvus-io/milvus/internal/flushcommon/syncmgr"
"github.com/milvus-io/milvus/internal/flushcommon/writebuffer"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
shardstats "github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/stats"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/stats"
tinspector "github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/timetick/inspector"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/idalloc"
@ -39,7 +38,6 @@ func InitForTest(t *testing.T, opts ...optResourceInit) {
r.timestampAllocator = idalloc.NewTSOAllocator(r.mixCoordClient)
r.idAllocator = idalloc.NewIDAllocator(r.mixCoordClient)
}
r.segmentAssignStatsManager = stats.NewStatsManager()
r.segmentStatsManager = shardstats.NewStatsManager()
r.segmentStatsManager = stats.NewStatsManager()
r.timeTickInspector = tinspector.NewTimeTickSyncInspector()
}

View File

@ -40,8 +40,7 @@ func buildInterceptorParams(ctx context.Context, underlyingWALImpls walimpls.WAL
return &interceptors.InterceptorBuildParam{
ChannelInfo: underlyingWALImpls.Channel(),
WAL: syncutil.NewFuture[wal.WAL](),
InitializedTimeTick: msg.TimeTick(),
InitializedMessageID: msg.MessageID(),
LastTimeTickMessage: msg,
WriteAheadBuffer: writeAheadBuffer,
MVCCManager: mvccManager,
}, nil
@ -49,10 +48,16 @@ func buildInterceptorParams(ctx context.Context, underlyingWALImpls walimpls.WAL
// sendFirstTimeTick sends the first timetick message to walimpls.
// It is used to make a fence operation with the underlying walimpls and get the timetick and last message id to recover the wal state.
func sendFirstTimeTick(ctx context.Context, underlyingWALImpls walimpls.WALImpls) (message.ImmutableMessage, error) {
logger := resource.Resource().Logger()
func sendFirstTimeTick(ctx context.Context, underlyingWALImpls walimpls.WALImpls) (msg message.ImmutableMessage, err error) {
logger := resource.Resource().Logger().With(zap.String("channel", underlyingWALImpls.Channel().String()))
logger.Info("start to sync first time tick")
defer logger.Info("sync first time tick done")
defer func() {
if err != nil {
logger.Error("sync first time tick failed", zap.Error(err))
return
}
logger.Info("sync first time tick done", zap.String("msgID", msg.MessageID().String()), zap.Uint64("timetick", msg.TimeTick()))
}()
backoffTimer := typeutil.NewBackoffTimer(typeutil.BackoffTimerConfig{
Default: 5 * time.Second,
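As a side note on the signature change above: switching to named return values lets one deferred closure log both the failure and the success path. A minimal, self-contained sketch of that idiom (illustrative names only, not part of this change):
package main
import "go.uber.org/zap"
// doWork sketches the deferred-logging idiom used by sendFirstTimeTick:
// the named returns (n, err) are visible to the deferred closure.
func doWork(logger *zap.Logger) (n int, err error) {
	defer func() {
		if err != nil {
			logger.Error("work failed", zap.Error(err))
			return
		}
		logger.Info("work done", zap.Int("result", n))
	}()
	n = 42 // placeholder for the real operation
	return n, nil
}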

View File

@ -3,12 +3,19 @@ package adaptor
import (
"context"
"github.com/cockroachdb/errors"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/streamingnode/server/flusher/flusherimpl"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/shards"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/txn"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/recovery"
"github.com/milvus-io/milvus/internal/util/streamingutil/status"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)
@ -17,24 +24,26 @@ var _ wal.Opener = (*openerAdaptorImpl)(nil)
// adaptImplsToOpener creates a new wal opener with opener impls.
func adaptImplsToOpener(opener walimpls.OpenerImpls, builders []interceptors.InterceptorBuilder) wal.Opener {
return &openerAdaptorImpl{
o := &openerAdaptorImpl{
lifetime: typeutil.NewLifetime(),
opener: opener,
idAllocator: typeutil.NewIDAllocator(),
walInstances: typeutil.NewConcurrentMap[int64, wal.WAL](),
interceptorBuilders: builders,
logger: log.With(log.FieldComponent("opener")),
}
o.SetLogger(resource.Resource().Logger().With(log.FieldComponent("wal-opener")))
return o
}
// openerAdaptorImpl is the wrapper of OpenerImpls to Opener.
type openerAdaptorImpl struct {
log.Binder
lifetime *typeutil.Lifetime
opener walimpls.OpenerImpls
idAllocator *typeutil.IDAllocator
walInstances *typeutil.ConcurrentMap[int64, wal.WAL] // store all wal instances allocated by these allocator.
interceptorBuilders []interceptors.InterceptorBuilder
logger *log.MLogger
}
// Open opens a wal instance for the channel.
@ -44,28 +53,83 @@ func (o *openerAdaptorImpl) Open(ctx context.Context, opt *wal.OpenOption) (wal.
}
defer o.lifetime.Done()
id := o.idAllocator.Allocate()
logger := o.logger.With(zap.String("channel", opt.Channel.String()), zap.Int64("id", id))
logger := o.Logger().With(zap.String("channel", opt.Channel.String()))
l, err := o.opener.Open(ctx, &walimpls.OpenOption{
Channel: opt.Channel,
})
if err != nil {
logger.Warn("open wal impls failed", zap.Error(err))
return nil, err
}
var wal wal.WAL
switch opt.Channel.AccessMode {
case types.AccessModeRW:
wal, err = o.openRWWAL(ctx, l, opt)
case types.AccessModeRO:
wal, err = o.openROWAL(l)
default:
panic("unknown access mode")
}
if err != nil {
logger.Warn("open wal failed", zap.Error(err))
return nil, err
}
logger.Info("open wal done")
return wal, nil
}
// wrap the wal into walExtend with cleanup function and interceptors.
wal, err := adaptImplsToWAL(ctx, l, o.interceptorBuilders, func() {
// openRWWAL opens a read write wal instance for the channel.
func (o *openerAdaptorImpl) openRWWAL(ctx context.Context, l walimpls.WALImpls, opt *wal.OpenOption) (wal.WAL, error) {
id := o.idAllocator.Allocate()
roWAL := adaptImplsToROWAL(l, func() {
o.walInstances.Remove(id)
logger.Info("wal deleted from opener")
})
if err != nil {
return nil, err
}
// recover the wal state.
param, err := buildInterceptorParams(ctx, l)
if err != nil {
roWAL.Close()
return nil, errors.Wrap(err, "when building interceptor params")
}
rs, snapshot, err := recovery.RecoverRecoveryStorage(ctx, newRecoveryStreamBuilder(roWAL), param.LastTimeTickMessage)
if err != nil {
param.Clear()
roWAL.Close()
return nil, errors.Wrap(err, "when recovering recovery storage")
}
param.InitialRecoverSnapshot = snapshot
param.TxnManager = txn.NewTxnManager(param.ChannelInfo, snapshot.TxnBuffer.GetUncommittedMessageBuilder())
param.ShardManager = shards.RecoverShardManager(&shards.ShardManagerRecoverParam{
ChannelInfo: param.ChannelInfo,
WAL: param.WAL,
InitialRecoverSnapshot: snapshot,
TxnManager: param.TxnManager,
})
// start the flusher to flush and generate recovery info.
var flusher *flusherimpl.WALFlusherImpl
if !opt.DisableFlusher {
flusher = flusherimpl.RecoverWALFlusher(&flusherimpl.RecoverWALFlusherParam{
WAL: param.WAL,
RecoveryStorage: rs,
ChannelInfo: l.Channel(),
RecoverySnapshot: snapshot,
})
}
wal := adaptImplsToRWWAL(roWAL, o.interceptorBuilders, param, flusher)
o.walInstances.Insert(id, wal)
return wal, nil
}
// openROWAL opens a read only wal instance for the channel.
func (o *openerAdaptorImpl) openROWAL(l walimpls.WALImpls) (wal.WAL, error) {
id := o.idAllocator.Allocate()
wal := adaptImplsToROWAL(l, func() {
o.walInstances.Remove(id)
})
o.walInstances.Insert(id, wal)
logger.Info("new wal created")
return wal, nil
}
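For reference, a hedged usage sketch of the RW/RO split introduced in Open above: the access mode travels on the channel info, and DisableFlusher is the test-only knob added to wal.OpenOption later in this change (the helper name here is illustrative):
// Sketch: open the same pchannel once for read-write and once for read-only.
func openBoth(ctx context.Context, opener wal.Opener, pchannel types.PChannelInfo) (rw wal.WAL, ro wal.WAL, err error) {
	pchannel.AccessMode = types.AccessModeRW
	rw, err = opener.Open(ctx, &wal.OpenOption{Channel: pchannel, DisableFlusher: true})
	if err != nil {
		return nil, nil, err
	}
	pchannel.AccessMode = types.AccessModeRO
	ro, err = opener.Open(ctx, &wal.OpenOption{Channel: pchannel})
	if err != nil {
		rw.Close()
		return nil, nil, err
	}
	return rw, ro, nil
}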
@ -77,7 +141,7 @@ func (o *openerAdaptorImpl) Close() {
// close all wal instances.
o.walInstances.Range(func(id int64, l wal.WAL) bool {
l.Close()
o.logger.Info("close wal by opener", zap.Int64("id", id), zap.Any("channel", l.Channel()))
o.Logger().Info("close wal by opener", zap.Int64("id", id), zap.String("channel", l.Channel().String()))
return true
})
// close the opener

View File

@ -2,23 +2,15 @@ package adaptor
import (
"context"
"fmt"
"sync"
"testing"
"time"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/pkg/v2/mocks/streaming/mock_walimpls"
"github.com/milvus-io/milvus/pkg/v2/mocks/streaming/util/mock_message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/impls/walimplstest"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
)
@ -39,88 +31,3 @@ func TestOpenerAdaptorFailure(t *testing.T) {
assert.ErrorIs(t, err, errExpected)
assert.Nil(t, l)
}
func TestOpenerAdaptor(t *testing.T) {
resource.InitForTest(t)
// Build basic opener.
basicOpener := mock_walimpls.NewMockOpenerImpls(t)
basicOpener.EXPECT().Open(mock.Anything, mock.Anything).RunAndReturn(
func(ctx context.Context, boo *walimpls.OpenOption) (walimpls.WALImpls, error) {
wal := mock_walimpls.NewMockWALImpls(t)
wal.EXPECT().WALName().Return("mock_wal")
wal.EXPECT().Channel().Return(boo.Channel)
wal.EXPECT().Append(mock.Anything, mock.Anything).RunAndReturn(
func(ctx context.Context, mm message.MutableMessage) (message.MessageID, error) {
return walimplstest.NewTestMessageID(1), nil
})
wal.EXPECT().Close().Run(func() {})
return wal, nil
})
basicOpener.EXPECT().Close().Run(func() {})
// Create a opener with mock basic opener.
opener := adaptImplsToOpener(basicOpener, nil)
// Test in concurrency env.
wg := sync.WaitGroup{}
for i := 0; i < 10; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
wal, err := opener.Open(context.Background(), &wal.OpenOption{
Channel: types.PChannelInfo{
Name: fmt.Sprintf("test_%d", i),
Term: int64(i),
},
})
if err != nil {
assert.Nil(t, wal)
assertShutdownError(t, err)
return
}
assert.NotNil(t, wal)
for {
msg := mock_message.NewMockMutableMessage(t)
msg.EXPECT().WithWALTerm(mock.Anything).Return(msg).Maybe()
msg.EXPECT().MessageType().Return(message.MessageTypeInsert).Maybe()
msg.EXPECT().EstimateSize().Return(1).Maybe()
msg.EXPECT().IsPersisted().Return(false).Maybe()
msgID, err := wal.Append(context.Background(), msg)
time.Sleep(time.Millisecond * 10)
if err != nil {
assert.Nil(t, msgID)
assertShutdownError(t, err)
return
}
}
}(i)
}
time.Sleep(time.Second * 1)
opener.Close()
// All wal should be closed with Opener.
ch := make(chan struct{})
go func() {
wg.Wait()
close(ch)
}()
select {
case <-time.After(time.Second * 3):
t.Errorf("opener close should be fast")
case <-ch:
}
// open a wal after opener closed should return shutdown error.
_, err := opener.Open(context.Background(), &wal.OpenOption{
Channel: types.PChannelInfo{
Name: "test_after_close",
Term: int64(1),
},
})
assertShutdownError(t, err)
}

View File

@ -0,0 +1,126 @@
package adaptor
import (
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/recovery"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/utility"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
)
var (
_ recovery.RecoveryStreamBuilder = (*recoveryStreamBuilderImpl)(nil)
_ recovery.RecoveryStream = (*recoveryStreamImpl)(nil)
)
// newRecoveryStreamBuilder creates a new recovery stream builder.
func newRecoveryStreamBuilder(roWALImpls *roWALAdaptorImpl) *recoveryStreamBuilderImpl {
return &recoveryStreamBuilderImpl{
roWALAdaptorImpl: roWALImpls,
basicWAL: roWALImpls.roWALImpls.(walimpls.WALImpls),
}
}
// recoveryStreamBuilderImpl is the implementation of RecoveryStreamBuilder.
type recoveryStreamBuilderImpl struct {
*roWALAdaptorImpl
basicWAL walimpls.WALImpls
}
// Build builds a recovery stream.
func (b *recoveryStreamBuilderImpl) Build(param recovery.BuildRecoveryStreamParam) recovery.RecoveryStream {
scanner := newRecoveryScannerAdaptor(b.roWALImpls, param.StartCheckpoint, b.scanMetrics.NewScannerMetrics())
recoveryStream := &recoveryStreamImpl{
notifier: syncutil.NewAsyncTaskNotifier[error](),
param: param,
scanner: scanner,
ch: make(chan message.ImmutableMessage),
txnBuffer: nil,
}
go recoveryStream.execute()
return recoveryStream
}
func (b *recoveryStreamBuilderImpl) RWWALImpls() walimpls.WALImpls {
return b.basicWAL
}
// recoveryStreamImpl is the implementation of RecoveryStream.
type recoveryStreamImpl struct {
notifier *syncutil.AsyncTaskNotifier[error]
scanner *scannerAdaptorImpl
param recovery.BuildRecoveryStreamParam
ch chan message.ImmutableMessage
txnBuffer *utility.TxnBuffer
}
// Chan returns the channel of the recovery stream.
func (r *recoveryStreamImpl) Chan() <-chan message.ImmutableMessage {
return r.ch
}
// Error returns the error of the recovery stream.
func (r *recoveryStreamImpl) Error() error {
return r.notifier.BlockAndGetResult()
}
// TxnBuffer returns the uncommitted txn buffer after recovery stream is done.
func (r *recoveryStreamImpl) TxnBuffer() *utility.TxnBuffer {
if err := r.notifier.BlockAndGetResult(); err != nil {
panic("TxnBuffer should only be called after recovery stream is done")
}
return r.txnBuffer
}
// Close closes the recovery stream.
func (r *recoveryStreamImpl) Close() error {
r.notifier.Cancel()
err := r.notifier.BlockAndGetResult()
return err
}
// execute starts the recovery stream.
func (r *recoveryStreamImpl) execute() (err error) {
defer func() {
close(r.ch)
r.scanner.Close()
if err == nil {
// get the txn buffer after the consuming is done.
r.txnBuffer = r.scanner.txnBuffer
}
r.notifier.Finish(err)
}()
var pendingMessage message.ImmutableMessage
var upstream <-chan message.ImmutableMessage
var downstream chan<- message.ImmutableMessage
for {
if pendingMessage != nil {
// if there is a pending message, we need to send it to the downstream.
upstream = nil
downstream = r.ch
} else {
// if there is no pending message, we need to read from the upstream.
upstream = r.scanner.Chan()
downstream = nil
}
select {
case <-r.notifier.Context().Done():
// canceled.
return r.notifier.Context().Err()
case downstream <- pendingMessage:
if pendingMessage.TimeTick() == r.param.EndTimeTick {
// reach the end of recovery stream, stop the consuming.
return nil
}
pendingMessage = nil
case msg, ok := <-upstream:
if !ok {
return r.scanner.Error()
}
pendingMessage = msg
}
}
}
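A hedged sketch of how this stream is meant to be consumed, inferred from the methods above: drain Chan until it is closed, check Error, and only then read the uncommitted TxnBuffer. The recovery storage does this internally, so the helper name is illustrative only:
// Sketch: drain a recovery stream and hand each message to an observer.
func drainRecoveryStream(rs recovery.RecoveryStream, observe func(message.ImmutableMessage)) (*utility.TxnBuffer, error) {
	for msg := range rs.Chan() {
		observe(msg) // e.g. feed the message into the recovery storage
	}
	if err := rs.Error(); err != nil {
		return nil, err
	}
	// Only valid once the stream finished without error.
	return rs.TxnBuffer(), nil
}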

View File

@ -22,6 +22,40 @@ import (
var _ wal.Scanner = (*scannerAdaptorImpl)(nil)
// newRecoveryScannerAdaptor creates a new recovery scanner adaptor.
func newRecoveryScannerAdaptor(l walimpls.ROWALImpls,
startMessageID message.MessageID,
scanMetrics *metricsutil.ScannerMetrics,
) *scannerAdaptorImpl {
name := "recovery"
logger := resource.Resource().Logger().With(
log.FieldComponent("scanner"),
zap.String("name", name),
zap.String("channel", l.Channel().String()),
zap.String("startMessageID", startMessageID.String()),
)
readOption := wal.ReadOption{
DeliverPolicy: options.DeliverPolicyStartFrom(startMessageID),
MesasgeHandler: adaptor.ChanMessageHandler(make(chan message.ImmutableMessage)),
}
s := &scannerAdaptorImpl{
logger: logger,
recovery: true,
innerWAL: l,
readOption: readOption,
filterFunc: func(message.ImmutableMessage) bool { return true },
reorderBuffer: utility.NewReOrderBuffer(),
pendingQueue: utility.NewPendingQueue(),
txnBuffer: utility.NewTxnBuffer(logger, scanMetrics),
cleanup: func() {},
ScannerHelper: helper.NewScannerHelper(name),
metrics: scanMetrics,
}
go s.execute()
return s
}
// newScannerAdaptor creates a new scanner adaptor.
func newScannerAdaptor(
name string,
@ -29,7 +63,7 @@ func newScannerAdaptor(
readOption wal.ReadOption,
scanMetrics *metricsutil.ScannerMetrics,
cleanup func(),
) wal.Scanner {
) *scannerAdaptorImpl {
if readOption.MesasgeHandler == nil {
readOption.MesasgeHandler = adaptor.ChanMessageHandler(make(chan message.ImmutableMessage))
}
@ -41,6 +75,7 @@ func newScannerAdaptor(
)
s := &scannerAdaptorImpl{
logger: logger,
recovery: false,
innerWAL: l,
readOption: readOption,
filterFunc: options.GetFilterFunc(readOption.MessageFilter),
@ -58,6 +93,7 @@ func newScannerAdaptor(
// scannerAdaptorImpl is a wrapper of ScannerImpls to extend it into a Scanner interface.
type scannerAdaptorImpl struct {
*helper.ScannerHelper
recovery bool
logger *log.MLogger
innerWAL walimpls.ROWALImpls
readOption wal.ReadOption
@ -125,7 +161,9 @@ func (s *scannerAdaptorImpl) execute() {
func (s *scannerAdaptorImpl) produceEventLoop(msgChan chan<- message.ImmutableMessage) error {
var wb wab.ROWriteAheadBuffer
var err error
if s.Channel().AccessMode == types.AccessModeRW {
if s.Channel().AccessMode == types.AccessModeRW && !s.recovery {
// The recovery scanner cannot use the write ahead buffer and should not trigger a sync.
// Trigger a persisted time tick to make sure the timetick is pushed forward.
// because the underlying wal may already be deleted due to the retention policy.
// So we cannot get the timetick from the wal.
@ -195,8 +233,11 @@ func (s *scannerAdaptorImpl) handleUpstream(msg message.ImmutableMessage) {
if len(msgs) > 0 {
// Push the confirmed messages into pending queue for consuming.
s.pendingQueue.Add(msgs)
} else if s.pendingQueue.Len() == 0 {
// If there's no new message incoming and there's no pending message in the queue.
}
if msg.IsPersisted() || s.pendingQueue.Len() == 0 {
// If the ts message is persisted, it must be visible to the consumer.
//
// Otherwise, if there's no new message incoming and no pending message in the queue,
// add the current timetick message into the pending queue to push the timetick forward.
// TODO: current milvus can only run on timetick pushing,
// after qview is applied, those trivial time tick messages can be erased.
@ -230,4 +271,10 @@ func (s *scannerAdaptorImpl) handleUpstream(msg message.ImmutableMessage) {
// Observe the filtered message.
s.metrics.UpdateTimeTickBufSize(s.reorderBuffer.Bytes())
s.metrics.ObservePassedMessage(isTailing, msg.MessageType(), msg.EstimateSize())
if s.logger.Level().Enabled(zap.DebugLevel) {
// Log the message if the log level is debug.
s.logger.Debug("push message into reorder buffer",
log.FieldMessage(msg),
zap.Bool("tailing", isTailing))
}
}

View File

@ -2,12 +2,15 @@ package adaptor
import (
"context"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/cockroachdb/errors"
"go.uber.org/atomic"
"go.uber.org/zap"
"google.golang.org/protobuf/types/known/anypb"
"github.com/milvus-io/milvus/internal/streamingnode/server/flusher/flusherimpl"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
@ -27,13 +30,11 @@ var _ wal.WAL = (*walAdaptorImpl)(nil)
type gracefulCloseFunc func()
// adaptImplsToWAL creates a new wal from wal impls.
func adaptImplsToWAL(
ctx context.Context,
// adaptImplsToROWAL creates a new readonly wal from wal impls.
func adaptImplsToROWAL(
basicWAL walimpls.WALImpls,
builders []interceptors.InterceptorBuilder,
cleanup func(),
) (wal.WAL, error) {
) *roWALAdaptorImpl {
logger := resource.Resource().Logger().With(
log.FieldComponent("wal"),
zap.String("channel", basicWAL.Channel().String()),
@ -52,28 +53,34 @@ func adaptImplsToWAL(
scanMetrics: metricsutil.NewScanMetrics(basicWAL.Channel()),
}
roWAL.SetLogger(logger)
if basicWAL.Channel().AccessMode == types.AccessModeRO {
// if the wal is read-only, return it directly.
return roWAL, nil
}
param, err := buildInterceptorParams(ctx, basicWAL)
if err != nil {
return nil, err
}
return roWAL
}
// adaptImplsToRWWAL creates a new wal from wal impls.
func adaptImplsToRWWAL(
roWAL *roWALAdaptorImpl,
builders []interceptors.InterceptorBuilder,
interceptorParam *interceptors.InterceptorBuildParam,
flusher *flusherimpl.WALFlusherImpl,
) *walAdaptorImpl {
if roWAL.Channel().AccessMode != types.AccessModeRW {
panic("wal should be read-write")
}
// build append interceptor for a wal.
wal := &walAdaptorImpl{
roWALAdaptorImpl: roWAL,
rwWALImpls: basicWAL,
rwWALImpls: roWAL.roWALImpls.(walimpls.WALImpls),
// TODO: remove the pool, use a queue instead.
appendExecutionPool: conc.NewPool[struct{}](0),
param: param,
interceptorBuildResult: buildInterceptor(builders, param),
writeMetrics: metricsutil.NewWriteMetrics(basicWAL.Channel(), basicWAL.WALName()),
param: interceptorParam,
interceptorBuildResult: buildInterceptor(builders, interceptorParam),
flusher: flusher,
writeMetrics: metricsutil.NewWriteMetrics(roWAL.Channel(), roWAL.WALName()),
isFenced: atomic.NewBool(false),
}
param.WAL.Set(wal)
return wal, nil
wal.writeMetrics.SetLogger(wal.roWALAdaptorImpl.Logger())
interceptorParam.WAL.Set(wal)
return wal
}
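The param.WAL.Set(wal) call above resolves a future that the interceptors received before the WAL existed, which is how the construction cycle between the WAL and its interceptors is broken. A dependency-free sketch of that late-binding pattern (a hand-rolled future so the snippet stays self-contained; the real code uses syncutil.Future):
// Sketch: consumers hold the future at build time and block on Get until Set is called.
type future[T any] struct {
	ready chan struct{}
	value T
}

func newFuture[T any]() *future[T] { return &future[T]{ready: make(chan struct{})} }

func (f *future[T]) Set(v T) { f.value = v; close(f.ready) }

func (f *future[T]) Get() T { <-f.ready; return f.value }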
// walAdaptorImpl is a wrapper of WALImpls to extend it into a WAL interface.
@ -84,6 +91,7 @@ type walAdaptorImpl struct {
appendExecutionPool *conc.Pool[struct{}]
param *interceptors.InterceptorBuildParam
interceptorBuildResult interceptorBuildResult
flusher *flusherimpl.WALFlusherImpl
writeMetrics *metricsutil.WriteMetrics
isFenced *atomic.Bool
}
@ -142,7 +150,7 @@ func (w *walAdaptorImpl) Append(ctx context.Context, msg message.MutableMessage)
return notPersistHint.MessageID, nil
}
metricsGuard.StartWALImplAppend()
msgID, err := w.rwWALImpls.Append(ctx, msg)
msgID, err := w.retryAppendWhenRecoverableError(ctx, msg)
metricsGuard.FinishWALImplAppend()
return msgID, err
})
@ -178,6 +186,37 @@ func (w *walAdaptorImpl) Append(ctx context.Context, msg message.MutableMessage)
return r, nil
}
// retryAppendWhenRecoverableError retries the append operation when recoverable error occurs.
func (w *walAdaptorImpl) retryAppendWhenRecoverableError(ctx context.Context, msg message.MutableMessage) (message.MessageID, error) {
backoff := backoff.NewExponentialBackOff()
backoff.InitialInterval = 10 * time.Millisecond
backoff.MaxInterval = 5 * time.Second
backoff.MaxElapsedTime = 0
backoff.Reset()
// An append operation should be retried until it succeeds or some unrecoverable error occurs.
for i := 0; ; i++ {
msgID, err := w.rwWALImpls.Append(ctx, msg)
if err == nil {
return msgID, nil
}
if errors.IsAny(err, context.Canceled, context.DeadlineExceeded, walimpls.ErrFenced) {
return nil, err
}
w.writeMetrics.ObserveRetry()
nextInterval := backoff.NextBackOff()
w.Logger().Warn("append message into wal impls failed, retrying...", log.FieldMessage(msg), zap.Int("retry", i), zap.Duration("nextInterval", nextInterval), zap.Error(err))
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-w.available.CloseCh():
return nil, status.NewOnShutdownError("wal is on shutdown")
case <-time.After(nextInterval):
}
}
}
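As a rough guide to the backoff knobs used above (MaxElapsedTime = 0 means the loop never gives up on its own), a stand-alone sketch with the same cenkalti/backoff/v4 configuration; the printed intervals are approximate because the library adds jitter:
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 10 * time.Millisecond
	b.MaxInterval = 5 * time.Second
	b.MaxElapsedTime = 0 // never stop producing intervals
	b.Reset()
	for i := 0; i < 12; i++ {
		// Roughly 10ms, 15ms, 22ms, ... capped around 5s.
		fmt.Println(b.NextBackOff())
	}
}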
// AppendAsync writes a record to the log asynchronously.
func (w *walAdaptorImpl) AppendAsync(ctx context.Context, msg message.MutableMessage, cb func(*wal.AppendResult, error)) {
if !w.lifetime.Add(typeutil.LifetimeStateWorking) {
@ -207,6 +246,13 @@ func (w *walAdaptorImpl) Close() {
w.available.Close()
w.lifetime.Wait()
// close the flusher.
w.Logger().Info("wal begin to close flusher...")
if w.flusher != nil {
// the flusher is only nil in tests.
w.flusher.Close()
}
w.Logger().Info("wal begin to close scanners...")
// close all wal instances.
@ -226,6 +272,9 @@ func (w *walAdaptorImpl) Close() {
w.Logger().Info("close the write ahead buffer...")
w.param.WriteAheadBuffer.Close()
w.Logger().Info("close the segment assignment manager...")
w.param.ShardManager.Close()
w.Logger().Info("call wal cleanup function...")
w.cleanup()
w.Logger().Info("wal closed")

View File

@ -1,271 +0,0 @@
package adaptor
import (
"context"
"sync"
"testing"
"time"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"go.uber.org/atomic"
"github.com/milvus-io/milvus/internal/mocks/mock_metastore"
"github.com/milvus-io/milvus/internal/mocks/streamingnode/server/wal/interceptors/mock_wab"
"github.com/milvus-io/milvus/internal/mocks/streamingnode/server/wal/interceptors/timetick/mock_inspector"
"github.com/milvus-io/milvus/internal/mocks/streamingnode/server/wal/mock_interceptors"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/internal/util/streamingutil/status"
"github.com/milvus-io/milvus/pkg/v2/mocks/streaming/mock_walimpls"
"github.com/milvus-io/milvus/pkg/v2/mocks/streaming/util/mock_message"
"github.com/milvus-io/milvus/pkg/v2/proto/streamingpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/impls/walimplstest"
)
func TestWalAdaptorReadFail(t *testing.T) {
resource.InitForTest(t)
l := mock_walimpls.NewMockWALImpls(t)
expectedErr := errors.New("test")
l.EXPECT().WALName().Return("test")
cnt := atomic.NewInt64(2)
l.EXPECT().Append(mock.Anything, mock.Anything).Return(walimplstest.NewTestMessageID(1), nil)
l.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(
func(ctx context.Context, ro walimpls.ReadOption) (walimpls.ScannerImpls, error) {
if cnt.Dec() < 0 {
s := mock_walimpls.NewMockScannerImpls(t)
s.EXPECT().Chan().Return(make(chan message.ImmutableMessage, 1))
s.EXPECT().Close().Return(nil)
return s, nil
}
return nil, expectedErr
}).Maybe()
l.EXPECT().Close().Return()
// Test adapt to a read-only wal.
l.EXPECT().Channel().Return(types.PChannelInfo{AccessMode: types.AccessModeRO})
lAdapted, err := adaptImplsToWAL(context.Background(), l, nil, func() {})
assert.NoError(t, err)
scanner, err := lAdapted.Read(context.Background(), wal.ReadOption{
VChannel: "test",
})
assert.NoError(t, err)
assert.NotNil(t, scanner)
time.Sleep(time.Second)
assert.True(t, lAdapted.IsAvailable())
lAdapted.Close()
assert.False(t, lAdapted.IsAvailable())
scanner.Close()
// A rw wal should use the write ahead buffer to sync time tick.
writeAheadBuffer := mock_wab.NewMockROWriteAheadBuffer(t)
operator := mock_inspector.NewMockTimeTickSyncOperator(t)
operator.EXPECT().Channel().Return(types.PChannelInfo{}).Maybe()
operator.EXPECT().Sync(mock.Anything, mock.Anything).Return().Maybe()
operator.EXPECT().WriteAheadBuffer().Return(writeAheadBuffer).Maybe()
resource.Resource().TimeTickInspector().RegisterSyncOperator(
operator,
)
// Test adapt to a read-write wal.
l.EXPECT().Channel().Unset()
l.EXPECT().Channel().Return(types.PChannelInfo{AccessMode: types.AccessModeRW})
lAdapted, err = adaptImplsToWAL(context.Background(), l, nil, func() {})
assert.NoError(t, err)
scanner, err = lAdapted.Read(context.Background(), wal.ReadOption{
VChannel: "test",
})
assert.NoError(t, err)
assert.NotNil(t, scanner)
time.Sleep(time.Second)
scanner.Close()
assert.True(t, lAdapted.IsAvailable())
lAdapted.Close()
assert.False(t, lAdapted.IsAvailable())
}
func TestWALAdaptor(t *testing.T) {
snMeta := mock_metastore.NewMockStreamingNodeCataLog(t)
snMeta.EXPECT().GetConsumeCheckpoint(mock.Anything, mock.Anything).Return(nil, nil).Maybe()
snMeta.EXPECT().SaveConsumeCheckpoint(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
resource.InitForTest(t, resource.OptStreamingNodeCatalog(snMeta))
operator := mock_inspector.NewMockTimeTickSyncOperator(t)
operator.EXPECT().Channel().Return(types.PChannelInfo{})
operator.EXPECT().Sync(mock.Anything, mock.Anything).Return()
buffer := mock_wab.NewMockROWriteAheadBuffer(t)
operator.EXPECT().WriteAheadBuffer().Return(buffer)
resource.Resource().TimeTickInspector().RegisterSyncOperator(operator)
// Create a mock WAL implementation
l := mock_walimpls.NewMockWALImpls(t)
l.EXPECT().WALName().Return("test")
l.EXPECT().Append(mock.Anything, mock.Anything).RunAndReturn(
func(ctx context.Context, mm message.MutableMessage) (message.MessageID, error) {
return walimplstest.NewTestMessageID(1), nil
})
l.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, ro walimpls.ReadOption) (walimpls.ScannerImpls, error) {
scanner := mock_walimpls.NewMockScannerImpls(t)
ch := make(chan message.ImmutableMessage, 1)
scanner.EXPECT().Chan().Return(ch)
scanner.EXPECT().Close().RunAndReturn(func() error {
close(ch)
return nil
})
return scanner, nil
})
l.EXPECT().Close().Return()
l.EXPECT().Channel().Return(types.PChannelInfo{AccessMode: types.AccessModeRO})
// Test read only wal
lAdapted, err := adaptImplsToWAL(context.Background(), l, nil, func() {})
assert.NoError(t, err)
assert.Panics(t, func() {
lAdapted.Append(context.Background(), nil)
})
assert.Panics(t, func() {
lAdapted.AppendAsync(context.Background(), nil, nil)
})
assert.Panics(t, func() {
lAdapted.GetLatestMVCCTimestamp(context.Background(), "test")
})
lAdapted.Close()
// Test read-write wal
l.EXPECT().Channel().Unset()
l.EXPECT().Channel().Return(types.PChannelInfo{AccessMode: types.AccessModeRW})
lAdapted, err = adaptImplsToWAL(context.Background(), l, nil, func() {})
assert.NoError(t, err)
assert.NotNil(t, lAdapted.Channel())
msg := mock_message.NewMockMutableMessage(t)
msg.EXPECT().WithWALTerm(mock.Anything).Return(msg).Maybe()
msg.EXPECT().MessageType().Return(message.MessageTypeInsert).Maybe()
msg.EXPECT().EstimateSize().Return(1).Maybe()
msg.EXPECT().IsPersisted().Return(true).Maybe()
msg.EXPECT().MarshalLogObject(mock.Anything).Return(nil).Maybe()
_, err = lAdapted.Append(context.Background(), msg)
assert.NoError(t, err)
lAdapted.AppendAsync(context.Background(), msg, func(mi *wal.AppendResult, err error) {
assert.Nil(t, err)
})
// Test in concurrency env.
wg := sync.WaitGroup{}
for i := 0; i < 10; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
scanner, err := lAdapted.Read(context.Background(), wal.ReadOption{VChannel: "test"})
if err != nil {
assertShutdownError(t, err)
return
}
assert.NoError(t, err)
<-scanner.Chan()
}(i)
}
time.Sleep(time.Second * 1)
lAdapted.Close()
// All wal should be closed with Opener.
ch := make(chan struct{})
go func() {
wg.Wait()
close(ch)
}()
select {
case <-time.After(time.Second * 3):
t.Errorf("wal close should be fast")
case <-ch:
}
_, err = lAdapted.Append(context.Background(), msg)
assertShutdownError(t, err)
lAdapted.AppendAsync(context.Background(), msg, func(mi *wal.AppendResult, err error) {
assertShutdownError(t, err)
})
_, err = lAdapted.Read(context.Background(), wal.ReadOption{VChannel: "test"})
assertShutdownError(t, err)
}
func assertShutdownError(t *testing.T, err error) {
e := status.AsStreamingError(err)
assert.Equal(t, e.Code, streamingpb.StreamingCode_STREAMING_CODE_ON_SHUTDOWN)
}
func TestNoInterceptor(t *testing.T) {
l := mock_walimpls.NewMockWALImpls(t)
l.EXPECT().WALName().Return("test")
l.EXPECT().Channel().Return(types.PChannelInfo{})
l.EXPECT().Append(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, mm message.MutableMessage) (message.MessageID, error) {
return walimplstest.NewTestMessageID(1), nil
})
l.EXPECT().Close().Run(func() {})
lWithInterceptors, err := adaptImplsToWAL(context.Background(), l, nil, func() {})
assert.NoError(t, err)
msg := mock_message.NewMockMutableMessage(t)
msg.EXPECT().IsPersisted().Return(true).Maybe()
msg.EXPECT().WithWALTerm(mock.Anything).Return(msg).Maybe()
msg.EXPECT().MessageType().Return(message.MessageTypeInsert).Maybe()
msg.EXPECT().EstimateSize().Return(1).Maybe()
msg.EXPECT().MarshalLogObject(mock.Anything).Return(nil).Maybe()
_, err = lWithInterceptors.Append(context.Background(), msg)
assert.NoError(t, err)
lWithInterceptors.Close()
}
func TestWALWithInterceptor(t *testing.T) {
l := mock_walimpls.NewMockWALImpls(t)
l.EXPECT().Channel().Return(types.PChannelInfo{})
l.EXPECT().Append(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, mm message.MutableMessage) (message.MessageID, error) {
return walimplstest.NewTestMessageID(1), nil
})
l.EXPECT().WALName().Return("test")
l.EXPECT().Close().Run(func() {})
b := mock_interceptors.NewMockInterceptorBuilder(t)
readyCh := make(chan struct{})
b.EXPECT().Build(mock.Anything).RunAndReturn(func(ibp *interceptors.InterceptorBuildParam) interceptors.Interceptor {
interceptor := mock_interceptors.NewMockInterceptorWithReady(t)
interceptor.EXPECT().Ready().Return(readyCh)
interceptor.EXPECT().DoAppend(mock.Anything, mock.Anything, mock.Anything).RunAndReturn(
func(ctx context.Context, mm message.MutableMessage, f func(context.Context, message.MutableMessage) (message.MessageID, error)) (message.MessageID, error) {
return f(ctx, mm)
})
interceptor.EXPECT().Close().Run(func() {})
return interceptor
})
lWithInterceptors, err := adaptImplsToWAL(context.Background(), l, []interceptors.InterceptorBuilder{b}, func() {})
assert.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
// Interceptor is not ready, so the append/read will be blocked until timeout.
msg := mock_message.NewMockMutableMessage(t)
msg.EXPECT().WithWALTerm(mock.Anything).Return(msg).Maybe()
msg.EXPECT().MessageType().Return(message.MessageTypeInsert).Maybe()
msg.EXPECT().EstimateSize().Return(1).Maybe()
msg.EXPECT().IsPersisted().Return(true).Maybe()
msg.EXPECT().MarshalLogObject(mock.Anything).Return(nil).Maybe()
_, err = lWithInterceptors.Append(ctx, msg)
assert.ErrorIs(t, err, context.DeadlineExceeded)
// Interceptor is ready, so the append/read will return soon.
close(readyCh)
_, err = lWithInterceptors.Append(context.Background(), msg)
assert.NoError(t, err)
lWithInterceptors.Close()
}

View File

@ -10,6 +10,7 @@ import (
"testing"
"time"
"github.com/cockroachdb/errors"
"github.com/remeh/sizedwaitgroup"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@ -19,18 +20,20 @@ import (
"github.com/milvus-io/milvus/internal/mocks/mock_metastore"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/lock"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/redo"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/timetick"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/registry"
internaltypes "github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/idalloc"
"github.com/milvus-io/milvus/internal/util/streamingutil/status"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/options"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/impls/walimplstest"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
@ -44,14 +47,18 @@ type walTestFramework struct {
messageCount int
}
func TestFencedError(t *testing.T) {
assert.True(t, errors.IsAny(errors.Mark(errors.New("test"), walimpls.ErrFenced), context.Canceled, walimpls.ErrFenced))
assert.True(t, errors.IsAny(errors.Wrap(walimpls.ErrFenced, "some message"), context.Canceled, walimpls.ErrFenced))
}
func TestWAL(t *testing.T) {
initResourceForTest(t)
b := registry.MustGetBuilder(walimplstest.WALName,
redo.NewInterceptorBuilder(),
// TODO: current flusher interceptor cannot work well with the walimplstest.
// flusher.NewInterceptorBuilder(),
lock.NewInterceptorBuilder(),
timetick.NewInterceptorBuilder(),
segment.NewInterceptorBuilder(),
shard.NewInterceptorBuilder(),
)
f := &walTestFramework{
b: b,
@ -70,11 +77,13 @@ func initResourceForTest(t *testing.T) {
rc := idalloc.NewMockRootCoordClient(t)
rc.EXPECT().GetPChannelInfo(mock.Anything, mock.Anything).Return(&rootcoordpb.GetPChannelInfoResponse{}, nil)
rc.EXPECT().AllocSegment(mock.Anything, mock.Anything).Return(&datapb.AllocSegmentResponse{}, nil)
catalog := mock_metastore.NewMockStreamingNodeCataLog(t)
catalog.EXPECT().GetConsumeCheckpoint(mock.Anything, mock.Anything).Return(nil, nil)
catalog.EXPECT().SaveConsumeCheckpoint(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
catalog.EXPECT().ListSegmentAssignment(mock.Anything, mock.Anything).Return(nil, nil)
catalog.EXPECT().SaveSegmentAssignments(mock.Anything, mock.Anything, mock.Anything).Return(nil)
catalog.EXPECT().SaveSegmentAssignments(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
catalog.EXPECT().ListVChannel(mock.Anything, mock.Anything).Return(nil, nil)
catalog.EXPECT().SaveVChannels(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
fMixCoordClient := syncutil.NewFuture[internaltypes.MixCoordClient]()
fMixCoordClient.Set(rc)
resource.InitForTest(
@ -132,6 +141,7 @@ func (f *testOneWALFramework) Run() {
}
rwWAL, err := f.opener.Open(ctx, &wal.OpenOption{
Channel: pChannel,
DisableFlusher: true,
})
assert.NoError(f.t, err)
assert.NotNil(f.t, rwWAL)
@ -140,12 +150,29 @@ func (f *testOneWALFramework) Run() {
pChannel.AccessMode = types.AccessModeRO
roWAL, err := f.opener.Open(ctx, &wal.OpenOption{
Channel: pChannel,
DisableFlusher: true,
})
assert.NoError(f.t, err)
f.testReadAndWrite(ctx, rwWAL, roWAL)
// close the wal
rwWAL.Close()
roWAL.Close()
walimplstest.EnableFenced(pChannel.Name)
// create collection before starting the test
createMsg := message.NewCreateCollectionMessageBuilderV1().
WithHeader(&message.CreateCollectionMessageHeader{
CollectionId: 100,
PartitionIds: []int64{200},
}).
WithBody(&msgpb.CreateCollectionRequest{}).
WithVChannel(testVChannel).
MustBuildMutable()
result, err := rwWAL.Append(ctx, createMsg)
assert.Nil(f.t, result)
assert.True(f.t, status.AsStreamingError(err).IsFenced())
walimplstest.DisableFenced(pChannel.Name)
rwWAL.Close()
}
}
@ -264,9 +291,13 @@ func (f *testOneWALFramework) testSendDropCollection(ctx context.Context, w wal.
BuildMutable()
assert.NoError(f.t, err)
msgID, err := w.Append(ctx, dropMsg)
done := make(chan struct{})
w.AppendAsync(ctx, dropMsg, func(ar *wal.AppendResult, err error) {
assert.NoError(f.t, err)
assert.NotNil(f.t, msgID)
assert.NotNil(f.t, ar)
close(done)
})
<-done
}
func (f *testOneWALFramework) testAppend(ctx context.Context, w wal.WAL) ([]message.ImmutableMessage, error) {
@ -293,8 +324,7 @@ func (f *testOneWALFramework) testAppend(ctx context.Context, w wal.WAL) ([]mess
assert.NotNil(f.t, appendResult)
immutableMsg := msg.IntoImmutableMessage(appendResult.MessageID)
begin, err := message.AsImmutableBeginTxnMessageV2(immutableMsg)
assert.NoError(f.t, err)
begin := message.MustAsImmutableBeginTxnMessageV2(immutableMsg)
b := message.NewImmutableTxnMessageBuilder(begin)
txnCtx := appendResult.TxnCtx
for i := 0; i < int(rand.Int31n(5)); i++ {

View File

@ -17,6 +17,7 @@ type OpenerBuilder interface {
// OpenOption is the option for allocating wal instance.
type OpenOption struct {
Channel types.PChannelInfo
DisableFlusher bool // disable the flusher; only used in tests.
}
// Opener is the interface for build wal instance.

View File

@ -10,8 +10,8 @@ import (
var _ InterceptorWithReady = (*chainedInterceptor)(nil)
type (
// appendInterceptorCall is the common function to execute the append interceptor.
appendInterceptorCall = func(ctx context.Context, msg message.MutableMessage, append Append) (message.MessageID, error)
// AppendInterceptorCall is the common function to execute the append interceptor.
AppendInterceptorCall = func(ctx context.Context, msg message.MutableMessage, append Append) (message.MessageID, error)
)
// NewChainedInterceptor creates a new chained interceptor.
@ -27,7 +27,7 @@ func NewChainedInterceptor(interceptors ...Interceptor) InterceptorWithReady {
type chainedInterceptor struct {
closed chan struct{}
interceptors []Interceptor
appendCall appendInterceptorCall
appendCall AppendInterceptorCall
}
// Ready wait all interceptors to be ready.
@ -63,7 +63,7 @@ func (c *chainedInterceptor) Close() {
}
// chainAppendInterceptors chains all unary client interceptors into one.
func chainAppendInterceptors(interceptors []Interceptor) appendInterceptorCall {
func chainAppendInterceptors(interceptors []Interceptor) AppendInterceptorCall {
if len(interceptors) == 0 {
// Do nothing if no interceptors.
return func(ctx context.Context, msg message.MutableMessage, append Append) (message.MessageID, error) {
@ -100,7 +100,7 @@ func getChainAppendInvoker(interceptors []Interceptor, idx int, finalInvoker App
}
// adaptAppendWithMetricCollecting adapts the append interceptor with metric collecting.
func adaptAppendWithMetricCollecting(name string, append appendInterceptorCall) appendInterceptorCall {
func adaptAppendWithMetricCollecting(name string, append AppendInterceptorCall) AppendInterceptorCall {
return func(ctx context.Context, msg message.MutableMessage, invoker Append) (message.MessageID, error) {
c := utility.MustGetAppendMetrics(ctx).StartInterceptorCollector(name)
msgID, err := append(ctx, msg, func(ctx context.Context, msg message.MutableMessage) (message.MessageID, error) {
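The rename above only exports the chained call type; the composition itself is unchanged. As a simplified, self-contained reminder of how such chains compose (plain function types here, not the actual Interceptor interface):
package main

import (
	"context"
	"fmt"
)

type appendFn func(ctx context.Context, msg string) (int64, error)

// interceptorFn mirrors DoAppend: it may act before and after calling next.
type interceptorFn func(ctx context.Context, msg string, next appendFn) (int64, error)

// chain composes interceptors so that ics[0] runs first and final runs last.
func chain(ics []interceptorFn, final appendFn) appendFn {
	next := final
	for i := len(ics) - 1; i >= 0; i-- {
		ic, downstream := ics[i], next
		next = func(ctx context.Context, msg string) (int64, error) {
			return ic(ctx, msg, downstream)
		}
	}
	return next
}

func main() {
	logging := func(ctx context.Context, msg string, next appendFn) (int64, error) {
		fmt.Println("before append:", msg)
		id, err := next(ctx, msg)
		fmt.Println("after append:", msg)
		return id, err
	}
	call := chain([]interceptorFn{logging}, func(ctx context.Context, msg string) (int64, error) { return 1, nil })
	fmt.Println(call(context.Background(), "hello"))
}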

View File

@ -1,22 +0,0 @@
package flusher
import (
"github.com/milvus-io/milvus/internal/streamingnode/server/flusher/flusherimpl"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
)
// NewInterceptorBuilder creates a new flusher interceptor builder.
func NewInterceptorBuilder() interceptors.InterceptorBuilder {
return &interceptorBuilder{}
}
// interceptorBuilder is the builder for flusher interceptor.
type interceptorBuilder struct{}
// Build creates a new flusher interceptor.
func (b *interceptorBuilder) Build(param *interceptors.InterceptorBuildParam) interceptors.Interceptor {
flusher := flusherimpl.RecoverWALFlusher(param)
return &flusherAppendInterceptor{
flusher: flusher,
}
}

View File

@ -1,37 +0,0 @@
package flusher
import (
"context"
"github.com/milvus-io/milvus/internal/streamingnode/server/flusher/flusherimpl"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
)
const (
interceptorName = "flusher"
)
var (
_ interceptors.Interceptor = (*flusherAppendInterceptor)(nil)
_ interceptors.InterceptorWithGracefulClose = (*flusherAppendInterceptor)(nil)
)
// flusherAppendInterceptor is an append interceptor to handle the append operation from consumer.
// the flusher is a unique consumer that will consume the message from wal.
// It will handle the message and persist the message other storage from wal.
type flusherAppendInterceptor struct {
flusher *flusherimpl.WALFlusherImpl
}
func (c *flusherAppendInterceptor) DoAppend(ctx context.Context, msg message.MutableMessage, append interceptors.Append) (msgID message.MessageID, err error) {
// TODO: The interceptor will also do some slow down for streaming service if the consumer is lag too much.
return append(ctx, msg)
}
// GracefulClose will close the flusher gracefully.
func (c *flusherAppendInterceptor) GracefulClose() {
c.flusher.Close()
}
func (c *flusherAppendInterceptor) Close() {}

View File

@ -4,8 +4,11 @@ import (
"context"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/shards"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/timetick/mvcc"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/txn"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/wab"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/recovery"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
@ -16,13 +19,26 @@ type (
Append = func(ctx context.Context, msg message.MutableMessage) (message.MessageID, error)
)
// InterceptorBuildParam is the parameter to build an interceptor.
type InterceptorBuildParam struct {
ChannelInfo types.PChannelInfo
WAL *syncutil.Future[wal.WAL] // The wal final object, can be used after interceptor is ready.
InitializedTimeTick uint64 // The time tick is initialized, can be used to skip the time tick append.
InitializedMessageID message.MessageID // The message id of the last message in the wal, can be used to skip the message id append.
LastTimeTickMessage message.ImmutableMessage // The last time tick message in wal.
WriteAheadBuffer *wab.WriteAheadBuffer // The write ahead buffer for the wal, used to erase the subscription of underlying wal.
MVCCManager *mvcc.MVCCManager // The MVCC manager for the wal, can be used to get the latest mvcc timetick.
InitialRecoverSnapshot *recovery.RecoverySnapshot // The initial recover snapshot for the wal, used to recover the wal state.
TxnManager *txn.TxnManager // The transaction manager for the wal, used to manage the transactions.
ShardManager shards.ShardManager // The shard manager for the wal, used to manage the shards, segment assignment and partitions.
}
// Clear releases the resources in the interceptor build param.
func (p *InterceptorBuildParam) Clear() {
if p.WriteAheadBuffer != nil {
p.WriteAheadBuffer.Close()
}
if p.ShardManager != nil {
p.ShardManager.Close()
}
}
// InterceptorBuilder is the interface to build an interceptor.
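For illustration, a hedged sketch of a builder that consumes the new fields; the no-op interceptor below assumes the Interceptor interface is the DoAppend/Close pair used elsewhere in this change, and none of these names are part of the commit:
// Sketch only: read the recovery-related fields prepared by the opener.
type exampleBuilder struct{}

func (exampleBuilder) Build(param *InterceptorBuildParam) Interceptor {
	// Snapshot, txn manager and shard manager are ready before any interceptor is built.
	_ = param.InitialRecoverSnapshot
	_ = param.TxnManager
	_ = param.ShardManager
	return noopInterceptor{}
}

type noopInterceptor struct{}

func (noopInterceptor) DoAppend(ctx context.Context, msg message.MutableMessage, append Append) (message.MessageID, error) {
	return append(ctx, msg)
}

func (noopInterceptor) Close() {}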

View File

@ -6,7 +6,6 @@ import (
)
// NewInterceptorBuilder creates a new lock interceptor builder.
// TODO: add it into wal after recovery storage is merged.
func NewInterceptorBuilder() interceptors.InterceptorBuilder {
return &interceptorBuilder{}
}
@ -18,6 +17,6 @@ type interceptorBuilder struct{}
func (b *interceptorBuilder) Build(param *interceptors.InterceptorBuildParam) interceptors.Interceptor {
return &lockAppendInterceptor{
vchannelLocker: lock.NewKeyLock[string](),
// TODO: txnManager will be intiailized by param txnManager: param.TxnManager,
txnManager: param.TxnManager,
}
}
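The builder now takes the transaction manager directly from the build param; the per-vchannel locking itself is unchanged. As a generic illustration of per-key locking (this is not the milvus KeyLock implementation, just the idea behind locking by vchannel):
package main

import "sync"

// Sketch: a minimal per-key mutex; a production version would also refcount
// and clean up idle entries.
type keyLock struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func newKeyLock() *keyLock { return &keyLock{locks: map[string]*sync.Mutex{}} }

func (k *keyLock) Lock(key string) {
	k.mu.Lock()
	m, ok := k.locks[key]
	if !ok {
		m = &sync.Mutex{}
		k.locks[key] = m
	}
	k.mu.Unlock()
	m.Lock()
}

func (k *keyLock) Unlock(key string) {
	k.mu.Lock()
	m := k.locks[key]
	k.mu.Unlock()
	m.Unlock()
}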

View File

@ -12,5 +12,8 @@ type interceptorBuilder struct{}
// Build creates a new redo interceptor.
func (b *interceptorBuilder) Build(param *interceptors.InterceptorBuildParam) interceptors.Interceptor {
return &redoAppendInterceptor{}
return &redoAppendInterceptor{
shardManager: param.ShardManager,
gracefulStop: make(chan struct{}),
}
}

View File

@ -6,17 +6,23 @@ import (
"github.com/cockroachdb/errors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/shards"
"github.com/milvus-io/milvus/internal/util/streamingutil/status"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
)
var (
_ interceptors.Interceptor = (*redoAppendInterceptor)(nil)
_ interceptors.InterceptorWithGracefulClose = (*redoAppendInterceptor)(nil)
ErrRedo = errors.New("redo")
)
// redoAppendInterceptor is an append interceptor to retry the append operation if needed.
// It's useful when the append operation wants to refresh the append context (such as the timetick belonging to the message).
type redoAppendInterceptor struct{}
type redoAppendInterceptor struct {
shardManager shards.ShardManager
gracefulStop chan struct{}
}
// TODO: should be removed after lock-based before timetick is applied.
func (r *redoAppendInterceptor) DoAppend(ctx context.Context, msg message.MutableMessage, append interceptors.Append) (msgID message.MessageID, err error) {
@ -24,6 +30,10 @@ func (r *redoAppendInterceptor) DoAppend(ctx context.Context, msg message.Mutabl
if ctx.Err() != nil {
return nil, ctx.Err()
}
if err := r.waitUntilGrowingSegmentReady(ctx, msg); err != nil {
return nil, err
}
msgID, err = append(ctx, msg)
// If the error is ErrRedo, we should redo the append operation.
if errors.Is(err, ErrRedo) {
@ -33,4 +43,32 @@ func (r *redoAppendInterceptor) DoAppend(ctx context.Context, msg message.Mutabl
}
}
// waitUntilGrowingSegmentReady waits until the growing segment is ready if msg is insert.
func (r *redoAppendInterceptor) waitUntilGrowingSegmentReady(ctx context.Context, msg message.MutableMessage) error {
if msg.MessageType() == message.MessageTypeInsert {
insertMessage := message.MustAsMutableInsertMessageV1(msg)
h := insertMessage.Header()
for _, partition := range h.Partitions {
ready, err := r.shardManager.WaitUntilGrowingSegmentReady(h.CollectionId, partition.PartitionId)
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ready:
// do nothing
return nil
case <-r.gracefulStop:
return status.NewOnShutdownError("redo interceptor is on shutdown")
}
}
}
return nil
}
func (r *redoAppendInterceptor) GracefulClose() {
close(r.gracefulStop)
}
func (r *redoAppendInterceptor) Close() {}
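The wait above races three events: growing-segment readiness, caller cancellation, and interceptor shutdown. A compact, self-contained restatement of that select (illustrative names only):
// Sketch: block until one of ready / ctx / stop fires, mirroring the select above.
func waitReady(ctx context.Context, ready <-chan struct{}, stop <-chan struct{}) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-ready:
		return nil
	case <-stop:
		return errors.New("shutting down")
	}
}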

View File

@ -1,35 +0,0 @@
package segment
import (
"context"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/manager"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
)
func NewInterceptorBuilder() interceptors.InterceptorBuilder {
return &interceptorBuilder{}
}
type interceptorBuilder struct{}
func (b *interceptorBuilder) Build(param *interceptors.InterceptorBuildParam) interceptors.Interceptor {
assignManager := syncutil.NewFuture[*manager.PChannelSegmentAllocManager]()
ctx, cancel := context.WithCancel(context.Background())
segmentInterceptor := &segmentInterceptor{
ctx: ctx,
cancel: cancel,
logger: resource.Resource().Logger().With(
log.FieldComponent("segment-assigner"),
zap.Any("pchannel", param.ChannelInfo),
),
assignManager: assignManager,
}
go segmentInterceptor.recoverPChannelManager(param)
return segmentInterceptor
}

View File

@ -1,165 +0,0 @@
package inspector
import (
"context"
"time"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)
const (
defaultSealAllInterval = 10 * time.Second
defaultMustSealInterval = 200 * time.Millisecond
)
// NewSealedInspector creates a new seal inspector.
func NewSealedInspector(n *stats.SealSignalNotifier) SealOperationInspector {
s := &sealOperationInspectorImpl{
taskNotifier: syncutil.NewAsyncTaskNotifier[struct{}](),
managers: typeutil.NewConcurrentMap[string, SealOperator](),
notifier: n,
backOffTimer: typeutil.NewBackoffTimer(typeutil.BackoffTimerConfig{
Default: 1 * time.Second,
Backoff: typeutil.BackoffConfig{
InitialInterval: 20 * time.Millisecond,
Multiplier: 2.0,
MaxInterval: 1 * time.Second,
},
}),
triggerCh: make(chan string),
logger: resource.Resource().Logger().With(log.FieldComponent("segment-assigner")),
}
go s.background()
return s
}
// sealOperationInspectorImpl is the implementation of SealInspector.
type sealOperationInspectorImpl struct {
taskNotifier *syncutil.AsyncTaskNotifier[struct{}]
managers *typeutil.ConcurrentMap[string, SealOperator]
notifier *stats.SealSignalNotifier
backOffTimer *typeutil.BackoffTimer
triggerCh chan string
logger *log.MLogger
}
// TriggerSealWaited implements SealInspector.TriggerSealWaited.
func (s *sealOperationInspectorImpl) TriggerSealWaited(ctx context.Context, pchannel string) error {
select {
case <-ctx.Done():
return ctx.Err()
case s.triggerCh <- pchannel:
return nil
}
}
// RegisterPChannelManager implements SealInspector.RegisterPChannelManager.
func (s *sealOperationInspectorImpl) RegisterPChannelManager(m SealOperator) {
_, loaded := s.managers.GetOrInsert(m.Channel().Name, m)
if loaded {
panic("pchannel manager already exists, critical bug in code")
}
}
// UnregisterPChannelManager implements SealInspector.UnregisterPChannelManager.
func (s *sealOperationInspectorImpl) UnregisterPChannelManager(m SealOperator) {
_, loaded := s.managers.GetAndRemove(m.Channel().Name)
if !loaded {
panic("pchannel manager not found, critical bug in code")
}
}
// Close implements SealInspector.Close.
func (s *sealOperationInspectorImpl) Close() {
s.taskNotifier.Cancel()
s.taskNotifier.BlockUntilFinish()
}
// background is the background task to inspect if a segment should be sealed or not.
func (s *sealOperationInspectorImpl) background() {
defer s.taskNotifier.Finish(struct{}{})
sealAllTicker := time.NewTicker(defaultSealAllInterval)
defer sealAllTicker.Stop()
mustSealTicker := time.NewTicker(defaultMustSealInterval)
defer mustSealTicker.Stop()
var backoffCh <-chan time.Time
for {
if s.shouldEnableBackoff() {
// start a backoff if there's some pchannel wait for seal.
s.backOffTimer.EnableBackoff()
backoffCh, _ = s.backOffTimer.NextTimer()
} else {
s.backOffTimer.DisableBackoff()
}
select {
case <-s.taskNotifier.Context().Done():
return
case pchannel := <-s.triggerCh:
if manager, ok := s.managers.Get(pchannel); ok {
manager.TryToSealWaitedSegment(s.taskNotifier.Context())
}
case <-s.notifier.WaitChan():
s.tryToSealPartition(s.notifier.Get())
case <-backoffCh:
// only seal waited segment for backoff.
s.managers.Range(func(_ string, pm SealOperator) bool {
pm.TryToSealWaitedSegment(s.taskNotifier.Context())
return true
})
case <-sealAllTicker.C:
s.managers.Range(func(_ string, pm SealOperator) bool {
pm.TryToSealSegments(s.taskNotifier.Context())
return true
})
case <-mustSealTicker.C:
threshold := paramtable.Get().DataCoordCfg.GrowingSegmentsMemSizeInMB.GetAsUint64() * 1024 * 1024
segmentBelongs := resource.Resource().SegmentAssignStatsManager().SealByTotalGrowingSegmentsSize(threshold)
if segmentBelongs == nil {
continue
}
s.logger.Info("seal by total growing segments size", zap.String("vchannel", segmentBelongs.VChannel),
zap.Uint64("sealThreshold", threshold),
zap.Int64("sealSegment", segmentBelongs.SegmentID))
if pm, ok := s.managers.Get(segmentBelongs.PChannel); ok {
pm.MustSealSegments(s.taskNotifier.Context(), *segmentBelongs)
}
}
}
}
// shouldEnableBackoff checks if the backoff should be enabled.
// if there's any pchannel has a segment wait for seal, enable backoff.
func (s *sealOperationInspectorImpl) shouldEnableBackoff() bool {
enableBackoff := false
s.managers.Range(func(_ string, pm SealOperator) bool {
if !pm.IsNoWaitSeal() {
enableBackoff = true
return false
}
return true
})
return enableBackoff
}
// tryToSealPartition tries to seal the segment with the specified policies.
func (s *sealOperationInspectorImpl) tryToSealPartition(infos typeutil.Set[stats.SegmentBelongs]) {
for info := range infos {
pm, ok := s.managers.Get(info.PChannel)
if !ok {
continue
}
pm.TryToSealSegments(s.taskNotifier.Context(), info)
}
}

View File

@ -1,58 +0,0 @@
package inspector
import (
"context"
"sync"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
)
var (
segmentSealedInspector SealOperationInspector
initOnce sync.Once
)
func GetSegmentSealedInspector() SealOperationInspector {
initOnce.Do(func() {
segmentSealedInspector = NewSealedInspector(resource.Resource().SegmentAssignStatsManager().SealNotifier())
})
return segmentSealedInspector
}
// SealOperationInspector is the inspector to check if a segment should be sealed or not.
type SealOperationInspector interface {
// TriggerSealWaited triggers the seal waited segment.
TriggerSealWaited(ctx context.Context, pchannel string) error
// RegisterPChannelManager registers a pchannel manager.
RegisterPChannelManager(m SealOperator)
// UnregisterPChannelManager unregisters a pchannel manager.
UnregisterPChannelManager(m SealOperator)
// Close closes the inspector.
Close()
}
// SealOperator is a segment seal operator.
type SealOperator interface {
// Channel returns the pchannel info.
Channel() types.PChannelInfo
// TryToSealSegments tries to seal segments. If infos are given, the seal operation is only applied to the related partitions and the segments waiting for seal;
// otherwise, the seal operation is applied to all partitions.
// Segments that cannot be sealed yet are kept waiting and retried later.
TryToSealSegments(ctx context.Context, infos ...stats.SegmentBelongs)
// TryToSealWaitedSegment tries to seal the segments that are waiting to be sealed.
// Segments that cannot be sealed yet are kept waiting and retried later.
TryToSealWaitedSegment(ctx context.Context)
// MustSealSegments seals the given segments and waiting seal segments.
MustSealSegments(ctx context.Context, infos ...stats.SegmentBelongs)
// IsNoWaitSeal returns whether there's no segment wait for seal.
IsNoWaitSeal() bool
}

View File

@ -1,69 +0,0 @@
package inspector
import (
"context"
"sync"
"testing"
"time"
"github.com/stretchr/testify/mock"
"go.uber.org/atomic"
"github.com/milvus-io/milvus/internal/mocks/streamingnode/server/wal/interceptors/segment/mock_inspector"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
)
func TestSealedInspector(t *testing.T) {
paramtable.Init()
resource.InitForTest(t)
notifier := stats.NewSealSignalNotifier()
inspector := NewSealedInspector(notifier)
o := mock_inspector.NewMockSealOperator(t)
ops := atomic.NewInt32(0)
o.EXPECT().Channel().Return(types.PChannelInfo{Name: "v1"})
o.EXPECT().TryToSealSegments(mock.Anything, mock.Anything).
RunAndReturn(func(ctx context.Context, sb ...stats.SegmentBelongs) {
ops.Add(1)
})
o.EXPECT().TryToSealWaitedSegment(mock.Anything).
RunAndReturn(func(ctx context.Context) {
ops.Add(1)
})
o.EXPECT().IsNoWaitSeal().RunAndReturn(func() bool {
return ops.Load()%2 == 0
})
inspector.RegisterPChannelManager(o)
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
for i := 0; i < 5; i++ {
inspector.TriggerSealWaited(context.Background(), "v1")
ops.Add(1)
}
}()
go func() {
defer wg.Done()
for i := 0; i < 5; i++ {
notifier.AddAndNotify(stats.SegmentBelongs{
PChannel: "v1",
VChannel: "vv1",
CollectionID: 12,
PartitionID: 1,
SegmentID: 2,
})
time.Sleep(5 * time.Millisecond)
}
time.Sleep(500 * time.Millisecond)
}()
wg.Wait()
inspector.UnregisterPChannelManager(o)
inspector.Close()
}

View File

@ -1,30 +0,0 @@
package manager
import (
"go.uber.org/atomic"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/txn"
)
// AssignSegmentRequest is a request to allocate segment.
type AssignSegmentRequest struct {
CollectionID int64
PartitionID int64
InsertMetrics stats.InsertMetrics
TimeTick uint64
TxnSession *txn.TxnSession
}
// AssignSegmentResult is a result of segment allocation.
// The sum of the result rows is equal to InsertMetrics.Rows.
type AssignSegmentResult struct {
SegmentID int64
Acknowledge *atomic.Int32 // used to ack the segment assign result has been consumed
}
// Ack acknowledges that the segment assign result has been consumed.
// It must be called exactly once, after the segment assign result has been consumed.
func (r *AssignSegmentResult) Ack() {
r.Acknowledge.Dec()
}

View File

@ -1,352 +0,0 @@
package manager
import (
"context"
"sync"
"github.com/cockroachdb/errors"
"github.com/samber/lo"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/policy"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/metricsutil"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/streamingpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
)
var ErrFencedAssign = errors.New("fenced assign")
// newPartitionSegmentManager creates a new partition segment assign manager.
func newPartitionSegmentManager(
wal *syncutil.Future[wal.WAL],
pchannel types.PChannelInfo,
vchannel string,
collectionID int64,
partitionID int64,
segments []*segmentAllocManager,
metrics *metricsutil.SegmentAssignMetrics,
) *partitionSegmentManager {
return &partitionSegmentManager{
mu: sync.Mutex{},
logger: resource.Resource().Logger().With(
log.FieldComponent("segment-assigner"),
zap.Any("pchannel", pchannel),
zap.Any("pchannel", pchannel),
zap.String("vchannel", vchannel),
zap.Int64("collectionID", collectionID),
zap.Int64("partitionID", paritionID)),
wal: wal,
pchannel: pchannel,
vchannel: vchannel,
collectionID: collectionID,
partitionID: partitionID,
segments: segments,
metrics: metrics,
}
}
// partitionSegmentManager is an assignment manager for a determined partition on a determined vchannel.
type partitionSegmentManager struct {
mu sync.Mutex
logger *log.MLogger
wal *syncutil.Future[wal.WAL]
pchannel types.PChannelInfo
vchannel string
collectionID int64
partitionID int64
segments []*segmentAllocManager // there will be very few segments in this list.
fencedAssignTimeTick uint64 // the time tick that the assign operation is fenced.
metrics *metricsutil.SegmentAssignMetrics
}
func (m *partitionSegmentManager) CollectionID() int64 {
return m.collectionID
}
// AssignSegment assigns a segment for an assign segment request.
func (m *partitionSegmentManager) AssignSegment(ctx context.Context, req *AssignSegmentRequest) (*AssignSegmentResult, error) {
m.mu.Lock()
defer m.mu.Unlock()
// !!! We have promised that fencedAssignTimeTick is always less than the time tick of any new incoming insert request, guaranteed by the Barrier TimeTick of ManualFlush.
// So it's just a promise check here.
// If the request time tick is less than the fenced time tick, the assign operation is fenced.
// A special error will be returned to indicate the assign operation is fenced.
if req.TimeTick <= m.fencedAssignTimeTick {
return nil, ErrFencedAssign
}
return m.assignSegment(ctx, req)
}
// SealAndFenceSegmentUntil seals all segments that contain messages with a time tick less than the incoming time tick.
func (m *partitionSegmentManager) SealAndFenceSegmentUntil(timeTick uint64) (sealedSegments []*segmentAllocManager) {
m.mu.Lock()
defer m.mu.Unlock()
// no-op if the incoming time tick is less than the fenced time tick.
if timeTick <= m.fencedAssignTimeTick {
return
}
segmentManagers := m.collectShouldBeSealedWithPolicy(func(segmentMeta *segmentAllocManager) (policy.PolicyName, bool) { return policy.PolicyNameFenced, true })
// fence the assign operation until the incoming time tick or latest assigned timetick.
// The new incoming assignment request will be fenced.
// So no insert operation before the fenced time tick can be added to the growing segments (no more inserts can be applied to them).
// In other words, all insert operations before the fenced time tick are sealed into the returned segments.
if timeTick > m.fencedAssignTimeTick {
m.fencedAssignTimeTick = timeTick
}
return segmentManagers
}
// CollectShouldBeSealed tries to collect all segments that should be sealed.
func (m *partitionSegmentManager) CollectShouldBeSealed() []*segmentAllocManager {
m.mu.Lock()
defer m.mu.Unlock()
return m.collectShouldBeSealedWithPolicy(m.hitSealPolicy)
}
// CollectionMustSealed removes the specified segment from the assignment and marks it to be force-sealed.
func (m *partitionSegmentManager) CollectionMustSealed(segmentID int64) *segmentAllocManager {
m.mu.Lock()
defer m.mu.Unlock()
var target *segmentAllocManager
m.segments = lo.Filter(m.segments, func(segment *segmentAllocManager, _ int) bool {
if segment.inner.GetSegmentId() == segmentID {
target = segment.WithSealPolicy(policy.PolicyNameForce)
return false
}
return true
})
return target
}
// collectShouldBeSealedWithPolicy collects all segments that should be sealed by policy.
func (m *partitionSegmentManager) collectShouldBeSealedWithPolicy(predicates func(segmentMeta *segmentAllocManager) (policy.PolicyName, bool)) []*segmentAllocManager {
shouldBeSealedSegments := make([]*segmentAllocManager, 0, len(m.segments))
segments := make([]*segmentAllocManager, 0, len(m.segments))
for _, segment := range m.segments {
// An already-sealed segment may come from recovery.
if segment.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_SEALED {
shouldBeSealedSegments = append(shouldBeSealedSegments, segment.WithSealPolicy(policy.PolicyNameRecover))
m.logger.Info("segment has been sealed, remove it from assignment",
zap.Int64("segmentID", segment.GetSegmentID()),
zap.String("state", segment.GetState().String()),
zap.Any("stat", segment.GetStat()),
)
continue
}
// A growing segment hit by a seal policy should be removed from the assignment manager.
if segment.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING {
if policyName, shouldBeSealed := predicates(segment); shouldBeSealed {
shouldBeSealedSegments = append(shouldBeSealedSegments, segment.WithSealPolicy(policyName))
m.logger.Info("segment should be sealed by policy",
zap.Int64("segmentID", segment.GetSegmentID()),
zap.String("policy", string(policyName)),
zap.Any("stat", segment.GetStat()),
)
continue
}
}
segments = append(segments, segment)
}
m.segments = segments
return shouldBeSealedSegments
}
// CollectAllSegmentsAndClear collects all segments in the manager and clear the manager.
func (m *partitionSegmentManager) CollectAllSegmentsAndClear() []*segmentAllocManager {
m.mu.Lock()
defer m.mu.Unlock()
segments := m.segments
m.segments = nil
return segments
}
// CollectAllCanBeSealedAndClear collects all segments that can be sealed and clear the manager.
func (m *partitionSegmentManager) CollectAllCanBeSealedAndClear(policy policy.PolicyName) []*segmentAllocManager {
m.mu.Lock()
defer m.mu.Unlock()
canBeSealed := make([]*segmentAllocManager, 0, len(m.segments))
for _, segment := range m.segments {
if segment.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING ||
segment.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_SEALED {
canBeSealed = append(canBeSealed, segment.WithSealPolicy(policy))
}
}
m.segments = make([]*segmentAllocManager, 0)
return canBeSealed
}
// hitSealPolicy checks if the segment should be sealed by policy.
func (m *partitionSegmentManager) hitSealPolicy(segmentMeta *segmentAllocManager) (policy.PolicyName, bool) {
stat := segmentMeta.GetStat()
for _, p := range policy.GetSegmentAsyncSealPolicy() {
if result := p.ShouldBeSealed(stat); result.ShouldBeSealed {
m.logger.Info("segment should be sealed by policy",
zap.Int64("segmentID", segmentMeta.GetSegmentID()),
zap.String("policy", string(result.PolicyName)),
zap.Any("stat", stat),
zap.Any("extraInfo", result.ExtraInfo),
)
return result.PolicyName, true
}
}
return "", false
}
// allocNewGrowingSegment allocates a new growing segment.
// After this operation, the growing segment can be seen at datacoord.
func (m *partitionSegmentManager) allocNewGrowingSegment(ctx context.Context) (*segmentAllocManager, error) {
// A pending segment may already have been created before a failure or during recovery.
pendingSegment := m.findPendingSegmentInMeta()
if pendingSegment == nil {
// if there's no pending segment, create a new pending segment.
var err error
if pendingSegment, err = m.createNewPendingSegment(ctx); err != nil {
return nil, err
}
}
// Transfer the pending segment into growing state.
// Alloc the growing segment at datacoord first.
mix, err := resource.Resource().MixCoordClient().GetWithContext(ctx)
if err != nil {
return nil, err
}
resp, err := mix.AllocSegment(ctx, &datapb.AllocSegmentRequest{
CollectionId: pendingSegment.GetCollectionID(),
PartitionId: pendingSegment.GetPartitionID(),
SegmentId: pendingSegment.GetSegmentID(),
Vchannel: pendingSegment.GetVChannel(),
StorageVersion: pendingSegment.GetStorageVersion(),
IsCreatedByStreaming: true,
})
if err := merr.CheckRPCCall(resp, err); err != nil {
return nil, errors.Wrap(err, "failed to alloc growing segment at datacoord")
}
// Generate the growing segment limitation.
limitation := policy.GetSegmentLimitationPolicy().GenerateLimitation()
msg, err := message.NewCreateSegmentMessageBuilderV2().
WithVChannel(pendingSegment.GetVChannel()).
WithHeader(&message.CreateSegmentMessageHeader{
CollectionId: pendingSegment.GetCollectionID(),
// We only execute one segment creation operation at a time.
// But in future, we need to modify the segment creation operation to support batch creation.
// Because a partition-key based collection may create a huge number of segments at the same time.
PartitionId: pendingSegment.GetPartitionID(),
SegmentId: pendingSegment.GetSegmentID(),
StorageVersion: pendingSegment.GetStorageVersion(),
MaxSegmentSize: limitation.SegmentSize,
}).
WithBody(&message.CreateSegmentMessageBody{}).BuildMutable()
if err != nil {
return nil, errors.Wrapf(err, "failed to create new segment message, segmentID: %d", pendingSegment.GetSegmentID())
}
// Send CreateSegmentMessage into wal.
msgID, err := m.wal.Get().Append(ctx, msg)
if err != nil {
return nil, errors.Wrapf(err, "failed to send create segment message into wal, segmentID: %d", pendingSegment.GetSegmentID())
}
// Commit it into streaming node meta.
// growing segment can be assigned now.
tx := pendingSegment.BeginModification()
tx.IntoGrowing(&limitation, msgID.TimeTick)
if err := tx.Commit(ctx); err != nil {
return nil, errors.Wrapf(err, "failed to commit modification of segment assignment into growing, segmentID: %d", pendingSegment.GetSegmentID())
}
m.logger.Info("generate new growing segment",
zap.Int64("segmentID", pendingSegment.GetSegmentID()),
zap.String("messageID", msgID.MessageID.String()),
zap.Uint64("timetick", msgID.TimeTick),
zap.String("limitationPolicy", limitation.PolicyName),
zap.Uint64("segmentBinarySize", limitation.SegmentSize),
zap.Any("extraInfo", limitation.ExtraInfo),
)
return pendingSegment, nil
}
// findPendingSegmentInMeta finds a pending segment in the meta list.
func (m *partitionSegmentManager) findPendingSegmentInMeta() *segmentAllocManager {
// Find a pending segment if one already exists.
for _, segment := range m.segments {
if segment.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_PENDING {
return segment
}
}
return nil
}
// createNewPendingSegment creates a new pending segment.
// A pending segment only has a segment id; it's not a real segment,
// and it will be transferred into the growing state once it is registered to datacoord.
// The segment id is always allocated from rootcoord to avoid duplicates.
// Pending state is used to avoid growing segment leak at datacoord.
func (m *partitionSegmentManager) createNewPendingSegment(ctx context.Context) (*segmentAllocManager, error) {
// Allocate new segment id and create ts from remote.
segmentID, err := resource.Resource().IDAllocator().Allocate(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to allocate segment id")
}
storageVersion := storage.StorageV1
if paramtable.Get().CommonCfg.EnableStorageV2.GetAsBool() {
storageVersion = storage.StorageV2
}
meta := newSegmentAllocManager(m.pchannel, m.collectionID, m.partitionID, int64(segmentID), m.vchannel, m.metrics, storageVersion)
tx := meta.BeginModification()
tx.IntoPending()
if err := tx.Commit(ctx); err != nil {
return nil, errors.Wrap(err, "failed to commit segment assignment modification")
}
m.segments = append(m.segments, meta)
return meta, nil
}
// assignSegment assigns a segment for an assign segment request; the assignment may later trigger a seal operation.
func (m *partitionSegmentManager) assignSegment(ctx context.Context, req *AssignSegmentRequest) (*AssignSegmentResult, error) {
hitTimeTickTooOld := false
// Alloc segment for insert at allocated segments.
for _, segment := range m.segments {
result, err := segment.AllocRows(ctx, req)
if err == nil {
return result, nil
}
if errors.IsAny(err, ErrTooLargeInsert) {
// Return error directly.
// If the insert message is too large to hold by single segment, it can not be inserted anymore.
return nil, err
}
if errors.Is(err, ErrTimeTickTooOld) {
hitTimeTickTooOld = true
}
}
// If the time tick is too old for the existing segments, it cannot be inserted even if a new growing segment is allocated
// (a new growing segment's time tick is always greater than the old growing segment's time tick).
// Return directly to avoid unnecessary growing segment allocation.
if hitTimeTickTooOld {
return nil, ErrTimeTickTooOld
}
// If not inserted, ask a new growing segment to insert.
newGrowingSegment, err := m.allocNewGrowingSegment(ctx)
if err != nil {
return nil, err
}
return newGrowingSegment.AllocRows(ctx, req)
}

View File

@ -1,334 +0,0 @@
package manager
import (
"sync"
"github.com/cockroachdb/errors"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/policy"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/metricsutil"
"github.com/milvus-io/milvus/internal/util/streamingutil/status"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"
"github.com/milvus-io/milvus/pkg/v2/proto/streamingpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)
// buildNewPartitionManagers builds new partition managers.
func buildNewPartitionManagers(
wal *syncutil.Future[wal.WAL],
pchannel types.PChannelInfo,
rawMetas []*streamingpb.SegmentAssignmentMeta,
collectionInfos []*rootcoordpb.CollectionInfoOnPChannel,
metrics *metricsutil.SegmentAssignMetrics,
) (*partitionSegmentManagers, []*segmentAllocManager) {
// create a map to check if the partition exists.
partitionExist := make(map[int64]struct{}, len(collectionInfos))
// collectionMap is a map from collectionID to collectionInfo.
collectionInfoMap := make(map[int64]*rootcoordpb.CollectionInfoOnPChannel, len(collectionInfos))
for _, collectionInfo := range collectionInfos {
for _, partition := range collectionInfo.GetPartitions() {
partitionExist[partition.GetPartitionId()] = struct{}{}
}
collectionInfoMap[collectionInfo.GetCollectionId()] = collectionInfo
}
// recover the segment infos from the streaming node segment assignment meta storage
waitForSealed := make([]*segmentAllocManager, 0)
metaMaps := make(map[int64][]*segmentAllocManager)
for _, rawMeta := range rawMetas {
m := newSegmentAllocManagerFromProto(pchannel, rawMeta, metrics)
if _, ok := partitionExist[rawMeta.GetPartitionId()]; !ok {
// related collection or partition is not exist.
// should be sealed right now.
waitForSealed = append(waitForSealed, m.WithSealPolicy(policy.PolicyNamePartitionNotFound))
continue
}
if _, ok := metaMaps[rawMeta.GetPartitionId()]; !ok {
metaMaps[rawMeta.GetPartitionId()] = make([]*segmentAllocManager, 0, 2)
}
metaMaps[rawMeta.GetPartitionId()] = append(metaMaps[rawMeta.GetPartitionId()], m)
}
// create managers list.
managers := typeutil.NewConcurrentMap[int64, *partitionSegmentManager]()
for collectionID, collectionInfo := range collectionInfoMap {
for _, partition := range collectionInfo.GetPartitions() {
segmentManagers := make([]*segmentAllocManager, 0)
// if the recovered meta exists, use it.
if managers, ok := metaMaps[partition.GetPartitionId()]; ok {
segmentManagers = managers
}
// otherwise, just create a new manager.
_, ok := managers.GetOrInsert(partition.GetPartitionId(), newPartitionSegmentManager(
wal,
pchannel,
collectionInfo.GetVchannel(),
collectionID,
partition.GetPartitionId(),
segmentManagers,
metrics,
))
if ok {
panic("partition manager already exists when buildNewPartitionManagers in segment assignment service, there's a bug in system")
}
}
}
m := &partitionSegmentManagers{
mu: sync.Mutex{},
logger: resource.Resource().Logger().With(
log.FieldComponent("segment-assigner"),
zap.String("pchannel", pchannel.Name),
),
wal: wal,
pchannel: pchannel,
managers: managers,
collectionInfos: collectionInfoMap,
metrics: metrics,
}
m.updateMetrics()
return m, waitForSealed
}
// partitionSegmentManagers is a collection of partition managers.
type partitionSegmentManagers struct {
mu sync.Mutex
logger *log.MLogger
wal *syncutil.Future[wal.WAL]
pchannel types.PChannelInfo
managers *typeutil.ConcurrentMap[int64, *partitionSegmentManager] // map partitionID to partition manager
collectionInfos map[int64]*rootcoordpb.CollectionInfoOnPChannel // map collectionID to collectionInfo
metrics *metricsutil.SegmentAssignMetrics
}
// NewCollection creates a new partition manager.
func (m *partitionSegmentManagers) NewCollection(collectionID int64, vchannel string, partitionID []int64) {
m.mu.Lock()
defer m.mu.Unlock()
if _, ok := m.collectionInfos[collectionID]; ok {
m.logger.Warn("collection already exists when NewCollection in segment assignment service",
zap.Int64("collectionID", collectionID),
)
return
}
m.collectionInfos[collectionID] = newCollectionInfo(collectionID, vchannel, partitionID)
for _, partitionID := range partitionID {
if _, loaded := m.managers.GetOrInsert(partitionID, newPartitionSegmentManager(
m.wal,
m.pchannel,
vchannel,
collectionID,
partitionID,
make([]*segmentAllocManager, 0),
m.metrics,
)); loaded {
m.logger.Warn("partition already exists when NewCollection in segment assignment service, it's may be a bug in system",
zap.Int64("collectionID", collectionID),
zap.Int64("partitionID", partitionID),
)
}
}
m.logger.Info("collection created in segment assignment service",
zap.Int64("collectionID", collectionID),
zap.String("vchannel", vchannel),
zap.Int64s("partitionIDs", partitionID))
m.updateMetrics()
}
// NewPartition creates a new partition manager.
func (m *partitionSegmentManagers) NewPartition(collectionID int64, partitionID int64) {
m.mu.Lock()
defer m.mu.Unlock()
if _, ok := m.collectionInfos[collectionID]; !ok {
m.logger.Warn("collection not exists when NewPartition in segment assignment service, it's may be a bug in system",
zap.Int64("collectionID", collectionID),
zap.Int64("partitionID", partitionID),
)
return
}
m.collectionInfos[collectionID].Partitions = append(m.collectionInfos[collectionID].Partitions, &rootcoordpb.PartitionInfoOnPChannel{
PartitionId: partitionID,
})
if _, loaded := m.managers.GetOrInsert(partitionID, newPartitionSegmentManager(
m.wal,
m.pchannel,
m.collectionInfos[collectionID].Vchannel,
collectionID,
partitionID,
make([]*segmentAllocManager, 0),
m.metrics,
)); loaded {
m.logger.Warn(
"partition already exists when NewPartition in segment assignment service, it's may be a bug in system",
zap.Int64("collectionID", collectionID),
zap.Int64("partitionID", partitionID))
}
m.logger.Info("partition created in segment assignment service",
zap.Int64("collectionID", collectionID),
zap.String("vchannel", m.collectionInfos[collectionID].Vchannel),
zap.Int64("partitionID", partitionID))
m.updateMetrics()
}
// Get gets a partition manager from the partition managers.
func (m *partitionSegmentManagers) Get(collectionID int64, partitionID int64) (*partitionSegmentManager, error) {
pm, ok := m.managers.Get(partitionID)
if !ok {
return nil, status.NewUnrecoverableError("partition %d in collection %d not found in segment assignment service", partitionID, collectionID)
}
return pm, nil
}
// RemoveCollection removes a collection manager from the partition managers.
// Return the segments that need to be sealed.
func (m *partitionSegmentManagers) RemoveCollection(collectionID int64) []*segmentAllocManager {
m.mu.Lock()
defer m.mu.Unlock()
collectionInfo, ok := m.collectionInfos[collectionID]
if !ok {
m.logger.Warn("collection not exists when RemoveCollection in segment assignment service", zap.Int64("collectionID", collectionID))
return nil
}
delete(m.collectionInfos, collectionID)
needSealed := make([]*segmentAllocManager, 0)
partitionIDs := make([]int64, 0, len(collectionInfo.Partitions))
segmentIDs := make([]int64, 0, len(collectionInfo.Partitions))
for _, partition := range collectionInfo.Partitions {
pm, ok := m.managers.Get(partition.PartitionId)
if ok {
segments := pm.CollectAllCanBeSealedAndClear(policy.PolicyNameCollectionRemoved)
partitionIDs = append(partitionIDs, partition.PartitionId)
for _, segment := range segments {
segmentIDs = append(segmentIDs, segment.GetSegmentID())
}
needSealed = append(needSealed, segments...)
m.managers.Remove(partition.PartitionId)
}
}
m.logger.Info(
"collection removed in segment assignment service",
zap.Int64("collectionID", collectionID),
zap.Int64s("partitionIDs", partitionIDs),
zap.Int64s("segmentIDs", segmentIDs),
)
m.updateMetrics()
return needSealed
}
// RemovePartition removes a partition manager from the partition managers.
func (m *partitionSegmentManagers) RemovePartition(collectionID int64, partitionID int64) []*segmentAllocManager {
m.mu.Lock()
defer m.mu.Unlock()
collectionInfo, ok := m.collectionInfos[collectionID]
if !ok {
m.logger.Warn("collection not exists when RemovePartition in segment assignment service", zap.Int64("collectionID", collectionID))
return nil
}
partitions := make([]*rootcoordpb.PartitionInfoOnPChannel, 0, len(collectionInfo.Partitions)-1)
for _, partition := range collectionInfo.Partitions {
if partition.PartitionId != partitionID {
partitions = append(partitions, partition)
}
}
collectionInfo.Partitions = partitions
pm, loaded := m.managers.GetAndRemove(partitionID)
if !loaded {
m.logger.Warn("partition not exists when RemovePartition in segment assignment service",
zap.Int64("collectionID", collectionID),
zap.Int64("partitionID", partitionID))
return nil
}
segments := pm.CollectAllCanBeSealedAndClear(policy.PolicyNamePartitionRemoved)
segmentIDs := make([]int64, 0, len(segments))
for _, segment := range segments {
segmentIDs = append(segmentIDs, segment.GetSegmentID())
}
m.logger.Info(
"partition removed in segment assignment service",
zap.Int64("collectionID", collectionID),
zap.Int64("partitionID", partitionID),
zap.Int64s("segmentIDs", segmentIDs),
)
m.updateMetrics()
return segments
}
// SealAndFenceSegmentUntil seals all segments that contain messages with a time tick less than the incoming time tick.
func (m *partitionSegmentManagers) SealAndFenceSegmentUntil(collectionID int64, timetick uint64) ([]*segmentAllocManager, error) {
m.mu.Lock()
defer m.mu.Unlock()
collectionInfo, ok := m.collectionInfos[collectionID]
if !ok {
m.logger.Warn("collection not exists when Flush in segment assignment service", zap.Int64("collectionID", collectionID))
return nil, errors.New("collection not found")
}
sealedSegments := make([]*segmentAllocManager, 0)
segmentIDs := make([]int64, 0)
// collect all partitions
for _, partition := range collectionInfo.Partitions {
// Seal all segments and fence assign to the partition manager.
pm, ok := m.managers.Get(partition.PartitionId)
if !ok {
m.logger.Warn("partition not found when Flush in segment assignment service, it's may be a bug in system",
zap.Int64("collectionID", collectionID),
zap.Int64("partitionID", partition.PartitionId))
return nil, errors.New("partition not found")
}
newSealedSegments := pm.SealAndFenceSegmentUntil(timetick)
for _, segment := range newSealedSegments {
segmentIDs = append(segmentIDs, segment.GetSegmentID())
}
sealedSegments = append(sealedSegments, newSealedSegments...)
}
m.logger.Info(
"all segments sealed and fence assign until timetick in segment assignment service",
zap.Int64("collectionID", collectionID),
zap.Uint64("timetick", timetick),
zap.Int64s("segmentIDs", segmentIDs),
)
return sealedSegments, nil
}
// Range ranges the partition managers.
func (m *partitionSegmentManagers) Range(f func(pm *partitionSegmentManager)) {
m.managers.Range(func(_ int64, pm *partitionSegmentManager) bool {
f(pm)
return true
})
}
func (m *partitionSegmentManagers) updateMetrics() {
m.metrics.UpdatePartitionCount(m.managers.Len())
m.metrics.UpdateCollectionCount(len(m.collectionInfos))
}
// newCollectionInfo creates a new collection info.
func newCollectionInfo(collectionID int64, vchannel string, partitionIDs []int64) *rootcoordpb.CollectionInfoOnPChannel {
info := &rootcoordpb.CollectionInfoOnPChannel{
CollectionId: collectionID,
Vchannel: vchannel,
Partitions: make([]*rootcoordpb.PartitionInfoOnPChannel, 0, len(partitionIDs)),
}
for _, partitionID := range partitionIDs {
info.Partitions = append(info.Partitions, &rootcoordpb.PartitionInfoOnPChannel{
PartitionId: partitionID,
})
}
return info
}

View File

@ -1,299 +0,0 @@
package manager
import (
"context"
"github.com/cockroachdb/errors"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/inspector"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/metricsutil"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"
"github.com/milvus-io/milvus/pkg/v2/proto/streamingpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)
// RecoverPChannelSegmentAllocManager recovers the segment assignment manager at the specified pchannel.
func RecoverPChannelSegmentAllocManager(
ctx context.Context,
pchannel types.PChannelInfo,
wal *syncutil.Future[wal.WAL],
) (*PChannelSegmentAllocManager, error) {
// recover streaming node growing segment metas.
rawMetas, err := resource.Resource().StreamingNodeCatalog().ListSegmentAssignment(ctx, pchannel.Name)
if err != nil {
return nil, errors.Wrap(err, "failed to list segment assignment from catalog")
}
// get collection and partition info from rootcoord.
mix, err := resource.Resource().MixCoordClient().GetWithContext(ctx)
if err != nil {
return nil, err
}
resp, err := mix.GetPChannelInfo(ctx, &rootcoordpb.GetPChannelInfoRequest{
Pchannel: pchannel.Name,
})
if err := merr.CheckRPCCall(resp, err); err != nil {
return nil, errors.Wrap(err, "failed to get pchannel info from rootcoord")
}
metrics := metricsutil.NewSegmentAssignMetrics(pchannel.Name)
managers, waitForSealed := buildNewPartitionManagers(wal, pchannel, rawMetas, resp.GetCollections(), metrics)
// PChannelSegmentAllocManager is the segment assign manager of determined pchannel.
logger := log.With(zap.Any("pchannel", pchannel))
return &PChannelSegmentAllocManager{
lifetime: typeutil.NewLifetime(),
logger: logger,
pchannel: pchannel,
managers: managers,
helper: newSealQueue(logger, wal, waitForSealed, metrics),
metrics: metrics,
}, nil
}
// PChannelSegmentAllocManager is a segment assign manager of determined pchannel.
type PChannelSegmentAllocManager struct {
lifetime *typeutil.Lifetime
logger *log.MLogger
pchannel types.PChannelInfo
managers *partitionSegmentManagers
// There should always
helper *sealQueue
metrics *metricsutil.SegmentAssignMetrics
}
// Channel returns the pchannel info.
func (m *PChannelSegmentAllocManager) Channel() types.PChannelInfo {
return m.pchannel
}
// NewCollection creates a new collection with the specified vchannel and partitionIDs.
func (m *PChannelSegmentAllocManager) NewCollection(collectionID int64, vchannel string, partitionIDs []int64) error {
if err := m.checkLifetime(); err != nil {
return err
}
defer m.lifetime.Done()
m.managers.NewCollection(collectionID, vchannel, partitionIDs)
return nil
}
// NewPartition creates a new partition with the specified partitionID.
func (m *PChannelSegmentAllocManager) NewPartition(collectionID int64, partitionID int64) error {
if err := m.checkLifetime(); err != nil {
return err
}
defer m.lifetime.Done()
m.managers.NewPartition(collectionID, partitionID)
return nil
}
// AssignSegment assigns a segment for an assign segment request.
func (m *PChannelSegmentAllocManager) AssignSegment(ctx context.Context, req *AssignSegmentRequest) (*AssignSegmentResult, error) {
if err := m.checkLifetime(); err != nil {
return nil, err
}
defer m.lifetime.Done()
manager, err := m.managers.Get(req.CollectionID, req.PartitionID)
if err != nil {
return nil, err
}
return manager.AssignSegment(ctx, req)
}
// RemoveCollection removes the specified collection.
func (m *PChannelSegmentAllocManager) RemoveCollection(ctx context.Context, collectionID int64) error {
if err := m.checkLifetime(); err != nil {
return err
}
defer m.lifetime.Done()
waitForSealed := m.managers.RemoveCollection(collectionID)
m.helper.AsyncSeal(waitForSealed...)
// trigger a seal operation in the background right now.
inspector.GetSegmentSealedInspector().TriggerSealWaited(ctx, m.pchannel.Name)
// wait until all segments have been flushed.
return m.helper.WaitUntilNoWaitSeal(ctx)
}
// RemovePartition removes the specified partitions.
func (m *PChannelSegmentAllocManager) RemovePartition(ctx context.Context, collectionID int64, partitionID int64) error {
if err := m.checkLifetime(); err != nil {
return err
}
defer m.lifetime.Done()
// Remove the given partition from the partition managers.
// And seal all segments that should be sealed.
waitForSealed := m.managers.RemovePartition(collectionID, partitionID)
m.helper.AsyncSeal(waitForSealed...)
// trigger a seal operation in the background right now.
inspector.GetSegmentSealedInspector().TriggerSealWaited(ctx, m.pchannel.Name)
// wait until all segments have been flushed.
return m.helper.WaitUntilNoWaitSeal(ctx)
}
// SealAndFenceSegmentUntil seals all segments that contain messages with a time tick less than the incoming time tick.
func (m *PChannelSegmentAllocManager) SealAndFenceSegmentUntil(ctx context.Context, collectionID int64, timetick uint64) ([]int64, error) {
if err := m.checkLifetime(); err != nil {
return nil, err
}
defer m.lifetime.Done()
// All messages with a time tick less than the incoming time tick belong to the output sealed segments.
// So once the output sealed segments are flushed, all messages with a time tick less than the incoming time tick are flushed.
sealedSegments, err := m.managers.SealAndFenceSegmentUntil(collectionID, timetick)
if err != nil {
return nil, err
}
segmentIDs := make([]int64, 0, len(sealedSegments))
for _, segment := range sealedSegments {
segmentIDs = append(segmentIDs, segment.GetSegmentID())
}
// trigger a seal operation in the background right now.
m.helper.AsyncSeal(sealedSegments...)
// wait until all segments have been flushed.
if err := m.helper.WaitUntilNoWaitSeal(ctx); err != nil {
return nil, err
}
return segmentIDs, nil
}
// TryToSealSegments tries to seal the specified segments.
func (m *PChannelSegmentAllocManager) TryToSealSegments(ctx context.Context, infos ...stats.SegmentBelongs) {
if !m.lifetime.Add(typeutil.LifetimeStateWorking) {
return
}
defer m.lifetime.Done()
if len(infos) == 0 {
// if no segment info specified, try to seal all segments.
m.managers.Range(func(pm *partitionSegmentManager) {
m.helper.AsyncSeal(pm.CollectShouldBeSealed()...)
})
} else {
// if some segment info specified, try to seal the specified partition.
for _, info := range infos {
if pm, err := m.managers.Get(info.CollectionID, info.PartitionID); err == nil {
m.helper.AsyncSeal(pm.CollectShouldBeSealed()...)
}
}
}
m.helper.SealAllWait(ctx)
}
func (m *PChannelSegmentAllocManager) MustSealSegments(ctx context.Context, infos ...stats.SegmentBelongs) {
if !m.lifetime.Add(typeutil.LifetimeStateWorking) {
return
}
defer m.lifetime.Done()
for _, info := range infos {
if pm, err := m.managers.Get(info.CollectionID, info.PartitionID); err == nil {
if segment := pm.CollectionMustSealed(info.SegmentID); segment != nil {
m.helper.AsyncSeal(segment)
} else {
m.logger.Info(
"segment not found when trigger must seal, may be already sealed",
zap.Int64("collectionID", info.CollectionID),
zap.Int64("partitionID", info.PartitionID),
zap.Int64("segmentID", info.SegmentID),
)
}
}
}
m.helper.SealAllWait(ctx)
}
// TryToSealWaitedSegment tries to seal the wait for sealing segment.
func (m *PChannelSegmentAllocManager) TryToSealWaitedSegment(ctx context.Context) {
if !m.lifetime.Add(typeutil.LifetimeStateWorking) {
return
}
defer m.lifetime.Done()
m.helper.SealAllWait(ctx)
}
// IsNoWaitSeal returns whether the segment manager has no segment waiting for seal.
func (m *PChannelSegmentAllocManager) IsNoWaitSeal() bool {
return m.helper.IsEmpty()
}
// WaitUntilNoWaitSeal waits until no segment wait for seal.
func (m *PChannelSegmentAllocManager) WaitUntilNoWaitSeal(ctx context.Context) error {
if err := m.checkLifetime(); err != nil {
return err
}
defer m.lifetime.Done()
return m.helper.WaitUntilNoWaitSeal(ctx)
}
// checkLifetime checks the lifetime of the segment manager.
func (m *PChannelSegmentAllocManager) checkLifetime() error {
if !m.lifetime.Add(typeutil.LifetimeStateWorking) {
m.logger.Warn("unreachable: segment assignment manager is not working, so the wal is on closing")
return errors.New("segment assignment manager is not working")
}
return nil
}
// Close tries to persist all stats and invalidates the manager.
func (m *PChannelSegmentAllocManager) Close(ctx context.Context) {
m.logger.Info("segment assignment manager start to close")
m.lifetime.SetState(typeutil.LifetimeStateStopped)
m.lifetime.Wait()
// Try to seal all waiting segments.
m.helper.SealAllWait(ctx)
m.logger.Info("seal all waited segments done, may be some not done here", zap.Int("waitCounter", m.helper.WaitCounter()))
segments := make([]*segmentAllocManager, 0)
m.managers.Range(func(pm *partitionSegmentManager) {
segments = append(segments, pm.CollectAllSegmentsAndClear()...)
})
// Try to seal the dirty segments to avoid generating too-large segments.
protoSegments := make(map[int64]*streamingpb.SegmentAssignmentMeta, len(segments))
growingCnt := 0
for _, segment := range segments {
if segment.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING {
growingCnt++
}
if segment.IsDirtyEnough() {
// Only persist the dirty segment.
protoSegments[segment.GetSegmentID()] = segment.Snapshot()
}
}
m.logger.Info("segment assignment manager save all dirty segment assignments info",
zap.Int("dirtySegmentCount", len(protoSegments)),
zap.Int("growingSegmentCount", growingCnt),
zap.Int("segmentCount", len(segments)))
if err := resource.Resource().StreamingNodeCatalog().SaveSegmentAssignments(ctx, m.pchannel.Name, protoSegments); err != nil {
m.logger.Warn("commit segment assignment at pchannel failed", zap.Error(err))
}
// remove the stats from stats manager.
removedStatsSegmentCnt := resource.Resource().SegmentAssignStatsManager().UnregisterAllStatsOnPChannel(m.pchannel.Name)
m.logger.Info("segment assignment manager remove all segment stats from stats manager", zap.Int("removedStatsSegmentCount", removedStatsSegmentCnt))
m.metrics.Close()
}

View File

@ -1,396 +0,0 @@
package manager
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"google.golang.org/grpc"
"github.com/milvus-io/milvus/internal/mocks/mock_metastore"
"github.com/milvus-io/milvus/internal/mocks/streamingnode/server/mock_wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/inspector"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/txn"
internaltypes "github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/idalloc"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/rootcoordpb"
"github.com/milvus-io/milvus/pkg/v2/proto/streamingpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/impls/rmq"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
"github.com/milvus-io/milvus/pkg/v2/util/tsoutil"
)
func TestSegmentAllocManager(t *testing.T) {
initializeTestState(t)
w := mock_wal.NewMockWAL(t)
w.EXPECT().Append(mock.Anything, mock.Anything).Return(&wal.AppendResult{
MessageID: rmq.NewRmqID(1),
TimeTick: 2,
}, nil)
f := syncutil.NewFuture[wal.WAL]()
f.Set(w)
m, err := RecoverPChannelSegmentAllocManager(context.Background(), types.PChannelInfo{Name: "v1"}, f)
assert.NoError(t, err)
assert.NotNil(t, m)
ctx := context.Background()
// Ask for a too old timetick.
result, err := m.AssignSegment(ctx, &AssignSegmentRequest{
CollectionID: 1,
PartitionID: 1,
InsertMetrics: stats.InsertMetrics{
Rows: 100,
BinarySize: 100,
},
TimeTick: 1,
})
assert.Nil(t, result)
assert.ErrorIs(t, err, ErrTimeTickTooOld)
// Ask to allocate a segment
result, err = m.AssignSegment(ctx, &AssignSegmentRequest{
CollectionID: 1,
PartitionID: 1,
InsertMetrics: stats.InsertMetrics{
Rows: 100,
BinarySize: 100,
},
TimeTick: tsoutil.GetCurrentTime(),
})
assert.NoError(t, err)
assert.NotNil(t, result)
// Ask to allocate more segments; a new growing segment will be generated.
result2, err := m.AssignSegment(ctx, &AssignSegmentRequest{
CollectionID: 1,
PartitionID: 1,
InsertMetrics: stats.InsertMetrics{
Rows: 1024 * 1024,
BinarySize: 1024 * 1024, // 1MB setting at paramtable.
},
TimeTick: tsoutil.GetCurrentTime(),
})
assert.NoError(t, err)
assert.NotNil(t, result2)
// Ask to seal segments.
// Here we already have a sealed segment and a growing segment waiting for seal, but the result is not acked.
m.TryToSealSegments(ctx)
assert.False(t, m.IsNoWaitSeal())
// The following segment assignment will hit the size limit, so a new segment that needs sealing will be created.
result3, err := m.AssignSegment(ctx, &AssignSegmentRequest{
CollectionID: 1,
PartitionID: 1,
InsertMetrics: stats.InsertMetrics{
Rows: 1,
BinarySize: 1,
},
TimeTick: tsoutil.GetCurrentTime(),
})
assert.NoError(t, err)
assert.NotNil(t, result3)
m.TryToSealSegments(ctx)
assert.False(t, m.IsNoWaitSeal()) // result2 is not acked, so new seal segment will not be sealed right away.
result.Ack()
result2.Ack()
result3.Ack()
m.TryToSealWaitedSegment(ctx)
assert.True(t, m.IsNoWaitSeal()) // result2 is acked, so new seal segment will be sealed right away.
// interactive with txn
txnManager := txn.NewTxnManager(types.PChannelInfo{Name: "test"}, nil)
msg := message.NewBeginTxnMessageBuilderV2().
WithVChannel("v1").
WithHeader(&message.BeginTxnMessageHeader{KeepaliveMilliseconds: 1000}).
WithBody(&message.BeginTxnMessageBody{}).
MustBuildMutable().
WithTimeTick(tsoutil.GetCurrentTime())
beginTxnMsg, _ := message.AsMutableBeginTxnMessageV2(msg)
txn, err := txnManager.BeginNewTxn(ctx, beginTxnMsg)
assert.NoError(t, err)
txn.BeginDone()
for i := 0; i < 3; i++ {
result, err = m.AssignSegment(ctx, &AssignSegmentRequest{
CollectionID: 1,
PartitionID: 1,
InsertMetrics: stats.InsertMetrics{
Rows: 1024 * 1024,
BinarySize: 1024 * 1024, // 1MB setting at paramtable.
},
TxnSession: txn,
TimeTick: tsoutil.GetCurrentTime(),
})
assert.NoError(t, err)
result.Ack()
}
// because there's an uncommitted txn session, the segment will not be sealed.
m.TryToSealSegments(ctx)
assert.False(t, m.IsNoWaitSeal())
err = txn.RequestCommitAndWait(context.Background(), 0)
assert.NoError(t, err)
txn.CommitDone()
m.TryToSealSegments(ctx)
assert.True(t, m.IsNoWaitSeal())
// Try to seal a partition.
m.TryToSealSegments(ctx, stats.SegmentBelongs{
CollectionID: 1,
VChannel: "v1",
PartitionID: 2,
PChannel: "v1",
SegmentID: 3,
})
assert.True(t, m.IsNoWaitSeal())
// Try to seal with a policy
resource.Resource().SegmentAssignStatsManager().UpdateOnSync(6000, stats.SyncOperationMetrics{
BinLogCounterIncr: 100,
})
// make an unacknowledged assignment on partition 3 so the seal operation is delayed.
result, err = m.AssignSegment(ctx, &AssignSegmentRequest{
CollectionID: 1,
PartitionID: 3,
InsertMetrics: stats.InsertMetrics{
Rows: 100,
BinarySize: 100,
},
TimeTick: tsoutil.GetCurrentTime(),
})
assert.NoError(t, err)
assert.NotNil(t, result)
// Should be collected but not sealed.
m.TryToSealSegments(ctx)
assert.False(t, m.IsNoWaitSeal())
result.Ack()
// Should be sealed.
m.TryToSealSegments(ctx)
assert.True(t, m.IsNoWaitSeal())
// Test fence
ts := tsoutil.GetCurrentTime()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
ids, err := m.SealAndFenceSegmentUntil(ctx, 1, ts)
assert.Error(t, err)
assert.ErrorIs(t, err, context.DeadlineExceeded)
assert.Empty(t, ids)
assert.False(t, m.IsNoWaitSeal())
m.TryToSealSegments(ctx)
assert.True(t, m.IsNoWaitSeal())
result, err = m.AssignSegment(ctx, &AssignSegmentRequest{
CollectionID: 1,
PartitionID: 3,
InsertMetrics: stats.InsertMetrics{
Rows: 100,
BinarySize: 100,
},
TimeTick: ts,
})
assert.ErrorIs(t, err, ErrFencedAssign)
assert.Nil(t, result)
m.Close(ctx)
}
func TestCreateAndDropCollection(t *testing.T) {
initializeTestState(t)
w := mock_wal.NewMockWAL(t)
w.EXPECT().Append(mock.Anything, mock.Anything).Return(&wal.AppendResult{
MessageID: rmq.NewRmqID(1),
TimeTick: 1,
}, nil)
f := syncutil.NewFuture[wal.WAL]()
f.Set(w)
m, err := RecoverPChannelSegmentAllocManager(context.Background(), types.PChannelInfo{Name: "v1"}, f)
assert.NoError(t, err)
assert.NotNil(t, m)
m.MustSealSegments(context.Background(), stats.SegmentBelongs{
PChannel: "v1",
VChannel: "v1",
CollectionID: 1,
PartitionID: 2,
SegmentID: 4000,
})
inspector.GetSegmentSealedInspector().RegisterPChannelManager(m)
ctx := context.Background()
testRequest := &AssignSegmentRequest{
CollectionID: 100,
PartitionID: 101,
InsertMetrics: stats.InsertMetrics{
Rows: 100,
BinarySize: 200,
},
TimeTick: tsoutil.GetCurrentTime(),
}
resp, err := m.AssignSegment(ctx, testRequest)
assert.Error(t, err)
assert.Nil(t, resp)
m.NewCollection(100, "v1", []int64{101, 102, 103})
resp, err = m.AssignSegment(ctx, testRequest)
assert.NoError(t, err)
assert.NotNil(t, resp)
resp.Ack()
testRequest.PartitionID = 104
resp, err = m.AssignSegment(ctx, testRequest)
assert.Error(t, err)
assert.Nil(t, resp)
m.NewPartition(100, 104)
resp, err = m.AssignSegment(ctx, testRequest)
assert.NoError(t, err)
assert.NotNil(t, resp)
resp.Ack()
m.RemovePartition(ctx, 100, 104)
assert.True(t, m.IsNoWaitSeal())
resp, err = m.AssignSegment(ctx, testRequest)
assert.Error(t, err)
assert.Nil(t, resp)
m.RemoveCollection(ctx, 100)
resp, err = m.AssignSegment(ctx, testRequest)
assert.True(t, m.IsNoWaitSeal())
assert.Error(t, err)
assert.Nil(t, resp)
}
func newStat(insertedBinarySize uint64, maxBinarySize uint64) *streamingpb.SegmentAssignmentStat {
return &streamingpb.SegmentAssignmentStat{
MaxBinarySize: maxBinarySize,
InsertedRows: insertedBinarySize,
InsertedBinarySize: insertedBinarySize,
CreateTimestamp: time.Now().Unix(),
LastModifiedTimestamp: time.Now().Unix(),
}
}
// initializeTestState is a helper function to initialize the status for testing.
func initializeTestState(t *testing.T) {
// c 1
// p 1
// s 1000p
// p 2
// s 2000g, 3000g, 4000s, 5000g
// p 3
// s 6000g
paramtable.Init()
paramtable.Get().DataCoordCfg.SegmentSealProportion.SwapTempValue("1.0")
paramtable.Get().DataCoordCfg.SegmentSealProportionJitter.SwapTempValue("0.0")
paramtable.Get().DataCoordCfg.SegmentMaxSize.SwapTempValue("1")
paramtable.Get().Save(paramtable.Get().CommonCfg.EnableStorageV2.Key, "true")
streamingNodeCatalog := mock_metastore.NewMockStreamingNodeCataLog(t)
rootCoordClient := idalloc.NewMockRootCoordClient(t)
rootCoordClient.EXPECT().AllocSegment(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, asr *datapb.AllocSegmentRequest, co ...grpc.CallOption) (*datapb.AllocSegmentResponse, error) {
return &datapb.AllocSegmentResponse{
SegmentInfo: &datapb.SegmentInfo{
ID: asr.GetSegmentId(),
CollectionID: asr.GetCollectionId(),
PartitionID: asr.GetPartitionId(),
},
Status: merr.Success(),
}, nil
})
rootCoordClient.EXPECT().GetPChannelInfo(mock.Anything, mock.Anything).Return(&rootcoordpb.GetPChannelInfoResponse{
Collections: []*rootcoordpb.CollectionInfoOnPChannel{
{
CollectionId: 1,
Partitions: []*rootcoordpb.PartitionInfoOnPChannel{
{PartitionId: 1},
{PartitionId: 2},
{PartitionId: 3},
},
},
},
}, nil)
fRootCoordClient := syncutil.NewFuture[internaltypes.MixCoordClient]()
fRootCoordClient.Set(rootCoordClient)
resource.InitForTest(t,
resource.OptStreamingNodeCatalog(streamingNodeCatalog),
resource.OptMixCoordClient(fRootCoordClient),
)
streamingNodeCatalog.EXPECT().ListSegmentAssignment(mock.Anything, mock.Anything).Return(
[]*streamingpb.SegmentAssignmentMeta{
{
CollectionId: 1,
PartitionId: 1,
SegmentId: 1000,
Vchannel: "v1",
State: streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_PENDING,
Stat: nil,
},
{
CollectionId: 1,
PartitionId: 2,
SegmentId: 2000,
Vchannel: "v1",
State: streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING,
Stat: newStat(1000, 1000),
},
{
CollectionId: 1,
PartitionId: 2,
SegmentId: 3000,
Vchannel: "v1",
State: streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING,
Stat: newStat(100, 1000),
},
{
CollectionId: 1,
PartitionId: 2,
SegmentId: 4000,
Vchannel: "v1",
State: streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_SEALED,
Stat: newStat(900, 1000),
},
{
CollectionId: 1,
PartitionId: 2,
SegmentId: 5000,
Vchannel: "v1",
State: streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING,
Stat: newStat(900, 1000),
},
{
CollectionId: 1,
PartitionId: 3,
SegmentId: 6000,
Vchannel: "v1",
State: streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING,
Stat: newStat(100, 1000),
},
}, nil)
streamingNodeCatalog.EXPECT().SaveSegmentAssignments(mock.Anything, mock.Anything, mock.Anything).Return(nil)
}

View File

@ -1,223 +0,0 @@
package manager
import (
"context"
"sync"
"github.com/cockroachdb/errors"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/metricsutil"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/streamingpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
)
// newSealQueue creates a new seal helper queue.
func newSealQueue(
logger *log.MLogger,
wal *syncutil.Future[wal.WAL],
waitForSealed []*segmentAllocManager,
metrics *metricsutil.SegmentAssignMetrics,
) *sealQueue {
return &sealQueue{
cond: syncutil.NewContextCond(&sync.Mutex{}),
logger: logger,
wal: wal,
waitForSealed: waitForSealed,
waitCounter: len(waitForSealed),
metrics: metrics,
}
}
// sealQueue is a helper to seal segments.
type sealQueue struct {
cond *syncutil.ContextCond
logger *log.MLogger
wal *syncutil.Future[wal.WAL]
waitForSealed []*segmentAllocManager
waitCounter int // waitCounter counts the real number of waiting segments; it is not equal to len(waitForSealed),
// because some segments may be in the sealing process.
metrics *metricsutil.SegmentAssignMetrics
}
// AsyncSeal adds segments into the queue; they will be sealed at the next seal attempt.
func (q *sealQueue) AsyncSeal(manager ...*segmentAllocManager) {
if q.logger.Level().Enabled(zap.DebugLevel) {
for _, m := range manager {
q.logger.Debug("segment is added into seal queue",
zap.Int("collectionID", int(m.GetCollectionID())),
zap.Int("partitionID", int(m.GetPartitionID())),
zap.Int("segmentID", int(m.GetSegmentID())),
zap.String("policy", string(m.SealPolicy())))
}
}
q.cond.LockAndBroadcast()
defer q.cond.L.Unlock()
q.waitForSealed = append(q.waitForSealed, manager...)
q.waitCounter += len(manager)
}
// SealAllWait seals all segments in the queue.
// If the operation fails, the failed segments are collected again and retried at the next attempt.
func (q *sealQueue) SealAllWait(ctx context.Context) {
q.cond.L.Lock()
segments := q.waitForSealed
q.waitForSealed = make([]*segmentAllocManager, 0)
q.cond.L.Unlock()
q.tryToSealSegments(ctx, segments...)
}
// IsEmpty returns whether the queue is empty.
func (q *sealQueue) IsEmpty() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.waitCounter == 0
}
// WaitCounter returns the wait counter.
func (q *sealQueue) WaitCounter() int {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.waitCounter
}
// WaitUntilNoWaitSeal waits until no segment is waiting in the queue.
func (q *sealQueue) WaitUntilNoWaitSeal(ctx context.Context) error {
// wait until the wait counter becomes 0.
q.cond.L.Lock()
for q.waitCounter > 0 {
if err := q.cond.Wait(ctx); err != nil {
return err
}
}
q.cond.L.Unlock()
return nil
}
// tryToSealSegments tries to seal segments; segments that cannot be sealed yet are put back into the queue for retry.
func (q *sealQueue) tryToSealSegments(ctx context.Context, segments ...*segmentAllocManager) {
if len(segments) == 0 {
return
}
undone, sealedSegments := q.transferSegmentStateIntoSealed(ctx, segments...)
// send flush message into wal.
for collectionID, vchannelSegments := range sealedSegments {
for vchannel, segments := range vchannelSegments {
for _, segment := range segments {
if err := q.sendFlushSegmentsMessageIntoWAL(ctx, collectionID, vchannel, segment); err != nil {
q.logger.Warn("fail to send flush message into wal", zap.String("vchannel", vchannel), zap.Int64("collectionID", collectionID), zap.Error(err))
undone = append(undone, segments...)
continue
}
tx := segment.BeginModification()
tx.IntoFlushed()
if err := tx.Commit(ctx); err != nil {
q.logger.Warn("flushed segment failed at commit, maybe sent repeated flush message into wal", zap.Int64("segmentID", segment.GetSegmentID()), zap.Error(err))
undone = append(undone, segment)
continue
}
q.logger.Info("segment has been flushed",
zap.Int64("collectionID", segment.GetCollectionID()),
zap.Int64("partitionID", segment.GetPartitionID()),
zap.String("vchannel", segment.GetVChannel()),
zap.Int64("segmentID", segment.GetSegmentID()),
zap.String("sealPolicy", string(segment.SealPolicy())))
}
}
}
q.cond.LockAndBroadcast()
q.waitForSealed = append(q.waitForSealed, undone...)
// the undone ones will be retried next time, so the counter does not decrease for them.
q.waitCounter -= (len(segments) - len(undone))
q.cond.L.Unlock()
}
// transferSegmentStateIntoSealed transfers the segment state into sealed.
func (q *sealQueue) transferSegmentStateIntoSealed(ctx context.Context, segments ...*segmentAllocManager) ([]*segmentAllocManager, map[int64]map[string][]*segmentAllocManager) {
// undone segments will be handled again at the next attempt.
undone := make([]*segmentAllocManager, 0)
sealedSegments := make(map[int64]map[string][]*segmentAllocManager)
for _, segment := range segments {
logger := q.logger.With(
zap.Int64("collectionID", segment.GetCollectionID()),
zap.Int64("partitionID", segment.GetPartitionID()),
zap.String("vchannel", segment.GetVChannel()),
zap.Int64("segmentID", segment.GetSegmentID()),
zap.String("sealPolicy", string(segment.SealPolicy())))
if segment.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING {
tx := segment.BeginModification()
tx.IntoSealed()
if err := tx.Commit(ctx); err != nil {
logger.Warn("seal segment failed at commit", zap.Error(err))
undone = append(undone, segment)
continue
}
}
// assert here.
if segment.GetState() != streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_SEALED {
panic("unreachable code: segment should be sealed here")
}
// if there are in-flight acks, wait for them to be acked; delay the seal to the next retry.
ackSem := segment.AckSem()
if ackSem > 0 {
undone = append(undone, segment)
logger.Info("segment has been sealed, but there are flying acks, delay it", zap.Int32("ackSem", ackSem))
continue
}
txnSem := segment.TxnSem()
if txnSem > 0 {
undone = append(undone, segment)
logger.Info("segment has been sealed, but there are flying txns, delay it", zap.Int32("txnSem", txnSem))
continue
}
// collect all sealed segments that have no in-flight acks or txns.
if _, ok := sealedSegments[segment.GetCollectionID()]; !ok {
sealedSegments[segment.GetCollectionID()] = make(map[string][]*segmentAllocManager)
}
if _, ok := sealedSegments[segment.GetCollectionID()][segment.GetVChannel()]; !ok {
sealedSegments[segment.GetCollectionID()][segment.GetVChannel()] = make([]*segmentAllocManager, 0)
}
sealedSegments[segment.GetCollectionID()][segment.GetVChannel()] = append(sealedSegments[segment.GetCollectionID()][segment.GetVChannel()], segment)
logger.Info("segment has been mark as sealed, can be flushed")
}
return undone, sealedSegments
}
// sendFlushSegmentsMessageIntoWAL sends a flush message into wal.
func (m *sealQueue) sendFlushSegmentsMessageIntoWAL(ctx context.Context, collectionID int64, vchannel string, segment *segmentAllocManager) error {
msg, err := message.NewFlushMessageBuilderV2().
WithVChannel(vchannel).
WithHeader(&message.FlushMessageHeader{
CollectionId: collectionID,
PartitionId: segment.GetPartitionID(),
SegmentId: segment.GetSegmentID(),
}).
WithBody(&message.FlushMessageBody{}).BuildMutable()
if err != nil {
return errors.Wrap(err, "at create new flush segments message")
}
msgID, err := m.wal.Get().Append(ctx, msg)
if err != nil {
m.logger.Warn("send flush message into wal failed", zap.Int64("collectionID", collectionID), zap.String("vchannel", vchannel), zap.Int64("segmentID", segment.GetSegmentID()), zap.Error(err))
return err
}
m.logger.Info("send flush message into wal", zap.Int64("collectionID", collectionID), zap.String("vchannel", vchannel), zap.Int64("segmentID", segment.GetSegmentID()), zap.Any("msgID", msgID))
return nil
}

View File

@ -1,317 +0,0 @@
package manager
import (
"context"
"time"
"github.com/cockroachdb/errors"
"go.uber.org/atomic"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/policy"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/metricsutil"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/streamingpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
)
const dirtyThreshold = 30 * 1024 * 1024 // 30MB
var (
ErrSegmentNotGrowing = errors.New("segment is not growing")
ErrTimeTickTooOld = errors.New("time tick is too old")
ErrNotEnoughSpace = stats.ErrNotEnoughSpace
ErrTooLargeInsert = stats.ErrTooLargeInsert
)
// newSegmentAllocManagerFromProto creates a new segment assignment meta from proto.
func newSegmentAllocManagerFromProto(
pchannel types.PChannelInfo,
inner *streamingpb.SegmentAssignmentMeta,
metrics *metricsutil.SegmentAssignMetrics,
) *segmentAllocManager {
stat := stats.NewSegmentStatFromProto(inner.Stat)
// Growing segment's stat should be registered to stats manager.
// Async sealed policy will use it.
if inner.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING {
resource.Resource().SegmentAssignStatsManager().RegisterNewGrowingSegment(stats.SegmentBelongs{
CollectionID: inner.GetCollectionId(),
PartitionID: inner.GetPartitionId(),
SegmentID: inner.GetSegmentId(),
PChannel: pchannel.Name,
VChannel: inner.GetVchannel(),
}, inner.GetSegmentId(), stat)
stat = nil
}
return &segmentAllocManager{
pchannel: pchannel,
inner: inner,
immutableStat: stat,
ackSem: atomic.NewInt32(0),
txnSem: atomic.NewInt32(0),
dirtyBytes: 0,
metrics: metrics,
}
}
// newSegmentAllocManager creates a new segment assignment meta.
func newSegmentAllocManager(
pchannel types.PChannelInfo,
collectionID int64,
partitionID int64,
segmentID int64,
vchannel string,
metrics *metricsutil.SegmentAssignMetrics,
storageVersion int64,
) *segmentAllocManager {
return &segmentAllocManager{
pchannel: pchannel,
inner: &streamingpb.SegmentAssignmentMeta{
CollectionId: collectionID,
PartitionId: partitionID,
SegmentId: segmentID,
Vchannel: vchannel,
State: streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_UNKNOWN,
Stat: nil,
StorageVersion: storageVersion,
},
immutableStat: nil, // immutable stat can be seen after sealed.
ackSem: atomic.NewInt32(0),
dirtyBytes: 0,
txnSem: atomic.NewInt32(0),
metrics: metrics,
}
}
// segmentAllocManager is the meta of segment assignment,
// only used to recover the assignment status on streaming node.
// !!! Not Concurrent Safe
// The state transfer is as follows:
// Pending -> Growing -> Sealed -> Flushed.
//
// The recovery process is as follows:
//
// | State | DataCoord View | Writable | WAL Status | Recovery |
// |-- | -- | -- | -- | -- |
// | Pending | Not exist | No | Not exist | 1. Check whether it exists in datacoord; transfer into growing if it does. |
// | Growing | Exist | Yes | Insert Message Exist; Seal Message Not Exist | nothing |
// | Sealed | Exist | No | Insert Message Exist; Seal Message Maybe Exist | Resend a Seal Message and transfer into Flushed. |
// | Flushed | Exist | No | Insert Message Exist; Seal Message Exist | Already physically deleted, nothing to do |
type segmentAllocManager struct {
pchannel types.PChannelInfo
inner *streamingpb.SegmentAssignmentMeta
immutableStat *stats.SegmentStats // after sealed or flushed, the stat is immutable and cannot be seen by stats manager.
ackSem *atomic.Int32 // the ackSem is increased when the segment allocates rows and decreased when the allocation is acked.
dirtyBytes uint64 // records the dirty bytes that have not been persisted yet.
txnSem *atomic.Int32 // the running txn count of the segment.
metrics *metricsutil.SegmentAssignMetrics
sealPolicy policy.PolicyName
}
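// Illustrative sketch (not part of this diff): the recovery table above boils down to a strict
// Pending -> Growing -> Sealed -> Flushed order. The hypothetical helper below only restates the
// transitions that the IntoXXX methods further down enforce via panics.
func isLegalStateTransition(from, to streamingpb.SegmentAssignmentState) bool {
switch from {
case streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_UNKNOWN:
return to == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_PENDING
case streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_PENDING:
return to == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING
case streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING:
return to == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_SEALED
case streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_SEALED:
return to == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_FLUSHED
default:
return false
}
}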
// WithSealPolicy sets the seal policy of the segment assignment meta.
func (s *segmentAllocManager) WithSealPolicy(policy policy.PolicyName) *segmentAllocManager {
s.sealPolicy = policy
return s
}
// SealPolicy returns the seal policy of the segment assignment meta.
func (s *segmentAllocManager) SealPolicy() policy.PolicyName {
return s.sealPolicy
}
// GetCollectionID returns the collection id of the segment assignment meta.
func (s *segmentAllocManager) GetCollectionID() int64 {
return s.inner.GetCollectionId()
}
// GetPartitionID returns the partition id of the segment assignment meta.
func (s *segmentAllocManager) GetPartitionID() int64 {
return s.inner.GetPartitionId()
}
// GetSegmentID returns the segment id of the segment assignment meta.
func (s *segmentAllocManager) GetSegmentID() int64 {
return s.inner.GetSegmentId()
}
func (s *segmentAllocManager) GetStorageVersion() int64 {
return s.inner.GetStorageVersion()
}
// GetVChannel returns the vchannel of the segment assignment meta.
func (s *segmentAllocManager) GetVChannel() string {
return s.inner.GetVchannel()
}
// State returns the state of the segment assignment meta.
func (s *segmentAllocManager) GetState() streamingpb.SegmentAssignmentState {
return s.inner.GetState()
}
// Stat get the stat of segments.
// Pending segment will return nil.
// Growing segment will return a snapshot.
// Sealed segment will return the final stat.
func (s *segmentAllocManager) GetStat() *stats.SegmentStats {
if s.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING {
return resource.Resource().SegmentAssignStatsManager().GetStatsOfSegment(s.GetSegmentID())
}
return s.immutableStat
}
// AckSem returns the ack sem.
func (s *segmentAllocManager) AckSem() int32 {
return s.ackSem.Load()
}
// TxnSem returns the txn sem.
func (s *segmentAllocManager) TxnSem() int32 {
return s.txnSem.Load()
}
// AllocRows ask for rows from current segment.
// Only a growing and non-fenced segment can allocate rows.
func (s *segmentAllocManager) AllocRows(ctx context.Context, req *AssignSegmentRequest) (*AssignSegmentResult, error) {
// if the segment is not growing or has reached its limit, return an error directly.
if s.inner.GetState() != streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING {
return nil, ErrSegmentNotGrowing
}
if req.TimeTick <= s.inner.GetStat().CreateSegmentTimeTick {
// The incoming insert request's timetick is less than the segment's create time tick;
// return ErrTimeTickTooOld so the caller reallocates a new timetick.
return nil, ErrTimeTickTooOld
}
err := resource.Resource().SegmentAssignStatsManager().AllocRows(s.GetSegmentID(), req.InsertMetrics)
if err != nil {
return nil, err
}
s.dirtyBytes += req.InsertMetrics.BinarySize
s.ackSem.Inc()
// register the txn session cleanup to the segment.
if req.TxnSession != nil {
s.txnSem.Inc()
req.TxnSession.RegisterCleanup(func() { s.txnSem.Dec() }, req.TimeTick)
}
// persist stats if too dirty.
s.persistStatsIfTooDirty(ctx)
return &AssignSegmentResult{
SegmentID: s.GetSegmentID(),
Acknowledge: s.ackSem,
}, nil
}
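// Illustrative sketch (not part of this diff): a caller is expected to Ack the assignment result
// once the insert message has gone through the wal append path, which releases the ack semaphore
// that delays sealing (see AckSem above). The helper name and all literal values are hypothetical.
func exampleAllocAndAck(ctx context.Context, segment *segmentAllocManager, timeTick uint64) error {
result, err := segment.AllocRows(ctx, &AssignSegmentRequest{
CollectionID: 1,
PartitionID: 2,
TimeTick: timeTick,
InsertMetrics: stats.InsertMetrics{Rows: 128, BinarySize: 4096},
})
if err != nil {
return err
}
// Ack must be called after the insert has been handled; the interceptor defers it until the
// wal append returns, so the segment cannot be flushed while the write is still in flight.
defer result.Ack()
return nil
}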
// Snapshot returns the snapshot of the segment assignment meta.
func (s *segmentAllocManager) Snapshot() *streamingpb.SegmentAssignmentMeta {
copied := proto.Clone(s.inner).(*streamingpb.SegmentAssignmentMeta)
copied.Stat = stats.NewProtoFromSegmentStat(s.GetStat())
return copied
}
// IsDirtyEnough returns whether the dirty bytes are large enough to be persisted.
func (s *segmentAllocManager) IsDirtyEnough() bool {
// only growing segment can be dirty.
return s.inner.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING && s.dirtyBytes >= dirtyThreshold
}
// persistStatsIfTooDirty persists the stats if the dirty bytes are too large.
func (s *segmentAllocManager) persistStatsIfTooDirty(ctx context.Context) {
if s.inner.GetState() != streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING {
return
}
if s.dirtyBytes < dirtyThreshold {
return
}
if err := resource.Resource().StreamingNodeCatalog().SaveSegmentAssignments(ctx, s.pchannel.Name, map[int64]*streamingpb.SegmentAssignmentMeta{
s.GetSegmentID(): s.Snapshot(),
}); err != nil {
log.Warn("failed to persist stats of segment", zap.Int64("segmentID", s.GetSegmentID()), zap.Error(err))
}
s.dirtyBytes = 0
}
// BeginModification begins the modification of the segment assignment meta.
// Makes a copy of the segment assignment meta, updates the remote meta storage, then modifies the original.
func (s *segmentAllocManager) BeginModification() *mutableSegmentAssignmentMeta {
copied := s.Snapshot()
return &mutableSegmentAssignmentMeta{
original: s,
modifiedCopy: copied,
}
}
// mutableSegmentAssignmentMeta is the mutable version of segment assignment meta.
type mutableSegmentAssignmentMeta struct {
original *segmentAllocManager
modifiedCopy *streamingpb.SegmentAssignmentMeta
}
func (m *mutableSegmentAssignmentMeta) IntoPending() {
if m.modifiedCopy.State != streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_UNKNOWN {
panic("tranfer state to pending from non-unknown state")
}
m.modifiedCopy.State = streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_PENDING
}
// IntoGrowing transfers the segment assignment meta into growing state.
func (m *mutableSegmentAssignmentMeta) IntoGrowing(limitation *policy.SegmentLimitation, createSegmentTimeTick uint64) {
if m.modifiedCopy.State != streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_PENDING {
panic("tranfer state to growing from non-pending state")
}
m.modifiedCopy.State = streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING
now := time.Now().UnixNano()
m.modifiedCopy.Stat = &streamingpb.SegmentAssignmentStat{
MaxBinarySize: limitation.SegmentSize,
CreateTimestamp: now,
LastModifiedTimestamp: now,
CreateSegmentTimeTick: createSegmentTimeTick,
}
}
// IntoSealed transfers the segment assignment meta into sealed state.
func (m *mutableSegmentAssignmentMeta) IntoSealed() {
if m.modifiedCopy.State != streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING {
panic("tranfer state to sealed from non-growing state")
}
m.modifiedCopy.State = streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_SEALED
}
// IntoFlushed transfers the segment assignment meta into flushed state.
// The segment will be deleted physically after it is transferred into the flushed state.
func (m *mutableSegmentAssignmentMeta) IntoFlushed() {
if m.modifiedCopy.State != streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_SEALED {
panic("tranfer state to flushed from non-sealed state")
}
m.modifiedCopy.State = streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_FLUSHED
}
// Commit commits the modification.
func (m *mutableSegmentAssignmentMeta) Commit(ctx context.Context) error {
if err := resource.Resource().StreamingNodeCatalog().SaveSegmentAssignments(ctx, m.original.pchannel.Name, map[int64]*streamingpb.SegmentAssignmentMeta{
m.modifiedCopy.SegmentId: m.modifiedCopy,
}); err != nil {
return err
}
if m.original.GetState() != streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING &&
m.modifiedCopy.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING {
// if the state transferred into growing, register the stats to stats manager.
resource.Resource().SegmentAssignStatsManager().RegisterNewGrowingSegment(stats.SegmentBelongs{
CollectionID: m.original.GetCollectionID(),
PartitionID: m.original.GetPartitionID(),
SegmentID: m.original.GetSegmentID(),
PChannel: m.original.pchannel.Name,
VChannel: m.original.GetVChannel(),
}, m.original.GetSegmentID(), stats.NewSegmentStatFromProto(m.modifiedCopy.Stat))
} else if m.original.GetState() == streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING &&
m.modifiedCopy.GetState() != streamingpb.SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING {
// if the state transferred from growing into others, remove the stats from stats manager.
m.original.immutableStat = resource.Resource().SegmentAssignStatsManager().UnregisterSealedSegment(m.original.GetSegmentID())
}
m.original.inner = m.modifiedCopy
return nil
}

View File

@ -1,130 +0,0 @@
package policy
import (
"time"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/shards"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
)
type (
PolicyName string
SegmentLimitation = shards.SegmentLimitation
)
var (
GetSegmentLimitationPolicy = shards.GetSegmentLimitationPolicy
PolicyNamePartitionNotFound PolicyName = "partition_not_found"
PolicyNamePartitionRemoved PolicyName = "partition_removed"
PolicyNameCollectionRemoved PolicyName = "collection_removed"
PolicyNameRecover PolicyName = "recover"
PolicyNameFenced PolicyName = "fenced"
PolicyNameForce PolicyName = "force"
)
// GetSegmentAsyncSealPolicy returns the segment async seal policy.
func GetSegmentAsyncSealPolicy() []SegmentAsyncSealPolicy {
// TODO: dynamic policy can be applied here in future.
return []SegmentAsyncSealPolicy{
&sealByCapacity{},
&sealByBinlogNumber{},
&sealByLifetime{},
&sealByIdleTime{},
}
}
// SealPolicyResult is the result of the seal policy.
type SealPolicyResult struct {
PolicyName PolicyName
ShouldBeSealed bool
ExtraInfo interface{}
}
// SegmentAsyncSealPolicy is the policy to check if a segment should be sealed or not.
// Those policies are called asynchronously, so the stat is not real time.
// A policy should be stateless, check only the segment stats, and be quick enough to be called frequently.
type SegmentAsyncSealPolicy interface {
// ShouldBeSealed checks if the segment should be sealed, and return the reason string.
ShouldBeSealed(stats *stats.SegmentStats) SealPolicyResult
}
// sealByCapacity is a policy to seal the segment by the capacity.
type sealByCapacity struct{}
// ShouldBeSealed checks if the segment should be sealed, and return the reason string.
func (p *sealByCapacity) ShouldBeSealed(stats *stats.SegmentStats) SealPolicyResult {
return SealPolicyResult{
PolicyName: "by_capacity",
ShouldBeSealed: stats.ReachLimit,
ExtraInfo: nil,
}
}
// sealByBinlogFileExtraInfo is the extra info of the seal by binlog file number policy.
type sealByBinlogFileExtraInfo struct {
BinLogNumberLimit int
}
// sealByBinlogNumber is a policy to seal the segment by the binlog file number.
type sealByBinlogNumber struct{}
// ShouldBeSealed checks if the segment should be sealed, and return the reason string.
func (p *sealByBinlogNumber) ShouldBeSealed(stats *stats.SegmentStats) SealPolicyResult {
limit := paramtable.Get().DataCoordCfg.SegmentMaxBinlogFileNumber.GetAsInt()
shouldBeSealed := stats.BinLogCounter >= uint64(limit)
return SealPolicyResult{
PolicyName: "binlog_number",
ShouldBeSealed: shouldBeSealed,
ExtraInfo: &sealByBinlogFileExtraInfo{
BinLogNumberLimit: limit,
},
}
}
// sealByLifetimeExtraInfo is the extra info of the seal by lifetime policy.
type sealByLifetimeExtraInfo struct {
MaxLifeTime time.Duration
}
// sealByLifetime is a policy to seal the segment by the lifetime.
type sealByLifetime struct{}
// ShouldBeSealed checks if the segment should be sealed, and return the reason string.
func (p *sealByLifetime) ShouldBeSealed(stats *stats.SegmentStats) SealPolicyResult {
lifetime := paramtable.Get().DataCoordCfg.SegmentMaxLifetime.GetAsDuration(time.Second)
shouldBeSealed := time.Since(stats.CreateTime) > lifetime
return SealPolicyResult{
PolicyName: "by_lifetime",
ShouldBeSealed: shouldBeSealed,
ExtraInfo: sealByLifetimeExtraInfo{
MaxLifeTime: lifetime,
},
}
}
// sealByIdleTimeExtraInfo is the extra info of the seal by idle time policy.
type sealByIdleTimeExtraInfo struct {
IdleTime time.Duration
MinimalSize uint64
}
// sealByIdleTime is a policy to seal the segment by the idle time.
type sealByIdleTime struct{}
// ShouldBeSealed checks if the segment should be sealed, and return the reason string.
func (p *sealByIdleTime) ShouldBeSealed(stats *stats.SegmentStats) SealPolicyResult {
idleTime := paramtable.Get().DataCoordCfg.SegmentMaxIdleTime.GetAsDuration(time.Second)
minSize := uint64(paramtable.Get().DataCoordCfg.SegmentMinSizeFromIdleToSealed.GetAsInt() * 1024 * 1024)
shouldBeSealed := stats.Insert.BinarySize > minSize && time.Since(stats.LastModifiedTime) > idleTime
return SealPolicyResult{
PolicyName: "by_idle_time",
ShouldBeSealed: shouldBeSealed,
ExtraInfo: sealByIdleTimeExtraInfo{
IdleTime: idleTime,
MinimalSize: minSize,
},
}
}
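// Illustrative sketch (not part of this diff): the async seal inspector is expected to run every
// policy above against a segment's stats and seal on the first positive result. The helper below
// is hypothetical and only shows how the policies compose.
func evaluateSealPolicies(segmentStats *stats.SegmentStats) (SealPolicyResult, bool) {
for _, p := range GetSegmentAsyncSealPolicy() {
if result := p.ShouldBeSealed(segmentStats); result.ShouldBeSealed {
return result, true
}
}
return SealPolicyResult{}, false
}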

View File

@ -1,268 +0,0 @@
package segment
import (
"context"
"time"
"github.com/cockroachdb/errors"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/redo"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/inspector"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/manager"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment/stats"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/txn"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/utility"
"github.com/milvus-io/milvus/internal/util/streamingutil/status"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/messagespb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)
const interceptorName = "segment-assign"
var (
_ interceptors.InterceptorWithMetrics = (*segmentInterceptor)(nil)
_ interceptors.InterceptorWithReady = (*segmentInterceptor)(nil)
)
// segmentInterceptor is the implementation of segment assignment interceptor.
type segmentInterceptor struct {
ctx context.Context
cancel context.CancelFunc
logger *log.MLogger
assignManager *syncutil.Future[*manager.PChannelSegmentAllocManager]
}
func (impl *segmentInterceptor) Name() string {
return interceptorName
}
// Ready returns a channel that will be closed when the segment interceptor is ready.
func (impl *segmentInterceptor) Ready() <-chan struct{} {
// Wait for segment assignment manager ready.
return impl.assignManager.Done()
}
// DoAppend assigns segment for every partition in the message.
func (impl *segmentInterceptor) DoAppend(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (msgID message.MessageID, err error) {
switch msg.MessageType() {
case message.MessageTypeCreateCollection:
return impl.handleCreateCollection(ctx, msg, appendOp)
case message.MessageTypeDropCollection:
return impl.handleDropCollection(ctx, msg, appendOp)
case message.MessageTypeCreatePartition:
return impl.handleCreatePartition(ctx, msg, appendOp)
case message.MessageTypeDropPartition:
return impl.handleDropPartition(ctx, msg, appendOp)
case message.MessageTypeInsert:
return impl.handleInsertMessage(ctx, msg, appendOp)
case message.MessageTypeManualFlush:
return impl.handleManualFlushMessage(ctx, msg, appendOp)
default:
return appendOp(ctx, msg)
}
}
// handleCreateCollection handles the create collection message.
func (impl *segmentInterceptor) handleCreateCollection(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
createCollectionMsg, err := message.AsMutableCreateCollectionMessageV1(msg)
if err != nil {
return nil, err
}
// send the create collection message.
msgID, err := appendOp(ctx, msg)
if err != nil {
return msgID, err
}
// Set up the partition manager for the collection so that new incoming insert messages can be assigned segments.
h := createCollectionMsg.Header()
impl.assignManager.Get().NewCollection(h.GetCollectionId(), msg.VChannel(), h.GetPartitionIds())
return msgID, nil
}
// handleDropCollection handles the drop collection message.
func (impl *segmentInterceptor) handleDropCollection(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
dropCollectionMessage, err := message.AsMutableDropCollectionMessageV1(msg)
if err != nil {
return nil, err
}
// Drop collections remove all partition managers from assignment service.
h := dropCollectionMessage.Header()
if err := impl.assignManager.Get().RemoveCollection(ctx, h.GetCollectionId()); err != nil {
return nil, err
}
// send the drop collection message.
return appendOp(ctx, msg)
}
// handleCreatePartition handles the create partition message.
func (impl *segmentInterceptor) handleCreatePartition(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
createPartitionMessage, err := message.AsMutableCreatePartitionMessageV1(msg)
if err != nil {
return nil, err
}
// send the create partition message.
msgID, err := appendOp(ctx, msg)
if err != nil {
return msgID, err
}
// Set up the partition manager for the collection so that new incoming insert messages can be assigned segments.
h := createPartitionMessage.Header()
// error can never happen because of wal lifetime control.
_ = impl.assignManager.Get().NewPartition(h.GetCollectionId(), h.GetPartitionId())
return msgID, nil
}
// handleDropPartition handles the drop partition message.
func (impl *segmentInterceptor) handleDropPartition(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
dropPartitionMessage, err := message.AsMutableDropPartitionMessageV1(msg)
if err != nil {
return nil, err
}
// drop partition, remove the partition manager from assignment service.
h := dropPartitionMessage.Header()
if err := impl.assignManager.Get().RemovePartition(ctx, h.GetCollectionId(), h.GetPartitionId()); err != nil {
return nil, err
}
// send the drop partition message.
return appendOp(ctx, msg)
}
// handleInsertMessage handles the insert message.
func (impl *segmentInterceptor) handleInsertMessage(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
insertMsg, err := message.AsMutableInsertMessageV1(msg)
if err != nil {
return nil, err
}
// Assign segment for insert message.
// !!! In the current implementation an insert message only has one partition, but we need to merge the messages for partition-key in the future.
header := insertMsg.Header()
for _, partition := range header.GetPartitions() {
result, err := impl.assignManager.Get().AssignSegment(ctx, &manager.AssignSegmentRequest{
CollectionID: header.GetCollectionId(),
PartitionID: partition.GetPartitionId(),
InsertMetrics: stats.InsertMetrics{
Rows: partition.GetRows(),
BinarySize: uint64(msg.EstimateSize()), // TODO: Use partition.BinarySize in the future when partitions are merged together in one message.
},
TimeTick: msg.TimeTick(),
TxnSession: txn.GetTxnSessionFromContext(ctx),
})
if errors.Is(err, manager.ErrTimeTickTooOld) {
// If the current time tick of the insert message is too old to allocate a segment,
// we just redo it to refresh to the latest timetick.
return nil, redo.ErrRedo
}
if errors.Is(err, manager.ErrTooLargeInsert) {
// Message is too large, so the operation is unrecoverable and can't be retried at the client side.
return nil, status.NewUnrecoverableError("insert too large, binary size: %d", msg.EstimateSize())
}
if err != nil {
return nil, err
}
// once the segment assignment is done, we need to ack the result;
// if other partitions fail to assign a segment or the wal write fails,
// the segment assignment will not be rolled back, to keep the implementation simple.
defer result.Ack()
// Attach segment assignment to message.
partition.SegmentAssignment = &message.SegmentAssignment{
SegmentId: result.SegmentID,
}
}
// Update the insert message headers.
insertMsg.OverwriteHeader(header)
return appendOp(ctx, msg)
}
// handleManualFlushMessage handles the manual flush message.
func (impl *segmentInterceptor) handleManualFlushMessage(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
maunalFlushMsg, err := message.AsMutableManualFlushMessageV2(msg)
if err != nil {
return nil, err
}
header := maunalFlushMsg.Header()
segmentIDs, err := impl.assignManager.Get().SealAndFenceSegmentUntil(ctx, header.GetCollectionId(), header.GetFlushTs())
if err != nil {
return nil, status.NewInner("segment seal failure with error: %s", err.Error())
}
// Modify the extra response for manual flush message.
utility.ModifyAppendResultExtra(ctx, func(old *message.ManualFlushExtraResponse) *message.ManualFlushExtraResponse {
if old == nil {
return &messagespb.ManualFlushExtraResponse{SegmentIds: segmentIDs}
}
return &messagespb.ManualFlushExtraResponse{SegmentIds: append(old.GetSegmentIds(), segmentIDs...)}
})
if len(segmentIDs) > 0 {
// There are some newly sealed segments, so we need to retry the manual flush operation to refresh the context.
// If we don't refresh the context, the sequence of messages in the wal will be:
// FlushTsHere -> ManualFlush -> FlushSegment1 -> FlushSegment2 -> FlushSegment3.
// After refreshing the context, the sequence of the messages in the wal becomes:
// FlushTsHere -> FlushSegment1 -> FlushSegment2 -> FlushSegment3 -> ManualFlush.
return nil, redo.ErrRedo
}
// send the manual flush message.
msgID, err := appendOp(ctx, msg)
if err != nil {
return nil, err
}
return msgID, nil
}
// Close closes the segment interceptor.
func (impl *segmentInterceptor) Close() {
impl.cancel()
assignManager := impl.assignManager.Get()
if assignManager != nil {
// unregister the pchannels
inspector.GetSegmentSealedInspector().UnregisterPChannelManager(assignManager)
assignManager.Close(context.Background())
}
}
// recoverPChannelManager recovers PChannel Assignment Manager.
func (impl *segmentInterceptor) recoverPChannelManager(param *interceptors.InterceptorBuildParam) {
timer := typeutil.NewBackoffTimer(typeutil.BackoffTimerConfig{
Default: time.Second,
Backoff: typeutil.BackoffConfig{
InitialInterval: 10 * time.Millisecond,
Multiplier: 2.0,
MaxInterval: time.Second,
},
})
timer.EnableBackoff()
for counter := 0; ; counter++ {
pm, err := manager.RecoverPChannelSegmentAllocManager(impl.ctx, param.ChannelInfo, param.WAL)
if err != nil {
ch, d := timer.NextTimer()
impl.logger.Warn("recover PChannel Assignment Manager failed, wait a backoff", zap.Int("retry", counter), zap.Duration("nextRetryInterval", d), zap.Error(err))
select {
case <-impl.ctx.Done():
impl.logger.Info("segment interceptor has been closed", zap.Error(impl.ctx.Err()))
impl.assignManager.Set(nil)
return
case <-ch:
continue
}
}
// register the manager into inspector, to do the seal asynchronously
inspector.GetSegmentSealedInspector().RegisterPChannelManager(pm)
impl.assignManager.Set(pm)
impl.logger.Info("recover PChannel Assignment Manager success")
return
}
}

View File

@ -1,49 +0,0 @@
package stats
import (
"sync"
"github.com/milvus-io/milvus/pkg/v2/util/syncutil"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)
// NewSealSignalNotifier creates a new seal signal notifier.
func NewSealSignalNotifier() *SealSignalNotifier {
return &SealSignalNotifier{
cond: syncutil.NewContextCond(&sync.Mutex{}),
signal: typeutil.NewSet[SegmentBelongs](),
}
}
// SealSignalNotifier is a notifier for seal signal.
type SealSignalNotifier struct {
cond *syncutil.ContextCond
signal typeutil.Set[SegmentBelongs]
}
// AddAndNotify adds a signal and notifies the waiter.
func (n *SealSignalNotifier) AddAndNotify(belongs SegmentBelongs) {
n.cond.LockAndBroadcast()
n.signal.Insert(belongs)
n.cond.L.Unlock()
}
func (n *SealSignalNotifier) WaitChan() <-chan struct{} {
n.cond.L.Lock()
if n.signal.Len() > 0 {
n.cond.L.Unlock()
ch := make(chan struct{})
close(ch)
return ch
}
return n.cond.WaitChan()
}
// Get gets the signal.
func (n *SealSignalNotifier) Get() typeutil.Set[SegmentBelongs] {
n.cond.L.Lock()
signal := n.signal
n.signal = typeutil.NewSet[SegmentBelongs]()
n.cond.L.Unlock()
return signal
}
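// Illustrative sketch (not part of this diff): a consumer of the notifier blocks on WaitChan and
// then drains the accumulated signals with Get. This hypothetical loop body is a usage pattern
// only, not the actual inspector implementation.
func drainSealSignals(n *SealSignalNotifier, handle func(SegmentBelongs)) {
<-n.WaitChan() // returns immediately if signals are already queued.
for belongs := range n.Get() {
handle(belongs)
}
}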

View File

@ -1,238 +0,0 @@
package stats
import (
"fmt"
"sync"
"github.com/cockroachdb/errors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/utils"
)
var (
ErrNotEnoughSpace = errors.New("not enough space")
ErrTooLargeInsert = errors.New("insert too large")
NewSegmentStatFromProto = utils.NewSegmentStatFromProto
NewProtoFromSegmentStat = utils.NewProtoFromSegmentStat
)
type (
SegmentStats = utils.SegmentStats
InsertMetrics = utils.InsertMetrics
SyncOperationMetrics = utils.SyncOperationMetrics
)
// StatsManager is the manager of stats.
// It manages the insert stats of all segments, used to check if a segment has enough space to insert or should be sealed.
// If lock contention becomes a problem, we can optimize it by applying a lock per segment.
type StatsManager struct {
mu sync.Mutex
totalStats InsertMetrics
pchannelStats map[string]*InsertMetrics
vchannelStats map[string]*InsertMetrics
segmentStats map[int64]*SegmentStats // map[SegmentID]SegmentStats
segmentIndex map[int64]SegmentBelongs // map[SegmentID]channels
pchannelIndex map[string]map[int64]struct{} // map[PChannel]SegmentID
sealNotifier *SealSignalNotifier
}
type SegmentBelongs struct {
PChannel string
VChannel string
CollectionID int64
PartitionID int64
SegmentID int64
}
// NewStatsManager creates a new stats manager.
func NewStatsManager() *StatsManager {
return &StatsManager{
mu: sync.Mutex{},
totalStats: InsertMetrics{},
pchannelStats: make(map[string]*InsertMetrics),
vchannelStats: make(map[string]*InsertMetrics),
segmentStats: make(map[int64]*SegmentStats),
segmentIndex: make(map[int64]SegmentBelongs),
pchannelIndex: make(map[string]map[int64]struct{}),
sealNotifier: NewSealSignalNotifier(),
}
}
// RegisterNewGrowingSegment registers a new growing segment.
// delegate the stats management to stats manager.
func (m *StatsManager) RegisterNewGrowingSegment(belongs SegmentBelongs, segmentID int64, stats *SegmentStats) {
m.mu.Lock()
defer m.mu.Unlock()
if _, ok := m.segmentStats[segmentID]; ok {
panic(fmt.Sprintf("register a segment %d that already exist, critical bug", segmentID))
}
m.segmentStats[segmentID] = stats
m.segmentIndex[segmentID] = belongs
if _, ok := m.pchannelIndex[belongs.PChannel]; !ok {
m.pchannelIndex[belongs.PChannel] = make(map[int64]struct{})
}
m.pchannelIndex[belongs.PChannel][segmentID] = struct{}{}
m.totalStats.Collect(stats.Insert)
if _, ok := m.pchannelStats[belongs.PChannel]; !ok {
m.pchannelStats[belongs.PChannel] = &InsertMetrics{}
}
m.pchannelStats[belongs.PChannel].Collect(stats.Insert)
if _, ok := m.vchannelStats[belongs.VChannel]; !ok {
m.vchannelStats[belongs.VChannel] = &InsertMetrics{}
}
m.vchannelStats[belongs.VChannel].Collect(stats.Insert)
}
// AllocRows allocates a number of rows on the current segment.
func (m *StatsManager) AllocRows(segmentID int64, insert InsertMetrics) error {
m.mu.Lock()
defer m.mu.Unlock()
// Must exist, otherwise it's a bug.
info, ok := m.segmentIndex[segmentID]
if !ok {
panic(fmt.Sprintf("alloc rows on a segment %d that does not exist", segmentID))
}
stat := m.segmentStats[segmentID]
inserted := stat.AllocRows(insert)
// update the total stats if inserted.
if inserted {
m.totalStats.Collect(insert)
if _, ok := m.pchannelStats[info.PChannel]; !ok {
m.pchannelStats[info.PChannel] = &InsertMetrics{}
}
m.pchannelStats[info.PChannel].Collect(insert)
if _, ok := m.vchannelStats[info.VChannel]; !ok {
m.vchannelStats[info.VChannel] = &InsertMetrics{}
}
m.vchannelStats[info.VChannel].Collect(insert)
return nil
}
if stat.ShouldBeSealed() {
// notify the seal manager to seal the segment if the stat reaches the limit.
m.sealNotifier.AddAndNotify(info)
}
if stat.IsEmpty() {
return ErrTooLargeInsert
}
return ErrNotEnoughSpace
}
// SealNotifier returns the seal notifier.
func (m *StatsManager) SealNotifier() *SealSignalNotifier {
// no lock here, because it's read only.
return m.sealNotifier
}
// GetStatsOfSegment gets the stats of segment.
func (m *StatsManager) GetStatsOfSegment(segmentID int64) *SegmentStats {
m.mu.Lock()
defer m.mu.Unlock()
return m.segmentStats[segmentID].Copy()
}
// UpdateOnSync updates the stats of segment on sync.
// It's an async update operation, so it does not need to succeed.
func (m *StatsManager) UpdateOnSync(segmentID int64, syncMetric SyncOperationMetrics) {
m.mu.Lock()
defer m.mu.Unlock()
// The segment may already be unregistered; ignore the update in that case.
if _, ok := m.segmentIndex[segmentID]; !ok {
return
}
m.segmentStats[segmentID].UpdateOnSync(syncMetric)
// binlog counter is updated, notify seal manager to do seal scanning.
m.sealNotifier.AddAndNotify(m.segmentIndex[segmentID])
}
// UnregisterSealedSegment unregisters the sealed segment.
func (m *StatsManager) UnregisterSealedSegment(segmentID int64) *SegmentStats {
m.mu.Lock()
defer m.mu.Unlock()
return m.unregisterSealedSegment(segmentID)
}
func (m *StatsManager) unregisterSealedSegment(segmentID int64) *SegmentStats {
// Must exist, otherwise it's a bug.
info, ok := m.segmentIndex[segmentID]
if !ok {
panic(fmt.Sprintf("unregister a segment %d that does not exist, critical bug", segmentID))
}
stats := m.segmentStats[segmentID]
m.totalStats.Subtract(stats.Insert)
delete(m.segmentStats, segmentID)
delete(m.segmentIndex, segmentID)
if _, ok := m.pchannelIndex[info.PChannel]; ok {
delete(m.pchannelIndex[info.PChannel], segmentID)
if len(m.pchannelIndex[info.PChannel]) == 0 {
delete(m.pchannelIndex, info.PChannel)
}
}
if _, ok := m.pchannelStats[info.PChannel]; ok {
m.pchannelStats[info.PChannel].Subtract(stats.Insert)
if m.pchannelStats[info.PChannel].BinarySize == 0 {
delete(m.pchannelStats, info.PChannel)
}
}
if _, ok := m.vchannelStats[info.VChannel]; ok {
m.vchannelStats[info.VChannel].Subtract(stats.Insert)
if m.vchannelStats[info.VChannel].BinarySize == 0 {
delete(m.vchannelStats, info.VChannel)
}
}
return stats
}
// UnregisterAllStatsOnPChannel unregisters all stats on pchannel.
func (m *StatsManager) UnregisterAllStatsOnPChannel(pchannel string) int {
m.mu.Lock()
defer m.mu.Unlock()
segmentIDs, ok := m.pchannelIndex[pchannel]
if !ok {
return 0
}
for segmentID := range segmentIDs {
m.unregisterSealedSegment(segmentID)
}
return len(segmentIDs)
}
// SealByTotalGrowingSegmentsSize seals the largest growing segment
// if the total size of growing segments in ANY vchannel exceeds the threshold.
func (m *StatsManager) SealByTotalGrowingSegmentsSize(vchannelThreshold uint64) *SegmentBelongs {
m.mu.Lock()
defer m.mu.Unlock()
for _, metrics := range m.vchannelStats {
if metrics.BinarySize >= vchannelThreshold {
var (
largestSegment int64 = 0
largestSegmentSize uint64 = 0
)
for segmentID, stats := range m.segmentStats {
if stats.Insert.BinarySize > largestSegmentSize {
largestSegmentSize = stats.Insert.BinarySize
largestSegment = segmentID
}
}
belongs, ok := m.segmentIndex[largestSegment]
if !ok {
panic("unrechable: the segmentID should always be found in segmentIndex")
}
return &belongs
}
}
return nil
}
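// Illustrative sketch (not part of this diff): the expected lifecycle of a segment in the stats
// manager, mirroring the unit test below. The helper and all literal values are hypothetical.
func exampleStatsLifecycle(m *StatsManager, belongs SegmentBelongs, segmentStats *SegmentStats) {
m.RegisterNewGrowingSegment(belongs, belongs.SegmentID, segmentStats)
err := m.AllocRows(belongs.SegmentID, InsertMetrics{Rows: 10, BinarySize: 1024})
switch {
case err == nil:
// accepted: segment, vchannel and pchannel totals are all updated.
case errors.Is(err, ErrNotEnoughSpace):
// the segment is full; the seal notifier has been signaled for it.
case errors.Is(err, ErrTooLargeInsert):
// the insert can never fit even into an empty segment; unrecoverable for the caller.
}
m.UnregisterSealedSegment(belongs.SegmentID) // hands back the final immutable stats.
}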

View File

@ -1,150 +0,0 @@
package stats
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestStatsManager(t *testing.T) {
m := NewStatsManager()
m.RegisterNewGrowingSegment(SegmentBelongs{PChannel: "pchannel", VChannel: "vchannel", CollectionID: 1, PartitionID: 2, SegmentID: 3}, 3, createSegmentStats(100, 100, 300))
assert.Len(t, m.segmentStats, 1)
assert.Len(t, m.vchannelStats, 1)
assert.Len(t, m.pchannelStats, 1)
assert.Len(t, m.segmentIndex, 1)
m.RegisterNewGrowingSegment(SegmentBelongs{PChannel: "pchannel", VChannel: "vchannel", CollectionID: 1, PartitionID: 3, SegmentID: 4}, 4, createSegmentStats(100, 100, 300))
assert.Len(t, m.segmentStats, 2)
assert.Len(t, m.segmentIndex, 2)
assert.Len(t, m.vchannelStats, 1)
assert.Len(t, m.pchannelStats, 1)
m.RegisterNewGrowingSegment(SegmentBelongs{PChannel: "pchannel", VChannel: "vchannel2", CollectionID: 2, PartitionID: 4, SegmentID: 5}, 5, createSegmentStats(100, 100, 300))
assert.Len(t, m.segmentStats, 3)
assert.Len(t, m.segmentIndex, 3)
assert.Len(t, m.vchannelStats, 2)
assert.Len(t, m.pchannelStats, 1)
m.RegisterNewGrowingSegment(SegmentBelongs{PChannel: "pchannel2", VChannel: "vchannel3", CollectionID: 2, PartitionID: 5, SegmentID: 6}, 6, createSegmentStats(100, 100, 300))
assert.Len(t, m.segmentStats, 4)
assert.Len(t, m.segmentIndex, 4)
assert.Len(t, m.vchannelStats, 3)
assert.Len(t, m.pchannelStats, 2)
m.RegisterNewGrowingSegment(SegmentBelongs{PChannel: "pchannel2", VChannel: "vchannel3", CollectionID: 2, PartitionID: 5, SegmentID: 7}, 7, createSegmentStats(0, 0, 300))
assert.Len(t, m.segmentStats, 5)
assert.Len(t, m.segmentIndex, 5)
assert.Len(t, m.vchannelStats, 3)
assert.Len(t, m.pchannelStats, 2)
assert.Panics(t, func() {
m.RegisterNewGrowingSegment(SegmentBelongs{PChannel: "pchannel", VChannel: "vchannel", CollectionID: 1, PartitionID: 2, SegmentID: 3}, 3, createSegmentStats(100, 100, 300))
})
shouldBlock(t, m.SealNotifier().WaitChan())
err := m.AllocRows(3, InsertMetrics{Rows: 50, BinarySize: 50})
assert.NoError(t, err)
stat := m.GetStatsOfSegment(3)
assert.Equal(t, uint64(150), stat.Insert.BinarySize)
shouldBlock(t, m.SealNotifier().WaitChan())
err = m.AllocRows(5, InsertMetrics{Rows: 250, BinarySize: 250})
assert.ErrorIs(t, err, ErrNotEnoughSpace)
<-m.SealNotifier().WaitChan()
infos := m.SealNotifier().Get()
assert.Len(t, infos, 1)
err = m.AllocRows(6, InsertMetrics{Rows: 150, BinarySize: 150})
assert.NoError(t, err)
shouldBlock(t, m.SealNotifier().WaitChan())
assert.Equal(t, uint64(250), m.vchannelStats["vchannel3"].BinarySize)
assert.Equal(t, uint64(100), m.vchannelStats["vchannel2"].BinarySize)
assert.Equal(t, uint64(250), m.vchannelStats["vchannel"].BinarySize)
assert.Equal(t, uint64(350), m.pchannelStats["pchannel"].BinarySize)
assert.Equal(t, uint64(250), m.pchannelStats["pchannel2"].BinarySize)
m.UpdateOnSync(3, SyncOperationMetrics{BinLogCounterIncr: 100})
<-m.SealNotifier().WaitChan()
infos = m.SealNotifier().Get()
assert.Len(t, infos, 1)
m.UpdateOnSync(1000, SyncOperationMetrics{BinLogCounterIncr: 100})
shouldBlock(t, m.SealNotifier().WaitChan())
err = m.AllocRows(3, InsertMetrics{Rows: 400, BinarySize: 400})
assert.ErrorIs(t, err, ErrNotEnoughSpace)
err = m.AllocRows(5, InsertMetrics{Rows: 250, BinarySize: 250})
assert.ErrorIs(t, err, ErrNotEnoughSpace)
err = m.AllocRows(6, InsertMetrics{Rows: 400, BinarySize: 400})
assert.ErrorIs(t, err, ErrNotEnoughSpace)
<-m.SealNotifier().WaitChan()
infos = m.SealNotifier().Get()
assert.Len(t, infos, 3)
err = m.AllocRows(7, InsertMetrics{Rows: 400, BinarySize: 400})
assert.ErrorIs(t, err, ErrTooLargeInsert)
shouldBlock(t, m.SealNotifier().WaitChan())
m.UnregisterSealedSegment(3)
m.UnregisterSealedSegment(4)
m.UnregisterSealedSegment(5)
m.UnregisterSealedSegment(6)
m.UnregisterSealedSegment(7)
assert.Empty(t, m.segmentStats)
assert.Empty(t, m.vchannelStats)
assert.Empty(t, m.pchannelStats)
assert.Empty(t, m.segmentIndex)
assert.Panics(t, func() {
m.AllocRows(100, InsertMetrics{Rows: 100, BinarySize: 100})
})
assert.Panics(t, func() {
m.UnregisterSealedSegment(1)
})
m.UnregisterAllStatsOnPChannel("pchannel")
m.UnregisterAllStatsOnPChannel("pchannel2")
}
func TestSealByTotalGrowingSegmentsSize(t *testing.T) {
m := NewStatsManager()
m.RegisterNewGrowingSegment(SegmentBelongs{PChannel: "pchannel", VChannel: "vchannel", CollectionID: 1, PartitionID: 2, SegmentID: 3}, 3, createSegmentStats(100, 100, 300))
m.RegisterNewGrowingSegment(SegmentBelongs{PChannel: "pchannel", VChannel: "vchannel", CollectionID: 1, PartitionID: 2, SegmentID: 4}, 4, createSegmentStats(100, 200, 300))
m.RegisterNewGrowingSegment(SegmentBelongs{PChannel: "pchannel", VChannel: "vchannel", CollectionID: 1, PartitionID: 2, SegmentID: 5}, 5, createSegmentStats(100, 100, 300))
belongs := m.SealByTotalGrowingSegmentsSize(401)
assert.Nil(t, belongs)
belongs = m.SealByTotalGrowingSegmentsSize(400)
assert.NotNil(t, belongs)
assert.Equal(t, int64(4), belongs.SegmentID)
m.UnregisterAllStatsOnPChannel("pchannel")
assert.Empty(t, m.pchannelStats)
assert.Empty(t, m.vchannelStats)
assert.Empty(t, m.segmentStats)
assert.Empty(t, m.segmentIndex)
}
func createSegmentStats(row uint64, binarySize uint64, maxBinarSize uint64) *SegmentStats {
return &SegmentStats{
Insert: InsertMetrics{
Rows: row,
BinarySize: binarySize,
},
MaxBinarySize: maxBinarSize,
CreateTime: time.Now(),
LastModifiedTime: time.Now(),
BinLogCounter: 0,
}
}
func shouldBlock(t *testing.T, ch <-chan struct{}) {
select {
case <-ch:
t.Errorf("should block but not")
case <-time.After(10 * time.Millisecond):
return
}
}

View File

@ -0,0 +1,19 @@
package shard
import (
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
)
func NewInterceptorBuilder() interceptors.InterceptorBuilder {
return &interceptorBuilder{}
}
type interceptorBuilder struct{}
func (b *interceptorBuilder) Build(param *interceptors.InterceptorBuildParam) interceptors.Interceptor {
shardInterceptor := &shardInterceptor{
shardManager: param.ShardManager,
}
shardInterceptor.initOpTable()
return shardInterceptor
}

View File

@ -0,0 +1,254 @@
package shard
import (
"context"
"github.com/cockroachdb/errors"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/redo"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/shards"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/stats"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/txn"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/utility"
"github.com/milvus-io/milvus/internal/util/streamingutil/status"
"github.com/milvus-io/milvus/pkg/v2/proto/messagespb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
)
const interceptorName = "shard"
var _ interceptors.InterceptorWithMetrics = (*shardInterceptor)(nil)
// shardInterceptor is the implementation of shard management interceptor.
type shardInterceptor struct {
shardManager shards.ShardManager
ops map[message.MessageType]interceptors.AppendInterceptorCall
}
// initOpTable initializes the operation table for the shard interceptor.
func (impl *shardInterceptor) initOpTable() {
impl.ops = map[message.MessageType]interceptors.AppendInterceptorCall{
message.MessageTypeCreateCollection: impl.handleCreateCollection,
message.MessageTypeDropCollection: impl.handleDropCollection,
message.MessageTypeCreatePartition: impl.handleCreatePartition,
message.MessageTypeDropPartition: impl.handleDropPartition,
message.MessageTypeInsert: impl.handleInsertMessage,
message.MessageTypeDelete: impl.handleDeleteMessage,
message.MessageTypeManualFlush: impl.handleManualFlushMessage,
message.MessageTypeCreateSegment: impl.handleCreateSegment,
message.MessageTypeFlush: impl.handleFlushSegment,
}
}
// Name returns the name of the interceptor.
func (impl *shardInterceptor) Name() string {
return interceptorName
}
// DoAppend dispatches the message to its registered handler by message type; unhandled types are appended directly.
func (impl *shardInterceptor) DoAppend(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (msgID message.MessageID, err error) {
op, ok := impl.ops[msg.MessageType()]
if ok {
// If the message type is registered in the interceptor, use the registered operation.
return op(ctx, msg, appendOp)
}
return appendOp(ctx, msg)
}
// handleCreateCollection handles the create collection message.
func (impl *shardInterceptor) handleCreateCollection(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
createCollectionMsg := message.MustAsMutableCreateCollectionMessageV1(msg)
header := createCollectionMsg.Header()
if err := impl.shardManager.CheckIfCollectionCanBeCreated(header.GetCollectionId()); err != nil {
impl.shardManager.Logger().Warn("collection already exists when creating collection", zap.Int64("collectionID", header.GetCollectionId()))
// The collection cannot be created at the current shard, ignored.
// TODO: idempotence for wal is required in the future, but the current milvus state is not recovered from wal.
// return nil, status.NewUnrecoverableError(err.Error())
}
msgID, err := appendOp(ctx, msg)
if err != nil {
return msgID, err
}
impl.shardManager.CreateCollection(message.MustAsImmutableCreateCollectionMessageV1(msg.IntoImmutableMessage(msgID)))
return msgID, nil
}
// handleDropCollection handles the drop collection message.
func (impl *shardInterceptor) handleDropCollection(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
dropCollectionMessage := message.MustAsMutableDropCollectionMessageV1(msg)
if err := impl.shardManager.CheckIfCollectionExists(dropCollectionMessage.Header().GetCollectionId()); err != nil {
impl.shardManager.Logger().Warn("collection not found when dropping collection", zap.Int64("collectionID", dropCollectionMessage.Header().GetCollectionId()))
// The collection cannot be dropped at the current shard, ignored.
// TODO: idempotence for wal is required in the future, but the current milvus state is not recovered from wal.
// return nil, status.NewUnrecoverableError(err.Error())
}
msgID, err := appendOp(ctx, msg)
if err != nil {
return msgID, err
}
impl.shardManager.DropCollection(message.MustAsImmutableDropCollectionMessageV1(msg.IntoImmutableMessage(msgID)))
return msgID, nil
}
// handleCreatePartition handles the create partition message.
func (impl *shardInterceptor) handleCreatePartition(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
createPartitionMessage := message.MustAsMutableCreatePartitionMessageV1(msg)
h := createPartitionMessage.Header()
if err := impl.shardManager.CheckIfPartitionCanBeCreated(h.GetCollectionId(), h.GetPartitionId()); err != nil {
impl.shardManager.Logger().Warn("partition already exists when creating partition", zap.Int64("collectionID", h.GetCollectionId()), zap.Int64("partitionID", h.GetPartitionId()))
// TODO: idempotence for wal is required in the future, but the current milvus state is not recovered from wal.
// return nil, status.NewUnrecoverableError(err.Error())
}
msgID, err := appendOp(ctx, msg)
if err != nil {
return nil, err
}
impl.shardManager.CreatePartition(message.MustAsImmutableCreatePartitionMessageV1(msg.IntoImmutableMessage(msgID)))
return msgID, nil
}
// handleDropPartition handles the drop partition message.
func (impl *shardInterceptor) handleDropPartition(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
dropPartitionMessage := message.MustAsMutableDropPartitionMessageV1(msg)
h := dropPartitionMessage.Header()
if err := impl.shardManager.CheckIfPartitionExists(h.GetCollectionId(), h.GetPartitionId()); err != nil {
impl.shardManager.Logger().Warn("partition not found when dropping partition", zap.Int64("collectionID", h.GetCollectionId()), zap.Int64("partitionID", h.GetPartitionId()))
// The partition cannot be dropped at the current shard, ignored.
// TODO: idempotence for wal is required in the future, but the current milvus state is not recovered from wal.
// return nil, status.NewUnrecoverableError(err.Error())
}
msgID, err := appendOp(ctx, msg)
if err != nil {
return msgID, err
}
impl.shardManager.DropPartition(message.MustAsImmutableDropPartitionMessageV1(msg.IntoImmutableMessage(msgID)))
return msgID, nil
}
// handleInsertMessage handles the insert message.
func (impl *shardInterceptor) handleInsertMessage(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
insertMsg := message.MustAsMutableInsertMessageV1(msg)
// Assign segment for insert message.
// !!! In the current implementation an insert message only has one partition, but we need to merge the messages for partition-key in the future.
header := insertMsg.Header()
for _, partition := range header.GetPartitions() {
req := &shards.AssignSegmentRequest{
CollectionID: header.GetCollectionId(),
PartitionID: partition.GetPartitionId(),
InsertMetrics: stats.InsertMetrics{
Rows: partition.GetRows(),
BinarySize: uint64(msg.EstimateSize()), // TODO: Use partition.BinarySize in the future when partitions are merged together in one message.
},
TimeTick: msg.TimeTick(),
}
if session := txn.GetTxnSessionFromContext(ctx); session != nil {
// because the shard manager uses an interface type while txn is a struct,
// we need to check for nil before the assignment.
req.TxnSession = session
}
result, err := impl.shardManager.AssignSegment(req)
if errors.IsAny(err, shards.ErrTimeTickTooOld, shards.ErrWaitForNewSegment, shards.ErrFencedAssign) {
// 1. time tick is too old for segment assignment.
// 2. partition is fenced.
// 3. segment is not ready.
// we just redo it to refresh to the latest timetick.
if impl.shardManager.Logger().Level().Enabled(zap.DebugLevel) {
impl.shardManager.Logger().Debug("segment assign interceptor redo insert message", zap.Object("message", msg), zap.Error(err))
}
return nil, redo.ErrRedo
}
if errors.IsAny(err, shards.ErrTooLargeInsert, shards.ErrPartitionNotFound, shards.ErrCollectionNotFound) {
// Message is too large, so the operation is unrecoverable and can't be retried at the client side.
impl.shardManager.Logger().Warn("unrecoverable insert operation", zap.Object("message", msg), zap.Error(err))
return nil, status.NewUnrecoverableError("fail to assign segment, %s", err.Error())
}
if err != nil {
return nil, err
}
// once the segment assignment is done, we need to ack the result;
// if other partitions fail to assign a segment or the wal write fails,
// the segment assignment will not be rolled back, to keep the implementation simple.
defer result.Ack()
// Attach segment assignment to message.
partition.SegmentAssignment = &message.SegmentAssignment{
SegmentId: result.SegmentID,
}
}
// Update the insert message headers.
insertMsg.OverwriteHeader(header)
return appendOp(ctx, msg)
}
// handleDeleteMessage handles the delete message.
func (impl *shardInterceptor) handleDeleteMessage(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
deleteMessage := message.MustAsMutableDeleteMessageV1(msg)
header := deleteMessage.Header()
if err := impl.shardManager.CheckIfCollectionExists(header.GetCollectionId()); err != nil {
// The collection does not exist at the current shard, reject the delete.
return nil, status.NewUnrecoverableError(err.Error())
}
return appendOp(ctx, msg)
}
// handleManualFlushMessage handles the manual flush message.
func (impl *shardInterceptor) handleManualFlushMessage(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
maunalFlushMsg := message.MustAsMutableManualFlushMessageV2(msg)
header := maunalFlushMsg.Header()
segmentIDs, err := impl.shardManager.FlushAndFenceSegmentAllocUntil(header.GetCollectionId(), msg.TimeTick())
if err != nil {
return nil, status.NewUnrecoverableError(err.Error())
}
// Modify the extra response for manual flush message.
utility.ModifyAppendResultExtra(ctx, func(old *message.ManualFlushExtraResponse) *message.ManualFlushExtraResponse {
return &messagespb.ManualFlushExtraResponse{SegmentIds: segmentIDs}
})
header.SegmentIds = segmentIDs
maunalFlushMsg.OverwriteHeader(header)
return appendOp(ctx, msg)
}
// handleCreateSegment handles the create segment message.
func (impl *shardInterceptor) handleCreateSegment(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
createSegmentMsg := message.MustAsMutableCreateSegmentMessageV2(msg)
h := createSegmentMsg.Header()
if err := impl.shardManager.CheckIfSegmentCanBeCreated(h.GetCollectionId(), h.GetPartitionId(), h.GetSegmentId()); err != nil {
// The segment cannot be created at the current shard, reject it.
return nil, status.NewUnrecoverableError(err.Error())
}
msgID, err := appendOp(ctx, msg)
if err != nil {
return nil, err
}
impl.shardManager.CreateSegment(message.MustAsImmutableCreateSegmentMessageV2(msg.IntoImmutableMessage(msgID)))
return msgID, nil
}
func (impl *shardInterceptor) handleFlushSegment(ctx context.Context, msg message.MutableMessage, appendOp interceptors.Append) (message.MessageID, error) {
flushMsg := message.MustAsMutableFlushMessageV2(msg)
h := flushMsg.Header()
if err := impl.shardManager.CheckIfSegmentCanBeFlushed(h.GetCollectionId(), h.GetPartitionId(), h.GetSegmentId()); err != nil {
// The segment cannot be flushed at the current shard, reject it.
return nil, status.NewUnrecoverableError(err.Error())
}
msgID, err := appendOp(ctx, msg)
if err != nil {
return nil, err
}
impl.shardManager.FlushSegment(message.MustAsImmutableFlushMessageV2(msg.IntoImmutableMessage(msgID)))
return msgID, nil
}
// Close closes the shard interceptor.
func (impl *shardInterceptor) Close() {}
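// Illustrative sketch (not part of this diff): the handlers above share a common shape — validate
// against the shard manager, append into the wal, then apply the immutable message back onto the
// shard state. The generic helper below is hypothetical; note that several handlers today only log
// the validation failure instead of rejecting (see the TODOs above).
func checkAppendApply(
ctx context.Context,
msg message.MutableMessage,
appendOp interceptors.Append,
check func() error,
apply func(message.MessageID),
) (message.MessageID, error) {
if err := check(); err != nil {
return nil, status.NewUnrecoverableError(err.Error())
}
msgID, err := appendOp(ctx, msg)
if err != nil {
return msgID, err
}
apply(msgID)
return msgID, nil
}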

View File

@ -0,0 +1,230 @@
package shard
import (
"context"
"testing"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"go.uber.org/atomic"
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
"github.com/milvus-io/milvus/internal/mocks/streamingnode/server/wal/interceptors/shard/mock_shards"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/shards"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/utility"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/messagespb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/impls/rmq"
)
func TestShardInterceptor(t *testing.T) {
mockErr := errors.New("mock error")
b := NewInterceptorBuilder()
shardManager := mock_shards.NewMockShardManager(t)
shardManager.EXPECT().Logger().Return(log.With()).Maybe()
i := b.Build(&interceptors.InterceptorBuildParam{
ShardManager: shardManager,
})
defer i.Close()
ctx := context.Background()
appender := func(ctx context.Context, msg message.MutableMessage) (message.MessageID, error) {
return rmq.NewRmqID(1), nil
}
vchannel := "v1"
msg := message.NewCreateCollectionMessageBuilderV1().
WithVChannel(vchannel).
WithHeader(&messagespb.CreateCollectionMessageHeader{
CollectionId: 1,
PartitionIds: []int64{1},
}).
WithBody(&msgpb.CreateCollectionRequest{}).
MustBuildMutable()
shardManager.EXPECT().CheckIfCollectionCanBeCreated(mock.Anything).Return(nil)
shardManager.EXPECT().CreateCollection(mock.Anything).Return()
msgID, err := i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
shardManager.EXPECT().CheckIfCollectionCanBeCreated(mock.Anything).Unset()
shardManager.EXPECT().CheckIfCollectionCanBeCreated(mock.Anything).Return(mockErr)
shardManager.EXPECT().CreateCollection(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
msg = message.NewDropCollectionMessageBuilderV1().
WithVChannel(vchannel).
WithHeader(&messagespb.DropCollectionMessageHeader{
CollectionId: 1,
}).
WithBody(&msgpb.DropCollectionRequest{}).
MustBuildMutable()
shardManager.EXPECT().CheckIfCollectionExists(mock.Anything).Return(nil)
shardManager.EXPECT().DropCollection(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
shardManager.EXPECT().CheckIfCollectionExists(mock.Anything).Unset()
shardManager.EXPECT().CheckIfCollectionExists(mock.Anything).Return(mockErr)
shardManager.EXPECT().DropCollection(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
msg = message.NewCreatePartitionMessageBuilderV1().
WithVChannel(vchannel).
WithHeader(&messagespb.CreatePartitionMessageHeader{
CollectionId: 1,
PartitionId: 1,
}).
WithBody(&msgpb.CreatePartitionRequest{}).
MustBuildMutable()
shardManager.EXPECT().CheckIfPartitionCanBeCreated(mock.Anything, mock.Anything).Return(nil)
shardManager.EXPECT().CreatePartition(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
shardManager.EXPECT().CheckIfPartitionCanBeCreated(mock.Anything, mock.Anything).Unset()
shardManager.EXPECT().CheckIfPartitionCanBeCreated(mock.Anything, mock.Anything).Return(mockErr)
shardManager.EXPECT().CreatePartition(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
msg = message.NewDropPartitionMessageBuilderV1().
WithVChannel(vchannel).
WithHeader(&messagespb.DropPartitionMessageHeader{
CollectionId: 1,
PartitionId: 1,
}).
WithBody(&msgpb.DropPartitionRequest{}).
MustBuildMutable()
shardManager.EXPECT().CheckIfPartitionExists(mock.Anything, mock.Anything).Return(nil)
shardManager.EXPECT().DropPartition(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
shardManager.EXPECT().CheckIfPartitionExists(mock.Anything, mock.Anything).Unset()
shardManager.EXPECT().CheckIfPartitionExists(mock.Anything, mock.Anything).Return(mockErr)
shardManager.EXPECT().DropPartition(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
msg = message.NewCreateSegmentMessageBuilderV2().
WithVChannel(vchannel).
WithHeader(&messagespb.CreateSegmentMessageHeader{
CollectionId: 1,
PartitionId: 1,
SegmentId: 1,
}).
WithBody(&messagespb.CreateSegmentMessageBody{}).
MustBuildMutable()
shardManager.EXPECT().CheckIfSegmentCanBeCreated(mock.Anything, mock.Anything, mock.Anything).Return(nil)
shardManager.EXPECT().CreateSegment(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
shardManager.EXPECT().CheckIfSegmentCanBeCreated(mock.Anything, mock.Anything, mock.Anything).Unset()
shardManager.EXPECT().CheckIfSegmentCanBeCreated(mock.Anything, mock.Anything, mock.Anything).Return(mockErr)
shardManager.EXPECT().CreateSegment(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.Error(t, err)
assert.Nil(t, msgID)
msg = message.NewFlushMessageBuilderV2().
WithVChannel(vchannel).
WithHeader(&messagespb.FlushMessageHeader{
CollectionId: 1,
PartitionId: 1,
SegmentId: 1,
}).
WithBody(&messagespb.FlushMessageBody{}).
MustBuildMutable()
shardManager.EXPECT().CheckIfSegmentCanBeFlushed(mock.Anything, mock.Anything, mock.Anything).Return(nil)
shardManager.EXPECT().FlushSegment(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
shardManager.EXPECT().CheckIfSegmentCanBeFlushed(mock.Anything, mock.Anything, mock.Anything).Unset()
shardManager.EXPECT().CheckIfSegmentCanBeFlushed(mock.Anything, mock.Anything, mock.Anything).Return(mockErr)
shardManager.EXPECT().FlushSegment(mock.Anything).Return()
msgID, err = i.DoAppend(ctx, msg, appender)
assert.Error(t, err)
assert.Nil(t, msgID)
ctx = utility.WithExtraAppendResult(ctx, &utility.ExtraAppendResult{})
msg = message.NewManualFlushMessageBuilderV2().
WithVChannel(vchannel).
WithHeader(&messagespb.ManualFlushMessageHeader{
CollectionId: 1,
}).
WithBody(&messagespb.ManualFlushMessageBody{}).
MustBuildMutable().WithTimeTick(1)
shardManager.EXPECT().FlushAndFenceSegmentAllocUntil(mock.Anything, mock.Anything).Return(nil, nil)
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
shardManager.EXPECT().FlushAndFenceSegmentAllocUntil(mock.Anything, mock.Anything).Unset()
shardManager.EXPECT().FlushAndFenceSegmentAllocUntil(mock.Anything, mock.Anything).Return(nil, mockErr)
msgID, err = i.DoAppend(ctx, msg, appender)
assert.Error(t, err)
assert.Nil(t, msgID)
msg = message.NewInsertMessageBuilderV1().
WithVChannel(vchannel).
WithHeader(&messagespb.InsertMessageHeader{
CollectionId: 1,
Partitions: []*messagespb.PartitionSegmentAssignment{
{
PartitionId: 1,
Rows: 1,
BinarySize: 100,
},
},
}).
WithBody(&msgpb.InsertRequest{}).
MustBuildMutable().WithTimeTick(1)
shardManager.EXPECT().AssignSegment(mock.Anything).Return(&shards.AssignSegmentResult{SegmentID: 1, Acknowledge: atomic.NewInt32(1)}, nil)
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
shardManager.EXPECT().AssignSegment(mock.Anything).Unset()
shardManager.EXPECT().AssignSegment(mock.Anything).Return(nil, mockErr)
msgID, err = i.DoAppend(ctx, msg, appender)
assert.Error(t, err)
assert.Nil(t, msgID)
msg = message.NewDeleteMessageBuilderV1().
WithVChannel(vchannel).
WithHeader(&messagespb.DeleteMessageHeader{
CollectionId: 1,
}).
WithBody(&msgpb.DeleteRequest{}).
MustBuildMutable().WithTimeTick(1)
shardManager.EXPECT().CheckIfCollectionExists(mock.Anything).Unset()
shardManager.EXPECT().CheckIfCollectionExists(mock.Anything).Return(nil)
msgID, err = i.DoAppend(ctx, msg, appender)
assert.NoError(t, err)
assert.NotNil(t, msgID)
shardManager.EXPECT().CheckIfCollectionExists(mock.Anything).Unset()
shardManager.EXPECT().CheckIfCollectionExists(mock.Anything).Return(mockErr)
msgID, err = i.DoAppend(ctx, msg, appender)
assert.Error(t, err)
assert.Nil(t, msgID)
}
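
The test above walks one interceptor through every message type. Read together with the mock expectations, the flow it exercises is: validate the request against the in-memory shard state before handing the message to the appender, and apply the state change only after the wal append succeeds. Below is a minimal, self-contained sketch of that check-before / apply-after shape using made-up types; it mirrors only the create-segment and insert paths and is not the real interceptor code.

package main

import (
	"context"
	"errors"
	"fmt"
)

// Hypothetical, simplified stand-ins for the real message and manager types.
type message struct {
	typ string // e.g. "create_segment", "insert"
}

type shardChecks struct {
	checkCreateSegment func() error // consulted before the wal append
	applyCreateSegment func()       // applied only after the append succeeds
	assignSegment      func() error // reserves a growing segment for an insert
}

// doAppend sketches the check-before / apply-after flow the test exercises.
func doAppend(ctx context.Context, s shardChecks, msg message,
	appendFn func(context.Context, message) (int64, error),
) (int64, error) {
	switch msg.typ {
	case "create_segment":
		if err := s.checkCreateSegment(); err != nil {
			return 0, err // reject before touching the wal
		}
		id, err := appendFn(ctx, msg)
		if err != nil {
			return 0, err
		}
		s.applyCreateSegment() // mutate in-memory shard state only once the wal accepted it
		return id, nil
	case "insert":
		if err := s.assignSegment(); err != nil {
			return 0, err
		}
		return appendFn(ctx, msg)
	default:
		return 0, errors.New("unhandled message type")
	}
}

func main() {
	s := shardChecks{
		checkCreateSegment: func() error { return nil },
		applyCreateSegment: func() {},
		assignSegment:      func() error { return errors.New("no growing segment available") },
	}
	appender := func(context.Context, message) (int64, error) { return 1, nil }

	id, err := doAppend(context.Background(), s, message{typ: "create_segment"}, appender)
	fmt.Println(id, err) // 1 <nil>

	_, err = doAppend(context.Background(), s, message{typ: "insert"}, appender)
	fmt.Println(err) // no growing segment available
}

Note that the DDL paths in the test (create/drop collection and partition) still append successfully even when the check returns an error; the sketch deliberately does not reproduce that tolerance.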

View File

@ -55,6 +55,8 @@ func (w *segmentAllocWorker) do() {
backoff.InitialInterval = 10 * time.Millisecond
backoff.MaxInterval = 1 * time.Second
backoff.MaxElapsedTime = 0
backoff.Reset()
for {
err := w.doOnce()
if err == nil {
@ -110,7 +112,7 @@ func (w *segmentAllocWorker) generateNewGrowingSegmentMessage() error {
storageVersion = storage.StorageV2
}
// Generate growing segment limitation.
limitation := GetSegmentLimitationPolicy().GenerateLimitation()
limitation := getSegmentLimitationPolicy().GenerateLimitation()
// Create a new segment by sending a create segment message into wal directly.
w.msg = message.NewCreateSegmentMessageBuilderV2().
WithVChannel(w.vchannel).

View File

@ -53,6 +53,7 @@ func (w *segmentFlushWorker) do() {
backoff.InitialInterval = 10 * time.Millisecond
backoff.MaxInterval = 1 * time.Second
backoff.MaxElapsedTime = 0
backoff.Reset()
// waitForTxnManagerRecoverReady waits for the txn manager to be ready for recovery.
// The segment assignment manager lost the txnSem for the recovered txn message,

View File

@ -6,14 +6,14 @@ import (
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
)
// GetSegmentLimitationPolicy returns the segment limitation policy.
func GetSegmentLimitationPolicy() SegmentLimitationPolicy {
// getSegmentLimitationPolicy returns the segment limitation policy.
func getSegmentLimitationPolicy() SegmentLimitationPolicy {
// TODO: dynamic policy can be applied here in future.
return jitterSegmentLimitationPolicy{}
}
// SegmentLimitation is the limitation of the segment.
type SegmentLimitation struct {
// segmentLimitation is the limitation of the segment.
type segmentLimitation struct {
PolicyName string
SegmentSize uint64
ExtraInfo interface{}
@ -22,7 +22,7 @@ type SegmentLimitation struct {
// SegmentLimitationPolicy is the interface to generate the limitation of the segment.
type SegmentLimitationPolicy interface {
// GenerateLimitation generates the limitation of the segment.
GenerateLimitation() SegmentLimitation
GenerateLimitation() segmentLimitation
}
// jitterSegmentLimitationPolicyExtraInfo is the extra info of the jitter segment limitation policy.
@ -38,7 +38,7 @@ type jitterSegmentLimitationPolicyExtraInfo struct {
type jitterSegmentLimitationPolicy struct{}
// GenerateLimitation generates the limitation of the segment.
func (p jitterSegmentLimitationPolicy) GenerateLimitation() SegmentLimitation {
func (p jitterSegmentLimitationPolicy) GenerateLimitation() segmentLimitation {
// TODO: It's weird to set such a parameter into datacoord configuration.
// Refactor it in the future
jitter := paramtable.Get().DataCoordCfg.SegmentSealProportionJitter.GetAsFloat()
@ -49,7 +49,7 @@ func (p jitterSegmentLimitationPolicy) GenerateLimitation() SegmentLimitation {
maxSegmentSize := uint64(paramtable.Get().DataCoordCfg.SegmentMaxSize.GetAsInt64() * 1024 * 1024)
proportion := paramtable.Get().DataCoordCfg.SegmentSealProportion.GetAsFloat()
segmentSize := uint64(jitterRatio * float64(maxSegmentSize) * proportion)
return SegmentLimitation{
return segmentLimitation{
PolicyName: "jitter_segment_limitation",
SegmentSize: segmentSize,
ExtraInfo: jitterSegmentLimitationPolicyExtraInfo{

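The formula in the hunk above is segmentSize = jitterRatio * maxSegmentSize * sealProportion. A standalone sketch of that arithmetic follows, with made-up parameter values (not Milvus defaults) and an assumed jitter draw, since the lines that compute jitterRatio fall outside the hunk.

package main

import (
	"fmt"
	"math/rand"
)

// segmentSizeLimit mirrors segmentSize := uint64(jitterRatio * float64(maxSegmentSize) * proportion).
// The jitterRatio draw below (uniform in [1-jitter, 1]) is an assumption for illustration.
func segmentSizeLimit(maxSegmentSizeBytes uint64, sealProportion, jitter float64) uint64 {
	jitterRatio := 1.0
	if jitter > 0 && jitter < 1 {
		jitterRatio = 1 - jitter*rand.Float64()
	}
	return uint64(jitterRatio * float64(maxSegmentSizeBytes) * sealProportion)
}

func main() {
	// Hypothetical knobs: 1024 MiB max segment size, seal at 25% of it, 10% jitter.
	limit := segmentSizeLimit(1024*1024*1024, 0.25, 0.10)
	fmt.Printf("growing segments sealed around %d MiB\n", limit/(1024*1024))
}

The jitter presumably keeps the growing segments of different shards from all hitting the seal threshold at the same moment.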
View File

@ -45,7 +45,7 @@ type ShardManagerRecoverParam struct {
}
// RecoverShardManager recovers the segment assignment manager from the recovery snapshot.
func RecoverShardManager(param *ShardManagerRecoverParam) *ShardManager {
func RecoverShardManager(param *ShardManagerRecoverParam) ShardManager {
// recover the collection infos
collections := newCollectionInfos(param.InitialRecoverSnapshot)
// recover the segment assignment infos
@ -83,7 +83,7 @@ func RecoverShardManager(param *ShardManagerRecoverParam) *ShardManager {
segmentTotal += len(segmentManagers)
}
}
m := &ShardManager{
m := &shardManagerImpl{
mu: sync.Mutex{},
ctx: ctx,
cancel: cancel,
@ -159,10 +159,10 @@ func newCollectionInfos(recoverInfos *recovery.RecoverySnapshot) map[int64]*Coll
return collectionInfoMap
}
// ShardManager manages the all shard info of collection on current pchannel.
// shardManagerImpl manages all the shard info of collections on the current pchannel.
// It's an in-memory data structure, and will be recovered from the recovery storage of the wal and the wal itself.
// !!! Don't add any blocking operation (such as rpc or meta operation) in this module.
type ShardManager struct {
type shardManagerImpl struct {
log.Binder
mu sync.Mutex
@ -181,12 +181,12 @@ type CollectionInfo struct {
PartitionIDs map[int64]struct{}
}
func (m *ShardManager) Channel() types.PChannelInfo {
func (m *shardManagerImpl) Channel() types.PChannelInfo {
return m.pchannel
}
// Close try to persist all stats and invalid the manager.
func (m *ShardManager) Close() {
func (m *shardManagerImpl) Close() {
m.mu.Lock()
defer m.mu.Unlock()
@ -196,7 +196,7 @@ func (m *ShardManager) Close() {
m.metrics.Close()
}
func (m *ShardManager) updateMetrics() {
func (m *shardManagerImpl) updateMetrics() {
m.metrics.UpdatePartitionCount(len(m.partitionManagers))
m.metrics.UpdateCollectionCount(len(m.collections))
}

View File

@ -10,7 +10,7 @@ import (
// CheckIfCollectionCanBeCreated checks if a collection can be created.
// It returns an error if the collection cannot be created.
func (m *ShardManager) CheckIfCollectionCanBeCreated(collectionID int64) error {
func (m *shardManagerImpl) CheckIfCollectionCanBeCreated(collectionID int64) error {
m.mu.Lock()
defer m.mu.Unlock()
@ -18,7 +18,7 @@ func (m *ShardManager) CheckIfCollectionCanBeCreated(collectionID int64) error {
}
// checkIfCollectionCanBeCreated checks if a collection can be created.
func (m *ShardManager) checkIfCollectionCanBeCreated(collectionID int64) error {
func (m *shardManagerImpl) checkIfCollectionCanBeCreated(collectionID int64) error {
if _, ok := m.collections[collectionID]; ok {
return ErrCollectionExists
}
@ -26,7 +26,7 @@ func (m *ShardManager) checkIfCollectionCanBeCreated(collectionID int64) error {
}
// CheckIfCollectionExists checks if a collection can be dropped.
func (m *ShardManager) CheckIfCollectionExists(collectionID int64) error {
func (m *shardManagerImpl) CheckIfCollectionExists(collectionID int64) error {
m.mu.Lock()
defer m.mu.Unlock()
@ -34,7 +34,7 @@ func (m *ShardManager) CheckIfCollectionExists(collectionID int64) error {
}
// checkIfCollectionExists checks if a collection exists.
func (m *ShardManager) checkIfCollectionExists(collectionID int64) error {
func (m *shardManagerImpl) checkIfCollectionExists(collectionID int64) error {
if _, ok := m.collections[collectionID]; !ok {
return ErrCollectionNotFound
}
@ -43,7 +43,7 @@ func (m *ShardManager) checkIfCollectionExists(collectionID int64) error {
// CreateCollection creates a new partition manager when create collection message is written into wal.
// After CreateCollection is called, the ddl and dml on the collection can be applied.
func (m *ShardManager) CreateCollection(msg message.ImmutableCreateCollectionMessageV1) {
func (m *shardManagerImpl) CreateCollection(msg message.ImmutableCreateCollectionMessageV1) {
collectionID := msg.Header().CollectionId
partitionIDs := msg.Header().PartitionIds
vchannel := msg.VChannel()
@ -85,7 +85,7 @@ func (m *ShardManager) CreateCollection(msg message.ImmutableCreateCollectionMes
// DropCollection drops the collection and all the partitions and segments belonging to it when the drop collection message is written into wal.
// After DropCollection is called, no more segments can be assigned to the collection.
// Any dml and ddl for the collection will be rejected.
func (m *ShardManager) DropCollection(msg message.ImmutableDropCollectionMessageV1) {
func (m *shardManagerImpl) DropCollection(msg message.ImmutableDropCollectionMessageV1) {
collectionID := msg.Header().CollectionId
logger := m.Logger().With(log.FieldMessage(msg))

View File

@ -0,0 +1,48 @@
package shards
import (
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard/utils"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
)
type ShardManager interface {
log.WithLogger
Channel() types.PChannelInfo
CheckIfCollectionCanBeCreated(collectionID int64) error
CheckIfCollectionExists(collectionID int64) error
CreateCollection(msg message.ImmutableCreateCollectionMessageV1)
DropCollection(msg message.ImmutableDropCollectionMessageV1)
CheckIfPartitionCanBeCreated(collectionID int64, partitionID int64) error
CheckIfPartitionExists(collectionID int64, partitionID int64) error
CreatePartition(msg message.ImmutableCreatePartitionMessageV1)
DropPartition(msg message.ImmutableDropPartitionMessageV1)
CheckIfSegmentCanBeCreated(collectionID int64, partitionID int64, segmentID int64) error
CheckIfSegmentCanBeFlushed(collecionID int64, partitionID int64, segmentID int64) error
CreateSegment(msg message.ImmutableCreateSegmentMessageV2)
FlushSegment(msg message.ImmutableFlushMessageV2)
AssignSegment(req *AssignSegmentRequest) (*AssignSegmentResult, error)
WaitUntilGrowingSegmentReady(collectionID int64, partitonID int64) (<-chan struct{}, error)
FlushAndFenceSegmentAllocUntil(collectionID int64, timetick uint64) ([]int64, error)
AsyncFlushSegment(signal utils.SealSegmentSignal)
Close()
}
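
A rough sketch of how the insert path is expected to drive this interface: reserve a segment before the wal append and acknowledge the reservation once the append outcome is known. The narrowed interface and the ack closure below are hypothetical simplifications, not the real AssignSegmentRequest/AssignSegmentResult types.

package main

import (
	"errors"
	"fmt"
)

// segmentAssigner is a hypothetical, narrowed stand-in for the interface above:
// only what the insert path needs.
type segmentAssigner interface {
	AssignSegment(rows, bytes uint64) (segmentID int64, ack func(), err error)
}

type fakeAssigner struct{}

func (fakeAssigner) AssignSegment(rows, bytes uint64) (int64, func(), error) {
	if bytes > 1<<20 {
		return 0, nil, errors.New("no growing segment can hold this insert")
	}
	return 42, func() {}, nil
}

// appendInsert sketches the expected call order: reserve a segment before the
// wal append, acknowledge once the append outcome is known.
func appendInsert(a segmentAssigner, rows, bytes uint64, appendFn func(segmentID int64) error) error {
	segID, ack, err := a.AssignSegment(rows, bytes)
	if err != nil {
		return err
	}
	defer ack() // assumption: the real AssignSegmentResult.Ack plays this role
	return appendFn(segID)
}

func main() {
	err := appendInsert(fakeAssigner{}, 10, 1024, func(segID int64) error {
		fmt.Println("appending insert into segment", segID)
		return nil
	})
	fmt.Println("err:", err)
}

The defer here stands in for whatever result plumbing the real interceptor uses to release the assignment.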

View File

@ -9,7 +9,7 @@ import (
)
// CheckIfPartitionCanBeCreated checks if a partition can be created.
func (m *ShardManager) CheckIfPartitionCanBeCreated(collectionID int64, partitionID int64) error {
func (m *shardManagerImpl) CheckIfPartitionCanBeCreated(collectionID int64, partitionID int64) error {
m.mu.Lock()
defer m.mu.Unlock()
@ -17,7 +17,7 @@ func (m *ShardManager) CheckIfPartitionCanBeCreated(collectionID int64, partitio
}
// checkIfPartitionCanBeCreated checks if a partition can be created.
func (m *ShardManager) checkIfPartitionCanBeCreated(collectionID int64, partitionID int64) error {
func (m *shardManagerImpl) checkIfPartitionCanBeCreated(collectionID int64, partitionID int64) error {
if _, ok := m.collections[collectionID]; !ok {
return ErrCollectionNotFound
}
@ -29,7 +29,7 @@ func (m *ShardManager) checkIfPartitionCanBeCreated(collectionID int64, partitio
}
// CheckIfPartitionExists checks if a partition can be dropped.
func (m *ShardManager) CheckIfPartitionExists(collectionID int64, partitionID int64) error {
func (m *shardManagerImpl) CheckIfPartitionExists(collectionID int64, partitionID int64) error {
m.mu.Lock()
defer m.mu.Unlock()
@ -37,7 +37,7 @@ func (m *ShardManager) CheckIfPartitionExists(collectionID int64, partitionID in
}
// checkIfPartitionExists checks if a partition can be dropped.
func (m *ShardManager) checkIfPartitionExists(collectionID int64, partitionID int64) error {
func (m *shardManagerImpl) checkIfPartitionExists(collectionID int64, partitionID int64) error {
if _, ok := m.collections[collectionID]; !ok {
return ErrCollectionNotFound
}
@ -49,7 +49,7 @@ func (m *ShardManager) checkIfPartitionExists(collectionID int64, partitionID in
// CreatePartition creates a new partition manager when create partition message is written into wal.
// After CreatePartition is called, the dml on the partition can be applied.
func (m *ShardManager) CreatePartition(msg message.ImmutableCreatePartitionMessageV1) {
func (m *shardManagerImpl) CreatePartition(msg message.ImmutableCreatePartitionMessageV1) {
collectionID := msg.Header().CollectionId
partitionID := msg.Header().PartitionId
tiemtick := msg.TimeTick()
@ -87,7 +87,7 @@ func (m *ShardManager) CreatePartition(msg message.ImmutableCreatePartitionMessa
// DropPartition drops a partition manager when drop partition message is written into wal.
// After DropPartition is called, the dml on the partition can not be applied.
func (m *ShardManager) DropPartition(msg message.ImmutableDropPartitionMessageV1) {
func (m *shardManagerImpl) DropPartition(msg message.ImmutableDropPartitionMessageV1) {
collectionID := msg.Header().CollectionId
partitionID := msg.Header().PartitionId
logger := m.Logger().With(log.FieldMessage(msg))

View File

@ -33,7 +33,7 @@ func (r *AssignSegmentResult) Ack() {
}
// CheckIfSegmentCanBeCreated checks if a segment can be created for the specified collection and partition.
func (m *ShardManager) CheckIfSegmentCanBeCreated(collectionID int64, partitionID int64, segmentID int64) error {
func (m *shardManagerImpl) CheckIfSegmentCanBeCreated(collectionID int64, partitionID int64, segmentID int64) error {
m.mu.Lock()
defer m.mu.Unlock()
@ -41,7 +41,7 @@ func (m *ShardManager) CheckIfSegmentCanBeCreated(collectionID int64, partitionI
}
// checkIfSegmentCanBeCreated checks if a segment can be created for the specified collection and partition.
func (m *ShardManager) checkIfSegmentCanBeCreated(collectionID int64, partitionID int64, segmentID int64) error {
func (m *shardManagerImpl) checkIfSegmentCanBeCreated(collectionID int64, partitionID int64, segmentID int64) error {
// segment can be created only if the collection and partition exists.
if err := m.checkIfPartitionExists(collectionID, partitionID); err != nil {
return err
@ -54,7 +54,7 @@ func (m *ShardManager) checkIfSegmentCanBeCreated(collectionID int64, partitionI
}
// CheckIfSegmentCanBeFlushed checks if a segment can be flushed.
func (m *ShardManager) CheckIfSegmentCanBeFlushed(collecionID int64, partitionID int64, segmentID int64) error {
func (m *shardManagerImpl) CheckIfSegmentCanBeFlushed(collecionID int64, partitionID int64, segmentID int64) error {
m.mu.Lock()
defer m.mu.Unlock()
@ -62,7 +62,7 @@ func (m *ShardManager) CheckIfSegmentCanBeFlushed(collecionID int64, partitionID
}
// checkIfSegmentCanBeFlushed checks if a segment can be flushed.
func (m *ShardManager) checkIfSegmentCanBeFlushed(collecionID int64, partitionID int64, segmentID int64) error {
func (m *shardManagerImpl) checkIfSegmentCanBeFlushed(collecionID int64, partitionID int64, segmentID int64) error {
if err := m.checkIfPartitionExists(collecionID, partitionID); err != nil {
return err
}
@ -81,7 +81,7 @@ func (m *ShardManager) checkIfSegmentCanBeFlushed(collecionID int64, partitionID
}
// CreateSegment creates a new segment manager when create segment message is written into wal.
func (m *ShardManager) CreateSegment(msg message.ImmutableCreateSegmentMessageV2) {
func (m *shardManagerImpl) CreateSegment(msg message.ImmutableCreateSegmentMessageV2) {
logger := m.Logger().With(log.FieldMessage(msg))
m.mu.Lock()
@ -100,7 +100,7 @@ func (m *ShardManager) CreateSegment(msg message.ImmutableCreateSegmentMessageV2
}
// FlushSegment flushes the segment when flush message is written into wal.
func (m *ShardManager) FlushSegment(msg message.ImmutableFlushMessageV2) {
func (m *shardManagerImpl) FlushSegment(msg message.ImmutableFlushMessageV2) {
collectionID := msg.Header().CollectionId
partitionID := msg.Header().PartitionId
segmentID := msg.Header().SegmentId
@ -118,7 +118,7 @@ func (m *ShardManager) FlushSegment(msg message.ImmutableFlushMessageV2) {
}
// AssignSegment assigns a segment for an assign segment request.
func (m *ShardManager) AssignSegment(req *AssignSegmentRequest) (*AssignSegmentResult, error) {
func (m *shardManagerImpl) AssignSegment(req *AssignSegmentRequest) (*AssignSegmentResult, error) {
m.mu.Lock()
defer m.mu.Unlock()
@ -130,7 +130,7 @@ func (m *ShardManager) AssignSegment(req *AssignSegmentRequest) (*AssignSegmentR
}
// WaitUntilGrowingSegmentReady waits until the growing segment is ready.
func (m *ShardManager) WaitUntilGrowingSegmentReady(collectionID int64, partitonID int64) (<-chan struct{}, error) {
func (m *shardManagerImpl) WaitUntilGrowingSegmentReady(collectionID int64, partitonID int64) (<-chan struct{}, error) {
m.mu.Lock()
defer m.mu.Unlock()
@ -144,7 +144,7 @@ func (m *ShardManager) WaitUntilGrowingSegmentReady(collectionID int64, partiton
// It will be used for messages like ManualFlush and SchemaChange operations that want the existing segments to be flushed.
// !!! The returned segmentIDs may be in on-flushing state (a segmentFlushWorker is running, but the flush message has not been sent into the wal yet).
// !!! The caller should ensure the returned segmentIDs get flushed.
func (m *ShardManager) FlushAndFenceSegmentAllocUntil(collectionID int64, timetick uint64) ([]int64, error) {
func (m *shardManagerImpl) FlushAndFenceSegmentAllocUntil(collectionID int64, timetick uint64) ([]int64, error) {
logger := m.Logger().With(zap.Int64("collectionID", collectionID), zap.Uint64("timetick", timetick))
m.mu.Lock()
defer m.mu.Unlock()
@ -172,7 +172,7 @@ func (m *ShardManager) FlushAndFenceSegmentAllocUntil(collectionID int64, timeti
}
// AsyncFlushSegment triggers the segment to be flushed when flush message is written into wal.
func (m *ShardManager) AsyncFlushSegment(signal utils.SealSegmentSignal) {
func (m *shardManagerImpl) AsyncFlushSegment(signal utils.SealSegmentSignal) {
logger := m.Logger().With(
zap.Int64("collectionID", signal.SegmentBelongs.CollectionID),
zap.Int64("partitionID", signal.SegmentBelongs.PartitionID),

View File

@ -112,7 +112,7 @@ func TestShardManager(t *testing.T) {
},
},
TxnManager: &mockedTxnManager{},
})
}).(*shardManagerImpl)
assert.Equal(t, channel, m.Channel())
// Test Checkers

View File

@ -22,7 +22,7 @@ func newSealWorker(statsManager *StatsManager) *sealWorker {
w := &sealWorker{
statsManager: statsManager,
sealNotifier: make(chan sealSegmentIDWithPolicy, 1),
growingBytesNotifier: syncutil.NewCooldownNotifier[uint64](growingBytesNotifyCooldown, 1),
growingBytesNotifier: syncutil.NewCooldownNotifier[uint64](growingBytesNotifyCooldown, 100),
}
return w
}
@ -37,7 +37,10 @@ type sealWorker struct {
// NotifySealSegment is used to notify the seal worker to seal the segment.
func (m *sealWorker) NotifySealSegment(segmentID int64, sealPolicy policy.SealPolicy) {
go func() {
// notify the seal worker asynchronously to avoid blocking the caller.
m.sealNotifier <- sealSegmentIDWithPolicy{segmentID: segmentID, sealPolicy: sealPolicy}
}()
}
// NotifyGrowingBytes is used to notify the seal worker to seal the segment when the total size exceeds the threshold.

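The hunk above widens the growing-bytes cooldown notifier buffer and makes NotifySealSegment hand the signal off from a fresh goroutine, so the stats path never blocks on the worker. For contrast, here is a standalone sketch of the two usual non-blocking notify shapes, with made-up types rather than the real sealWorker.

package main

import "fmt"

type sealSignal struct{ segmentID int64 }

// asyncNotify mirrors the shape in the hunk: never block the caller and never
// drop a signal, at the cost of one short-lived goroutine per notification.
func asyncNotify(ch chan<- sealSignal, s sealSignal) {
	go func() { ch <- s }()
}

// tryNotify is the lossy alternative: never block, but the signal is dropped
// when the buffer is full (acceptable when the worker coalesces work anyway).
func tryNotify(ch chan<- sealSignal, s sealSignal) bool {
	select {
	case ch <- s:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan sealSignal, 1)

	asyncNotify(ch, sealSignal{segmentID: 1})
	fmt.Println("delivered:", (<-ch).segmentID)

	tryNotify(ch, sealSignal{segmentID: 2})
	fmt.Println("dropped when full:", !tryNotify(ch, sealSignal{segmentID: 3}))
}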
View File

@ -3,7 +3,6 @@ package timetick
import (
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/txn"
)
var _ interceptors.InterceptorBuilder = (*interceptorBuilder)(nil)
@ -26,7 +25,6 @@ func (b *interceptorBuilder) Build(param *interceptors.InterceptorBuildParam) in
return &timeTickAppendInterceptor{
operator: operator,
// TODO: it's just a placeholder, should be replaced after recovery storage is merged.
txnManager: txn.NewTxnManager(param.ChannelInfo, nil),
txnManager: param.TxnManager,
}
}

View File

@ -147,10 +147,8 @@ func (impl *timeTickAppendInterceptor) handleBegin(ctx context.Context, msg mess
// Begin transaction will generate a txn context.
session, err := impl.txnManager.BeginNewTxn(ctx, beginTxnMsg)
if err != nil {
session.BeginRollback()
return nil, nil, err
}
session.BeginDone()
return session, msg.WithTxnContext(session.TxnContext()), nil
}

View File

@ -32,7 +32,7 @@ func newTimeTickSyncOperator(param *interceptors.InterceptorBuildParam) *timeTic
zap.Any("pchannel", param.ChannelInfo),
),
interceptorBuildParam: param,
ackManager: ack.NewAckManager(param.InitializedTimeTick, param.InitializedMessageID, metrics),
ackManager: ack.NewAckManager(param.LastTimeTickMessage.TimeTick(), param.LastTimeTickMessage.LastConfirmedMessageID(), metrics),
ackDetails: ack.NewAckDetails(),
sourceID: paramtable.GetNodeID(),
metrics: metrics,

View File

@ -42,18 +42,18 @@ func TestTimeTickSyncOperator(t *testing.T) {
channel := types.PChannelInfo{Name: "test", Term: 1}
ts, _ := resource.Resource().TSOAllocator().Allocate(ctx)
lastMsg := NewTimeTickMsg(ts, nil, 0, true)
immutablelastMsg := lastMsg.IntoImmutableMessage(msgID)
param := &interceptors.InterceptorBuildParam{
ChannelInfo: channel,
WAL: walFuture,
InitializedTimeTick: ts,
InitializedMessageID: msgID,
LastTimeTickMessage: immutablelastMsg,
WriteAheadBuffer: wab.NewWriteAheadBuffer(
channel.Name,
resource.Resource().Logger().With(),
1024,
30*time.Second,
lastMsg.IntoImmutableMessage(msgID),
immutablelastMsg,
),
MVCCManager: mvcc.NewMVCCManager(ts),
}

View File

@ -27,7 +27,7 @@ func newTxnSession(
lastTimetick: timetick,
txnContext: txnContext,
inFlightCount: 0,
state: message.TxnStateBegin,
state: message.TxnStateInFlight,
doneWait: nil,
rollback: false,
metricsGuard: metricsGuard,
@ -59,30 +59,6 @@ func (s *TxnSession) TxnContext() message.TxnContext {
return s.txnContext
}
// BeginDone marks the transaction as in flight.
func (s *TxnSession) BeginDone() {
s.mu.Lock()
defer s.mu.Unlock()
if s.state != message.TxnStateBegin {
// unreachable code here.
panic("invalid state for in flight")
}
s.state = message.TxnStateInFlight
}
// BeginRollback marks the transaction as rollbacked at begin state.
func (s *TxnSession) BeginRollback() {
s.mu.Lock()
defer s.mu.Unlock()
if s.state != message.TxnStateBegin {
// unreachable code here.
panic("invalid state for rollback")
}
s.state = message.TxnStateRollbacked
}
// AddNewMessage adds a new message to the session.
func (s *TxnSession) AddNewMessage(ctx context.Context, timetick uint64) error {
s.mu.Lock()
@ -270,13 +246,13 @@ func (s *TxnSession) getDoneChan(timetick uint64, state message.TxnState) (<-cha
// checkIfExpired checks if the session is expired.
func (s *TxnSession) checkIfExpired(tt uint64) error {
if s.expired {
return status.NewTransactionExpired("some message has been expired, expired at %d, current %d", s.expiredTimeTick(), tt)
return status.NewTransactionExpired("some message of txn %d has been expired, expired at %d, current %d", s.txnContext.TxnID, s.expiredTimeTick(), tt)
}
expiredTimeTick := s.expiredTimeTick()
if tt >= expiredTimeTick {
// once the session is expired, it will never be active again.
s.expired = true
return status.NewTransactionExpired("transaction expired at %d, current %d", expiredTimeTick, tt)
return status.NewTransactionExpired("txn %d expired at %d, current %d", s.txnContext.TxnID, expiredTimeTick, tt)
}
return nil
}

View File

@ -33,26 +33,12 @@ func TestSession(t *testing.T) {
<-m.RecoverDone()
session, err := m.BeginNewTxn(ctx, newBeginTxnMessage(0, 10*time.Millisecond))
assert.Equal(t, session.VChannel(), "v1")
assert.Equal(t, session.State(), message.TxnStateBegin)
assert.Equal(t, session.State(), message.TxnStateInFlight)
assert.NotNil(t, session)
assert.NoError(t, err)
// Test Begin
assert.Equal(t, message.TxnStateBegin, session.state)
assert.False(t, session.IsExpiredOrDone(0))
expiredTs := tsoutil.AddPhysicalDurationOnTs(0, 10*time.Millisecond)
assert.True(t, session.IsExpiredOrDone(expiredTs))
session.BeginRollback()
assert.Equal(t, message.TxnStateRollbacked, session.state)
assert.True(t, session.IsExpiredOrDone(0))
session, err = m.BeginNewTxn(ctx, newBeginTxnMessage(0, 10*time.Millisecond))
assert.NoError(t, err)
session.BeginDone()
assert.Equal(t, message.TxnStateInFlight, session.state)
assert.False(t, session.IsExpiredOrDone(0))
// Test add new message
expiredTs := tsoutil.AddPhysicalDurationOnTs(0, 10*time.Millisecond)
err = session.AddNewMessage(ctx, expiredTs)
assert.Error(t, err)
serr := status.AsStreamingError(err)
@ -66,8 +52,6 @@ func TestSession(t *testing.T) {
session, err = m.BeginNewTxn(ctx, newBeginTxnMessage(0, 10*time.Millisecond))
assert.NoError(t, err)
session.BeginDone()
assert.NoError(t, err)
err = session.AddNewMessage(ctx, 0)
assert.NoError(t, err)
session.AddNewMessageDoneAndKeepalive(0)
@ -82,7 +66,6 @@ func TestSession(t *testing.T) {
// Test Commit timeout.
session, err = m.BeginNewTxn(ctx, newBeginTxnMessage(0, 10*time.Millisecond))
assert.NoError(t, err)
session.BeginDone()
err = session.AddNewMessage(ctx, 0)
assert.NoError(t, err)
@ -100,7 +83,6 @@ func TestSession(t *testing.T) {
// Test Rollback
session, _ = m.BeginNewTxn(context.Background(), newBeginTxnMessage(0, 10*time.Millisecond))
session.BeginDone()
// Rollback expired.
err = session.RequestRollback(context.Background(), expiredTs)
assert.Error(t, err)
@ -109,7 +91,6 @@ func TestSession(t *testing.T) {
// Rollback success
session, _ = m.BeginNewTxn(context.Background(), newBeginTxnMessage(0, 10*time.Millisecond))
session.BeginDone()
err = session.RequestRollback(context.Background(), 0)
assert.NoError(t, err)
assert.Equal(t, message.TxnStateOnRollback, session.state)
@ -129,7 +110,6 @@ func TestManager(t *testing.T) {
session, err := m.BeginNewTxn(context.Background(), newBeginTxnMessage(0, time.Duration(i+1)*time.Millisecond))
assert.NoError(t, err)
assert.NotNil(t, session)
session.BeginDone()
session, err = m.GetSessionOfTxn(session.TxnContext().TxnID)
assert.NoError(t, err)

View File

@ -165,7 +165,7 @@ func (m *TxnManager) GetSessionOfTxn(id message.TxnID) (*TxnSession, error) {
session, ok := m.sessions[id]
if !ok {
return nil, status.NewTransactionExpired("not found in manager")
return nil, status.NewTransactionExpired("txn %d not found in manager", id)
}
return session, nil
}

View File

@ -11,6 +11,8 @@ import (
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
)
const maxRedoLogged = 3
type InterceptorMetrics struct {
Before time.Duration
BeforeErr error
@ -24,7 +26,6 @@ func (im *InterceptorMetrics) String() string {
// AppendMetrics is the metrics for append operation.
type AppendMetrics struct {
wm *WriteMetrics
bytes int
msg message.MutableMessage
result *types.AppendResult
@ -71,9 +72,11 @@ func (m *AppendMetrics) IntoLogFields() []zap.Field {
}
for name, ims := range m.interceptors {
for i, im := range ims {
if i <= maxRedoLogged {
fields = append(fields, zap.Any(fmt.Sprintf("%s_%d", name, i), im))
}
}
}
if m.err != nil {
fields = append(fields, zap.Error(m.err))
} else {

View File

@ -43,6 +43,7 @@ func NewWriteMetrics(pchannel types.PChannelInfo, walName string) *WriteMetrics
bytes: metrics.WALAppendMessageBytes.MustCurryWith(constLabel),
total: metrics.WALAppendMessageTotal.MustCurryWith(constLabel),
walDuration: metrics.WALAppendMessageDurationSeconds.MustCurryWith(constLabel),
walimplsRetryTotal: metrics.WALImplsAppendRetryTotal.With(constLabel),
walimplsDuration: metrics.WALImplsAppendMessageDurationSeconds.MustCurryWith(constLabel),
walBeforeInterceptorDuration: metrics.WALAppendMessageBeforeInterceptorDurationSeconds.MustCurryWith(constLabel),
walAfterInterceptorDuration: metrics.WALAppendMessageAfterInterceptorDurationSeconds.MustCurryWith(constLabel),
@ -59,6 +60,7 @@ type WriteMetrics struct {
bytes prometheus.ObserverVec
total *prometheus.CounterVec
walDuration prometheus.ObserverVec
walimplsRetryTotal prometheus.Counter
walimplsDuration prometheus.ObserverVec
walBeforeInterceptorDuration prometheus.ObserverVec
walAfterInterceptorDuration prometheus.ObserverVec
@ -81,7 +83,7 @@ func (m *WriteMetrics) done(appendMetrics *AppendMetrics) {
if appendMetrics.implAppendDuration != 0 {
m.walimplsDuration.WithLabelValues(status).Observe(appendMetrics.implAppendDuration.Seconds())
}
m.bytes.WithLabelValues(status).Observe(float64(appendMetrics.bytes))
m.bytes.WithLabelValues(status).Observe(float64(appendMetrics.msg.EstimateSize()))
m.total.WithLabelValues(appendMetrics.msg.MessageType().String(), status).Inc()
m.walDuration.WithLabelValues(status).Observe(appendMetrics.appendDuration.Seconds())
for name, ims := range appendMetrics.interceptors {
@ -108,12 +110,18 @@ func (m *WriteMetrics) done(appendMetrics *AppendMetrics) {
}
}
// ObserveRetry observes the retry of the walimpls.
func (m *WriteMetrics) ObserveRetry() {
m.walimplsRetryTotal.Inc()
}
func (m *WriteMetrics) Close() {
metrics.WALAppendMessageBeforeInterceptorDurationSeconds.DeletePartialMatch(m.constLabel)
metrics.WALAppendMessageAfterInterceptorDurationSeconds.DeletePartialMatch(m.constLabel)
metrics.WALAppendMessageBytes.DeletePartialMatch(m.constLabel)
metrics.WALAppendMessageTotal.DeletePartialMatch(m.constLabel)
metrics.WALAppendMessageDurationSeconds.DeletePartialMatch(m.constLabel)
metrics.WALImplsAppendRetryTotal.DeletePartialMatch(m.constLabel)
metrics.WALImplsAppendMessageDurationSeconds.DeletePartialMatch(m.constLabel)
metrics.WALInfo.DeleteLabelValues(
paramtable.GetStringNodeID(),

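The new walimplsRetryTotal counter follows the same pattern as the other WAL metrics: curry the constant channel labels once at construction, then only Inc() on the hot path via ObserveRetry. A standalone sketch of that wiring with prometheus/client_golang follows; the namespace, metric name, and label value are illustrative, not the ones Milvus registers.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A counter vector labeled by channel, like the WAL metrics above.
	retryTotal := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "example",
		Name:      "wal_impls_append_retry_total",
		Help:      "Total of append message retry",
	}, []string{"channel"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(retryTotal)

	// Bind the constant label once, then only Inc() on the hot path,
	// mirroring metrics.WALImplsAppendRetryTotal.With(constLabel).
	constLabel := prometheus.Labels{"channel": "by-dev-rootcoord-dml_0"}
	retry := retryTotal.With(constLabel)

	for attempt := 0; attempt < 3; attempt++ {
		// ... call the underlying walimpls append here; on a retryable error:
		retry.Inc()
	}

	mfs, _ := reg.Gather()
	fmt.Println(mfs[0].GetName(), mfs[0].GetMetric()[0].GetCounter().GetValue()) // example_wal_impls_append_retry_total 3
}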
View File

@ -24,7 +24,7 @@ import (
// promise there's only one consumer of wal.
// But currently, we don't implement the CAS operation of meta interface.
// Should be fixed in future.
func (rs *RecoveryStorage) backgroundTask() {
func (rs *recoveryStorageImpl) backgroundTask() {
ticker := time.NewTicker(rs.cfg.persistInterval)
defer func() {
ticker.Stop()
@ -52,7 +52,7 @@ func (rs *RecoveryStorage) backgroundTask() {
}
// persistDritySnapshotWhenClosing persists the dirty snapshot when closing the recovery storage.
func (rs *RecoveryStorage) persistDritySnapshotWhenClosing() {
func (rs *recoveryStorageImpl) persistDritySnapshotWhenClosing() {
ctx, cancel := context.WithTimeout(context.Background(), rs.cfg.gracefulTimeout)
defer cancel()
@ -61,7 +61,7 @@ func (rs *RecoveryStorage) persistDritySnapshotWhenClosing() {
}
// persistDirtySnapshot persists the dirty snapshot to the catalog.
func (rs *RecoveryStorage) persistDirtySnapshot(ctx context.Context, snapshot *RecoverySnapshot, lvl zapcore.Level) (err error) {
func (rs *recoveryStorageImpl) persistDirtySnapshot(ctx context.Context, snapshot *RecoverySnapshot, lvl zapcore.Level) (err error) {
rs.metrics.ObserveIsOnPersisting(true)
logger := rs.Logger().With(
zap.String("checkpoint", snapshot.Checkpoint.MessageID.String()),
@ -129,7 +129,7 @@ func (rs *RecoveryStorage) persistDirtySnapshot(ctx context.Context, snapshot *R
// Calling it in the recovery storage promises that the drop virtual channel happens after recovery.
// In the future, the flowgraph will be deprecated and all message operations will be implemented here.
// So DropVirtualChannel will only be called once after that.
func (rs *RecoveryStorage) dropAllVirtualChannel(ctx context.Context, vcs map[string]*streamingpb.VChannelMeta) error {
func (rs *recoveryStorageImpl) dropAllVirtualChannel(ctx context.Context, vcs map[string]*streamingpb.VChannelMeta) error {
channels := make([]string, 0, len(vcs))
for channelName, vc := range vcs {
if vc.State == streamingpb.VChannelState_VCHANNEL_STATE_DROPPED {
@ -162,7 +162,7 @@ func (rs *RecoveryStorage) dropAllVirtualChannel(ctx context.Context, vcs map[st
}
// retryOperationWithBackoff retries the operation with exponential backoff.
func (rs *RecoveryStorage) retryOperationWithBackoff(ctx context.Context, logger *log.MLogger, op func(ctx context.Context) error) error {
func (rs *recoveryStorageImpl) retryOperationWithBackoff(ctx context.Context, logger *log.MLogger, op func(ctx context.Context) error) error {
backoff := rs.newBackoff()
for {
err := op(ctx)
@ -183,10 +183,12 @@ func (rs *RecoveryStorage) retryOperationWithBackoff(ctx context.Context, logger
}
// newBackoff creates a new backoff instance with the default settings.
func (rs *RecoveryStorage) newBackoff() *backoff.ExponentialBackOff {
func (rs *recoveryStorageImpl) newBackoff() *backoff.ExponentialBackOff {
backoff := backoff.NewExponentialBackOff()
backoff.InitialInterval = 10 * time.Millisecond
backoff.MaxInterval = 1 * time.Second
backoff.MaxElapsedTime = 0
backoff.Reset()
return backoff
}
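
Several hunks in this change add backoff.Reset() right after configuring the backoff and keep MaxElapsedTime at zero. A standalone sketch of that retry shape, assuming the github.com/cenkalti/backoff/v4 package the surrounding code appears to use: with MaxElapsedTime == 0 the overall deadline is disabled, and Reset() makes the first NextBackOff() start from InitialInterval rather than whatever state the instance had.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	bo := backoff.NewExponentialBackOff()
	bo.InitialInterval = 10 * time.Millisecond
	bo.MaxInterval = 1 * time.Second
	bo.MaxElapsedTime = 0 // retry forever; the caller decides when to give up
	bo.Reset()            // make sure the first NextBackOff starts at InitialInterval

	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}

	for {
		if err := op(); err == nil {
			break
		}
		// With MaxElapsedTime == 0, NextBackOff never returns backoff.Stop here.
		time.Sleep(bo.NextBackOff())
	}
	fmt.Println("succeeded after", attempts, "attempts")
}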

View File

@ -18,7 +18,7 @@ import (
)
// recoverRecoveryInfoFromMeta retrieves the recovery info for the given channel.
func (r *RecoveryStorage) recoverRecoveryInfoFromMeta(ctx context.Context, walName string, channelInfo types.PChannelInfo, lastTimeTickMessage message.ImmutableMessage) error {
func (r *recoveryStorageImpl) recoverRecoveryInfoFromMeta(ctx context.Context, walName string, channelInfo types.PChannelInfo, lastTimeTickMessage message.ImmutableMessage) error {
r.metrics.ObserveStateChange(recoveryStorageStatePersistRecovering)
r.SetLogger(resource.Resource().Logger().With(
log.FieldComponent(componentRecoveryStorage),
@ -75,7 +75,7 @@ func (r *RecoveryStorage) recoverRecoveryInfoFromMeta(ctx context.Context, walNa
// Before the streaming service is enabled for the first time, there's no recovery info for the channel.
// We should initialize the recovery info for the channel.
// !!! This function will only be called once for each channel when the streaming service is enabled.
func (r *RecoveryStorage) initializeRecoverInfo(ctx context.Context, channelInfo types.PChannelInfo, untilMessage message.ImmutableMessage) (*streamingpb.WALCheckpoint, error) {
func (r *recoveryStorageImpl) initializeRecoverInfo(ctx context.Context, channelInfo types.PChannelInfo, untilMessage message.ImmutableMessage) (*streamingpb.WALCheckpoint, error) {
// The message that is not generated by the streaming service is not managed by the recovery storage at streamingnode.
// So we ignore it, just use the global milvus metainfo to initialize the recovery storage.
// !!! It's not a strong guarantee that keep the consistency of old arch and new arch.

View File

@ -55,3 +55,12 @@ type RecoveryStream interface {
// Close closes the recovery stream.
Close() error
}
// RecoveryStorage is an interface that is used to observe the messages from the WAL.
type RecoveryStorage interface {
// ObserveMessage observes the message from the WAL.
ObserveMessage(msg message.ImmutableMessage)
// Close closes the recovery storage.
Close()
}
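
The exported surface of the recovery storage shrinks to these two methods: the WAL feeds every appended (or replayed) immutable message into ObserveMessage and calls Close on shutdown. A hypothetical consumer with made-up message types, just to show where those two calls sit:

package main

import "fmt"

// Hypothetical stand-ins for message.ImmutableMessage and a recovery storage.
type immutableMessage struct {
	timeTick uint64
}

type recoveryStorage interface {
	ObserveMessage(msg immutableMessage)
	Close()
}

type toyRecovery struct{ lastTimeTick uint64 }

func (r *toyRecovery) ObserveMessage(msg immutableMessage) {
	// The real implementation updates vchannel/segment state and marks the
	// dirty snapshot; here we only track a checkpoint-like time tick.
	if msg.timeTick > r.lastTimeTick {
		r.lastTimeTick = msg.timeTick
	}
}

func (r *toyRecovery) Close() {
	fmt.Println("persist final checkpoint at time tick", r.lastTimeTick)
}

func main() {
	var rs recoveryStorage = &toyRecovery{}
	defer rs.Close()

	// In the wal, ObserveMessage is called for every message after it is
	// appended (or replayed during recovery), in time-tick order.
	for _, tt := range []uint64{10, 11, 12} {
		rs.ObserveMessage(immutableMessage{timeTick: tt})
	}
}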

View File

@ -28,7 +28,7 @@ func RecoverRecoveryStorage(
ctx context.Context,
recoveryStreamBuilder RecoveryStreamBuilder,
lastTimeTickMessage message.ImmutableMessage,
) (*RecoveryStorage, *RecoverySnapshot, error) {
) (RecoveryStorage, *RecoverySnapshot, error) {
rs := newRecoveryStorage(recoveryStreamBuilder.Channel())
if err := rs.recoverRecoveryInfoFromMeta(ctx, recoveryStreamBuilder.WALName(), recoveryStreamBuilder.Channel(), lastTimeTickMessage); err != nil {
rs.Logger().Warn("recovery storage failed", zap.Error(err))
@ -57,9 +57,9 @@ func RecoverRecoveryStorage(
}
// newRecoveryStorage creates a new recovery storage.
func newRecoveryStorage(channel types.PChannelInfo) *RecoveryStorage {
func newRecoveryStorage(channel types.PChannelInfo) *recoveryStorageImpl {
cfg := newConfig()
return &RecoveryStorage{
return &recoveryStorageImpl{
backgroundTaskNotifier: syncutil.NewAsyncTaskNotifier[struct{}](),
cfg: cfg,
mu: sync.Mutex{},
@ -71,9 +71,9 @@ func newRecoveryStorage(channel types.PChannelInfo) *RecoveryStorage {
}
}
// RecoveryStorage is a component that manages the recovery info for the streaming service.
// recoveryStorageImpl is a component that manages the recovery info for the streaming service.
// It will consume the messages from the wal and update the checkpoint for them.
type RecoveryStorage struct {
type recoveryStorageImpl struct {
log.Binder
backgroundTaskNotifier *syncutil.AsyncTaskNotifier[struct{}]
cfg *config
@ -91,7 +91,7 @@ type RecoveryStorage struct {
}
// ObserveMessage is called when a new message is observed.
func (r *RecoveryStorage) ObserveMessage(msg message.ImmutableMessage) {
func (r *recoveryStorageImpl) ObserveMessage(msg message.ImmutableMessage) {
r.mu.Lock()
defer r.mu.Unlock()
@ -99,7 +99,7 @@ func (r *RecoveryStorage) ObserveMessage(msg message.ImmutableMessage) {
}
// Close closes the recovery storage and wait the background task stop.
func (r *RecoveryStorage) Close() {
func (r *recoveryStorageImpl) Close() {
r.backgroundTaskNotifier.Cancel()
r.backgroundTaskNotifier.BlockUntilFinish()
// Stop the truncator.
@ -108,7 +108,7 @@ func (r *RecoveryStorage) Close() {
}
// notifyPersist notifies a persist operation.
func (r *RecoveryStorage) notifyPersist() {
func (r *recoveryStorageImpl) notifyPersist() {
select {
case r.persistNotifier <- struct{}{}:
default:
@ -117,7 +117,7 @@ func (r *RecoveryStorage) notifyPersist() {
// consumeDirtySnapshot consumes the dirty state and returns a snapshot to persist.
// A snapshot is always a consistent state (fully consume a message or a txn message) of the recovery storage.
func (r *RecoveryStorage) consumeDirtySnapshot() *RecoverySnapshot {
func (r *recoveryStorageImpl) consumeDirtySnapshot() *RecoverySnapshot {
r.mu.Lock()
defer r.mu.Unlock()
@ -151,7 +151,7 @@ func (r *RecoveryStorage) consumeDirtySnapshot() *RecoverySnapshot {
}
// observeMessage observes a message and update the recovery storage.
func (r *RecoveryStorage) observeMessage(msg message.ImmutableMessage) {
func (r *recoveryStorageImpl) observeMessage(msg message.ImmutableMessage) {
if msg.TimeTick() <= r.checkpoint.TimeTick {
if r.Logger().Level().Enabled(zap.DebugLevel) {
r.Logger().Debug("skip the message before the checkpoint",
@ -180,7 +180,7 @@ func (r *RecoveryStorage) observeMessage(msg message.ImmutableMessage) {
}
// The incoming message ids are always sorted by timetick.
func (r *RecoveryStorage) handleMessage(msg message.ImmutableMessage) {
func (r *recoveryStorageImpl) handleMessage(msg message.ImmutableMessage) {
if msg.VChannel() != "" && msg.MessageType() != message.MessageTypeCreateCollection &&
msg.MessageType() != message.MessageTypeDropCollection && r.vchannels[msg.VChannel()] == nil {
r.detectInconsistency(msg, "vchannel not found")
@ -231,7 +231,7 @@ func (r *RecoveryStorage) handleMessage(msg message.ImmutableMessage) {
}
// handleInsert handles the insert message.
func (r *RecoveryStorage) handleInsert(msg message.ImmutableInsertMessageV1) {
func (r *recoveryStorageImpl) handleInsert(msg message.ImmutableInsertMessageV1) {
for _, partition := range msg.Header().GetPartitions() {
if segment, ok := r.segments[partition.SegmentAssignment.SegmentId]; ok && segment.IsGrowing() {
segment.ObserveInsert(msg.TimeTick(), partition)
@ -245,7 +245,7 @@ func (r *RecoveryStorage) handleInsert(msg message.ImmutableInsertMessageV1) {
}
// handleDelete handles the delete message.
func (r *RecoveryStorage) handleDelete(msg message.ImmutableDeleteMessageV1) {
func (r *recoveryStorageImpl) handleDelete(msg message.ImmutableDeleteMessageV1) {
// nothing, current delete operation is managed by flowgraph, not recovery storage.
if r.Logger().Level().Enabled(zap.DebugLevel) {
r.Logger().Debug("delete entity", log.FieldMessage(msg))
@ -253,14 +253,14 @@ func (r *RecoveryStorage) handleDelete(msg message.ImmutableDeleteMessageV1) {
}
// handleCreateSegment handles the create segment message.
func (r *RecoveryStorage) handleCreateSegment(msg message.ImmutableCreateSegmentMessageV2) {
func (r *recoveryStorageImpl) handleCreateSegment(msg message.ImmutableCreateSegmentMessageV2) {
segment := newSegmentRecoveryInfoFromCreateSegmentMessage(msg)
r.segments[segment.meta.SegmentId] = segment
r.Logger().Info("create segment", log.FieldMessage(msg))
}
// handleFlush handles the flush message.
func (r *RecoveryStorage) handleFlush(msg message.ImmutableFlushMessageV2) {
func (r *recoveryStorageImpl) handleFlush(msg message.ImmutableFlushMessageV2) {
header := msg.Header()
if segment, ok := r.segments[header.SegmentId]; ok {
segment.ObserveFlush(msg.TimeTick())
@ -269,7 +269,7 @@ func (r *RecoveryStorage) handleFlush(msg message.ImmutableFlushMessageV2) {
}
// handleManualFlush handles the manual flush message.
func (r *RecoveryStorage) handleManualFlush(msg message.ImmutableManualFlushMessageV2) {
func (r *recoveryStorageImpl) handleManualFlush(msg message.ImmutableManualFlushMessageV2) {
segments := make(map[int64]struct{}, len(msg.Header().SegmentIds))
for _, segmentID := range msg.Header().SegmentIds {
segments[segmentID] = struct{}{}
@ -278,7 +278,7 @@ func (r *RecoveryStorage) handleManualFlush(msg message.ImmutableManualFlushMess
}
// flushSegments flushes the segments in the recovery storage.
func (r *RecoveryStorage) flushSegments(msg message.ImmutableMessage, sealSegmentIDs map[int64]struct{}) {
func (r *recoveryStorageImpl) flushSegments(msg message.ImmutableMessage, sealSegmentIDs map[int64]struct{}) {
segmentIDs := make([]int64, 0)
rows := make([]uint64, 0)
binarySize := make([]uint64, 0)
@ -297,7 +297,7 @@ func (r *RecoveryStorage) flushSegments(msg message.ImmutableMessage, sealSegmen
}
// handleCreateCollection handles the create collection message.
func (r *RecoveryStorage) handleCreateCollection(msg message.ImmutableCreateCollectionMessageV1) {
func (r *recoveryStorageImpl) handleCreateCollection(msg message.ImmutableCreateCollectionMessageV1) {
if _, ok := r.vchannels[msg.VChannel()]; ok {
return
}
@ -306,7 +306,7 @@ func (r *RecoveryStorage) handleCreateCollection(msg message.ImmutableCreateColl
}
// handleDropCollection handles the drop collection message.
func (r *RecoveryStorage) handleDropCollection(msg message.ImmutableDropCollectionMessageV1) {
func (r *recoveryStorageImpl) handleDropCollection(msg message.ImmutableDropCollectionMessageV1) {
if vchannelInfo, ok := r.vchannels[msg.VChannel()]; !ok || vchannelInfo.meta.State == streamingpb.VChannelState_VCHANNEL_STATE_DROPPED {
return
}
@ -317,7 +317,7 @@ func (r *RecoveryStorage) handleDropCollection(msg message.ImmutableDropCollecti
}
// flushAllSegmentOfCollection flushes all segments of the collection.
func (r *RecoveryStorage) flushAllSegmentOfCollection(msg message.ImmutableMessage, collectionID int64) {
func (r *recoveryStorageImpl) flushAllSegmentOfCollection(msg message.ImmutableMessage, collectionID int64) {
segmentIDs := make([]int64, 0)
rows := make([]uint64, 0)
for _, segment := range r.segments {
@ -331,7 +331,7 @@ func (r *RecoveryStorage) flushAllSegmentOfCollection(msg message.ImmutableMessa
}
// handleCreatePartition handles the create partition message.
func (r *RecoveryStorage) handleCreatePartition(msg message.ImmutableCreatePartitionMessageV1) {
func (r *recoveryStorageImpl) handleCreatePartition(msg message.ImmutableCreatePartitionMessageV1) {
if vchannelInfo, ok := r.vchannels[msg.VChannel()]; !ok || vchannelInfo.meta.State == streamingpb.VChannelState_VCHANNEL_STATE_DROPPED {
return
}
@ -340,7 +340,7 @@ func (r *RecoveryStorage) handleCreatePartition(msg message.ImmutableCreateParti
}
// handleDropPartition handles the drop partition message.
func (r *RecoveryStorage) handleDropPartition(msg message.ImmutableDropPartitionMessageV1) {
func (r *recoveryStorageImpl) handleDropPartition(msg message.ImmutableDropPartitionMessageV1) {
r.vchannels[msg.VChannel()].ObserveDropPartition(msg)
// flush all existing segments.
r.flushAllSegmentOfPartition(msg, msg.Header().CollectionId, msg.Header().PartitionId)
@ -348,7 +348,7 @@ func (r *RecoveryStorage) handleDropPartition(msg message.ImmutableDropPartition
}
// flushAllSegmentOfPartition flushes all segments of the partition.
func (r *RecoveryStorage) flushAllSegmentOfPartition(msg message.ImmutableMessage, collectionID int64, partitionID int64) {
func (r *recoveryStorageImpl) flushAllSegmentOfPartition(msg message.ImmutableMessage, collectionID int64, partitionID int64) {
segmentIDs := make([]int64, 0)
rows := make([]uint64, 0)
for _, segment := range r.segments {
@ -362,7 +362,7 @@ func (r *RecoveryStorage) flushAllSegmentOfPartition(msg message.ImmutableMessag
}
// handleTxn handles the txn message.
func (r *RecoveryStorage) handleTxn(msg message.ImmutableTxnMessage) {
func (r *recoveryStorageImpl) handleTxn(msg message.ImmutableTxnMessage) {
msg.RangeOver(func(im message.ImmutableMessage) error {
r.handleMessage(im)
return nil
@ -370,11 +370,11 @@ func (r *RecoveryStorage) handleTxn(msg message.ImmutableTxnMessage) {
}
// handleImport handles the import message.
func (r *RecoveryStorage) handleImport(_ message.ImmutableImportMessageV1) {
func (r *recoveryStorageImpl) handleImport(_ message.ImmutableImportMessageV1) {
}
// handleSchemaChange handles the schema change message.
func (r *RecoveryStorage) handleSchemaChange(msg message.ImmutableSchemaChangeMessageV2) {
func (r *recoveryStorageImpl) handleSchemaChange(msg message.ImmutableSchemaChangeMessageV2) {
// when schema change happens, we need to flush all segments in the collection.
// TODO: add the flush segment list into schema change message.
// TODO: persist the schema change into recoveryinfo.
@ -382,7 +382,7 @@ func (r *RecoveryStorage) handleSchemaChange(msg message.ImmutableSchemaChangeMe
}
// detectInconsistency detects the inconsistency in the recovery storage.
func (r *RecoveryStorage) detectInconsistency(msg message.ImmutableMessage, reason string, extra ...zap.Field) {
func (r *recoveryStorageImpl) detectInconsistency(msg message.ImmutableMessage, reason string, extra ...zap.Field) {
fields := make([]zap.Field, 0, len(extra)+2)
fields = append(fields, log.FieldMessage(msg), zap.String("reason", reason))
fields = append(fields, extra...)

View File

@ -123,7 +123,8 @@ func TestRecoveryStorage(t *testing.T) {
// make sure the checkpoint is saved.
paramtable.Get().Save(paramtable.Get().StreamingCfg.WALRecoveryGracefulCloseTimeout.Key, "1000s")
}
rs, snapshot, err := RecoverRecoveryStorage(context.Background(), b, msg)
rsInterface, snapshot, err := RecoverRecoveryStorage(context.Background(), b, msg)
rs := rsInterface.(*recoveryStorageImpl)
assert.NoError(t, err)
assert.NotNil(t, rs)
assert.NotNil(t, snapshot)

View File

@ -14,7 +14,7 @@ import (
)
// recoverFromStream recovers the recovery storage from the recovery stream.
func (r *RecoveryStorage) recoverFromStream(
func (r *recoveryStorageImpl) recoverFromStream(
ctx context.Context,
recoveryStreamBuilder RecoveryStreamBuilder,
lastTimeTickMessage message.ImmutableMessage,
@ -72,7 +72,7 @@ L:
// getSnapshot returns the snapshot of the recovery storage.
// Use this function to get the snapshot after recovery is finished,
// and use the snapshot to recover all write ahead components.
func (r *RecoveryStorage) getSnapshot() *RecoverySnapshot {
func (r *recoveryStorageImpl) getSnapshot() *RecoverySnapshot {
segments := make(map[int64]*streamingpb.SegmentAssignmentMeta, len(r.segments))
vchannels := make(map[string]*streamingpb.VChannelMeta, len(r.vchannels))
for segmentID, segment := range r.segments {

View File

@ -7,9 +7,9 @@ import (
"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/flusher"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/lock"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/redo"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/segment"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/shard"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/interceptors/timetick"
"github.com/milvus-io/milvus/internal/streamingnode/server/wal/registry"
"github.com/milvus-io/milvus/internal/util/streamingutil/status"
@ -27,9 +27,9 @@ func OpenManager() (Manager, error) {
resource.Resource().Logger().Info("open wal manager", zap.String("walName", walName))
opener, err := registry.MustGetBuilder(walName,
redo.NewInterceptorBuilder(),
flusher.NewInterceptorBuilder(),
lock.NewInterceptorBuilder(),
timetick.NewInterceptorBuilder(),
segment.NewInterceptorBuilder(),
shard.NewInterceptorBuilder(),
).Build()
if err != nil {
return nil, err

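The builder list now reads redo → flusher → lock → timetick → shard, with the old segment interceptor gone. A generic, self-contained sketch of how such a list of builders typically composes into a single append path follows; it is illustrative only, and whether the first builder ends up outermost in the real registry is an assumption here.

package main

import (
	"context"
	"fmt"
)

type appendFn func(ctx context.Context, msg string) error

// interceptor wraps the next appender in the chain.
type interceptor func(next appendFn) appendFn

// chain builds from the innermost appender outwards, so the first builder in
// the list becomes the outermost wrapper on the append path.
func chain(base appendFn, builders ...interceptor) appendFn {
	wrapped := base
	for i := len(builders) - 1; i >= 0; i-- {
		wrapped = builders[i](wrapped)
	}
	return wrapped
}

func named(name string) interceptor {
	return func(next appendFn) appendFn {
		return func(ctx context.Context, msg string) error {
			fmt.Println("enter", name)
			defer fmt.Println("leave", name)
			return next(ctx, msg)
		}
	}
}

func main() {
	doAppend := chain(
		func(ctx context.Context, msg string) error { fmt.Println("walimpls append:", msg); return nil },
		named("redo"), named("flusher"), named("lock"), named("timetick"), named("shard"),
	)
	_ = doAppend(context.Background(), "insert") // enters redo first, reaches the wal last
}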
View File

@ -38,6 +38,7 @@ func TestManager(t *testing.T) {
func(ctx context.Context, oo *wal.OpenOption) (wal.WAL, error) {
l := mock_wal.NewMockWAL(t)
l.EXPECT().Channel().Return(oo.Channel)
l.EXPECT().IsAvailable().Return(true).Maybe()
l.EXPECT().Close().Return()
l.EXPECT().IsAvailable().Return(true).Maybe()
return l, nil

View File

@ -55,6 +55,7 @@ func (c *connImpl) initialize() {
newBackOff.InitialInterval = 100 * time.Millisecond
newBackOff.MaxInterval = 10 * time.Second
newBackOff.MaxElapsedTime = 0
newBackOff.Reset()
backoff.Retry(func() error {
conn, err := c.dialer(c.initializationNotifier.Context())

View File

@ -39,7 +39,7 @@ require (
github.com/tikv/client-go/v2 v2.0.4
github.com/uber/jaeger-client-go v2.30.0+incompatible
github.com/x448/float16 v0.8.4
github.com/zilliztech/woodpecker v0.0.0-20250427123625-654f0175eff0
github.com/zilliztech/woodpecker v0.0.0-20250514005855-9467e66ea2bc
go.etcd.io/etcd/api/v3 v3.5.5
go.etcd.io/etcd/client/v3 v3.5.5
go.etcd.io/etcd/server/v3 v3.5.5

View File

@ -824,8 +824,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zilliztech/woodpecker v0.0.0-20250427123625-654f0175eff0 h1:6B7IUyTRarQVTvusRS0bs6aJn3tUTVTIVqPEOj5IQHM=
github.com/zilliztech/woodpecker v0.0.0-20250427123625-654f0175eff0/go.mod h1:MLt2hsMXd5bVOykwZyWXYHsy9kN4C2gQEaCrID5rM1w=
github.com/zilliztech/woodpecker v0.0.0-20250514005855-9467e66ea2bc h1:9KEOCnDt//GAimP3Z3Qh08VwPY7H9AOOjHx9C9ckMSQ=
github.com/zilliztech/woodpecker v0.0.0-20250514005855-9467e66ea2bc/go.mod h1:MLt2hsMXd5bVOykwZyWXYHsy9kN4C2gQEaCrID5rM1w=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=

View File

@ -303,6 +303,11 @@ var (
Buckets: secondsBuckets,
}, WALChannelLabelName, WALInterceptorLabelName)
WALImplsAppendRetryTotal = newWALCounterVec(prometheus.CounterOpts{
Name: "impls_append_message_retry_total",
Help: "Total of append message retry",
}, WALChannelLabelName)
WALAppendMessageDurationSeconds = newWALHistogramVec(prometheus.HistogramOpts{
Name: "append_message_duration_seconds",
Help: "Duration of wal append message",
@ -498,6 +503,7 @@ func registerWAL(registry *prometheus.Registry) {
registry.MustRegister(WALAppendMessageTotal)
registry.MustRegister(WALAppendMessageBeforeInterceptorDurationSeconds)
registry.MustRegister(WALAppendMessageAfterInterceptorDurationSeconds)
registry.MustRegister(WALImplsAppendRetryTotal)
registry.MustRegister(WALAppendMessageDurationSeconds)
registry.MustRegister(WALImplsAppendMessageDurationSeconds)
registry.MustRegister(WALWriteAheadBufferEntryTotal)

View File

@ -245,18 +245,16 @@ message TxnContext {
enum TxnState {
// should never be used.
TxnUnknown = 0;
// the transaction begin.
TxnBegin = 1;
// the transaction is in flight.
TxnInFlight = 2;
TxnInFlight = 1;
// the transaction is on commit.
TxnOnCommit = 3;
TxnOnCommit = 2;
// the transaction is committed.
TxnCommitted = 4;
TxnCommitted = 3;
// the transaction is on rollback.
TxnOnRollback = 5;
TxnOnRollback = 4;
// the transaction is rollbacked.
TxnRollbacked = 6;
TxnRollbacked = 5;
}
// RMQMessageLayout is the layout of message for RMQ.

View File

@ -135,39 +135,35 @@ type TxnState int32
const (
// should never be used.
TxnState_TxnUnknown TxnState = 0
// the transaction begin.
TxnState_TxnBegin TxnState = 1
// the transaction is in flight.
TxnState_TxnInFlight TxnState = 2
TxnState_TxnInFlight TxnState = 1
// the transaction is on commit.
TxnState_TxnOnCommit TxnState = 3
TxnState_TxnOnCommit TxnState = 2
// the transaction is committed.
TxnState_TxnCommitted TxnState = 4
TxnState_TxnCommitted TxnState = 3
// the transaction is on rollback.
TxnState_TxnOnRollback TxnState = 5
TxnState_TxnOnRollback TxnState = 4
// the transaction is rollbacked.
TxnState_TxnRollbacked TxnState = 6
TxnState_TxnRollbacked TxnState = 5
)
// Enum value maps for TxnState.
var (
TxnState_name = map[int32]string{
0: "TxnUnknown",
1: "TxnBegin",
2: "TxnInFlight",
3: "TxnOnCommit",
4: "TxnCommitted",
5: "TxnOnRollback",
6: "TxnRollbacked",
1: "TxnInFlight",
2: "TxnOnCommit",
3: "TxnCommitted",
4: "TxnOnRollback",
5: "TxnRollbacked",
}
TxnState_value = map[string]int32{
"TxnUnknown": 0,
"TxnBegin": 1,
"TxnInFlight": 2,
"TxnOnCommit": 3,
"TxnCommitted": 4,
"TxnOnRollback": 5,
"TxnRollbacked": 6,
"TxnInFlight": 1,
"TxnOnCommit": 2,
"TxnCommitted": 3,
"TxnOnRollback": 4,
"TxnRollbacked": 5,
}
)
@ -2295,26 +2291,25 @@ var file_messages_proto_rawDesc = []byte{
0x0d, 0x0a, 0x08, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x78, 0x6e, 0x10, 0x84, 0x07, 0x12, 0x0e,
0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x54, 0x78, 0x6e, 0x10, 0x85, 0x07, 0x12, 0x10,
0x0a, 0x0b, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x54, 0x78, 0x6e, 0x10, 0x86, 0x07,
0x12, 0x08, 0x0a, 0x03, 0x54, 0x78, 0x6e, 0x10, 0xe7, 0x07, 0x2a, 0x82, 0x01, 0x0a, 0x08, 0x54,
0x78, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x78, 0x6e, 0x55, 0x6e,
0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x78, 0x6e, 0x42, 0x65,
0x67, 0x69, 0x6e, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x78, 0x6e, 0x49, 0x6e, 0x46, 0x6c,
0x69, 0x67, 0x68, 0x74, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x78, 0x6e, 0x4f, 0x6e, 0x43,
0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x78, 0x6e, 0x43, 0x6f,
0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x10, 0x04, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x78, 0x6e,
0x4f, 0x6e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x10, 0x05, 0x12, 0x11, 0x0a, 0x0d,
0x54, 0x78, 0x6e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x10, 0x06, 0x2a,
0x6c, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69,
0x6e, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d,
0x61, 0x69, 0x6e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19,
0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x49, 0x6d,
0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x49, 0x44, 0x10, 0x01, 0x12, 0x20, 0x0a, 0x1c, 0x52,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6f, 0x6c,
0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x10, 0x02, 0x42, 0x35, 0x5a,
0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x69, 0x6c, 0x76,
0x75, 0x73, 0x2d, 0x69, 0x6f, 0x2f, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2f, 0x70, 0x6b, 0x67,
0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x73, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x12, 0x08, 0x0a, 0x03, 0x54, 0x78, 0x6e, 0x10, 0xe7, 0x07, 0x2a, 0x74, 0x0a, 0x08, 0x54, 0x78,
0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x78, 0x6e, 0x55, 0x6e, 0x6b,
0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x78, 0x6e, 0x49, 0x6e, 0x46,
0x6c, 0x69, 0x67, 0x68, 0x74, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x78, 0x6e, 0x4f, 0x6e,
0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x78, 0x6e, 0x43,
0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x78,
0x6e, 0x4f, 0x6e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x10, 0x04, 0x12, 0x11, 0x0a,
0x0d, 0x54, 0x78, 0x6e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x10, 0x05,
0x2a, 0x6c, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61,
0x69, 0x6e, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f,
0x6d, 0x61, 0x69, 0x6e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x1d, 0x0a,
0x19, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x49,
0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x49, 0x44, 0x10, 0x01, 0x12, 0x20, 0x0a, 0x1c,
0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6f,
0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x10, 0x02, 0x42, 0x35,
0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x69, 0x6c,
0x76, 0x75, 0x73, 0x2d, 0x69, 0x6f, 0x2f, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2f, 0x70, 0x6b,
0x67, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x73, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@ -538,16 +538,10 @@ message SegmentAssignmentMeta {
uint64 checkpoint_time_tick = 8; // The timetick of the checkpoint; the meta has already seen the message at this timetick.
}
// SegmentAssignmentState is the state of segment assignment.
// The state machine can be described as following:
// 1. PENDING -> GROWING -> SEALED -> FLUSHED
enum SegmentAssignmentState {
SEGMENT_ASSIGNMENT_STATE_UNKNOWN = 0; // should never be used.
SEGMENT_ASSIGNMENT_STATE_PENDING = 1;
SEGMENT_ASSIGNMENT_STATE_GROWING = 2;
SEGMENT_ASSIGNMENT_STATE_SEALED = 3;
SEGMENT_ASSIGNMENT_STATE_FLUSHED = 4; // can never be seen, because it's
// removed physically when enter FLUSHED.
SEGMENT_ASSIGNMENT_STATE_GROWING = 1;
SEGMENT_ASSIGNMENT_STATE_FLUSHED = 2;
}
// SegmentAssignmentStat is the stat of segment assignment.

View File

@ -313,34 +313,25 @@ func (VChannelState) EnumDescriptor() ([]byte, []int) {
return file_streaming_proto_rawDescGZIP(), []int{4}
}
// SegmentAssignmentState is the state of segment assignment.
// The state machine can be described as following:
// 1. PENDING -> GROWING -> SEALED -> FLUSHED
type SegmentAssignmentState int32
const (
SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_UNKNOWN SegmentAssignmentState = 0 // should never be used.
SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_PENDING SegmentAssignmentState = 1
SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING SegmentAssignmentState = 2
SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_SEALED SegmentAssignmentState = 3
SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_FLUSHED SegmentAssignmentState = 4 // can never be seen, because it's
SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING SegmentAssignmentState = 1
SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_FLUSHED SegmentAssignmentState = 2
)
// Enum value maps for SegmentAssignmentState.
var (
SegmentAssignmentState_name = map[int32]string{
0: "SEGMENT_ASSIGNMENT_STATE_UNKNOWN",
1: "SEGMENT_ASSIGNMENT_STATE_PENDING",
2: "SEGMENT_ASSIGNMENT_STATE_GROWING",
3: "SEGMENT_ASSIGNMENT_STATE_SEALED",
4: "SEGMENT_ASSIGNMENT_STATE_FLUSHED",
1: "SEGMENT_ASSIGNMENT_STATE_GROWING",
2: "SEGMENT_ASSIGNMENT_STATE_FLUSHED",
}
SegmentAssignmentState_value = map[string]int32{
"SEGMENT_ASSIGNMENT_STATE_UNKNOWN": 0,
"SEGMENT_ASSIGNMENT_STATE_PENDING": 1,
"SEGMENT_ASSIGNMENT_STATE_GROWING": 2,
"SEGMENT_ASSIGNMENT_STATE_SEALED": 3,
"SEGMENT_ASSIGNMENT_STATE_FLUSHED": 4,
"SEGMENT_ASSIGNMENT_STATE_GROWING": 1,
"SEGMENT_ASSIGNMENT_STATE_FLUSHED": 2,
}
)
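With PENDING and SEALED removed, the segment assignment state machine collapses to GROWING -> FLUSHED. A minimal sketch, written as if inside this streamingpb package, of gating new assignments on the two remaining states; the helper name is illustrative and not part of this change:
// isAssignable reports whether a segment can still accept new assignments.
// Only GROWING segments are writable; FLUSHED is the terminal state.
func isAssignable(state SegmentAssignmentState) bool {
	return state == SegmentAssignmentState_SEGMENT_ASSIGNMENT_STATE_GROWING
}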
@ -4535,101 +4526,96 @@ var file_streaming_proto_rawDesc = []byte{
0x00, 0x12, 0x19, 0x0a, 0x15, 0x56, 0x43, 0x48, 0x41, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x53, 0x54,
0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16,
0x56, 0x43, 0x48, 0x41, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x44,
0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x02, 0x2a, 0xd5, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x67,
0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x8a, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x67,
0x6d, 0x65, 0x6e, 0x74, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74,
0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x45, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x41,
0x53, 0x53, 0x49, 0x47, 0x4e, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f,
0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x45, 0x47,
0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x4d, 0x45, 0x4e, 0x54, 0x5f,
0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12,
0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x57, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12,
0x24, 0x0a, 0x20, 0x53, 0x45, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47,
0x4e, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x57,
0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x45, 0x47, 0x4d, 0x45, 0x4e, 0x54,
0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54,
0x45, 0x5f, 0x53, 0x45, 0x41, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x45,
0x47, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x4d, 0x45, 0x4e, 0x54,
0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x4c, 0x55, 0x53, 0x48, 0x45, 0x44, 0x10, 0x04,
0x32, 0x89, 0x01, 0x0a, 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4e, 0x6f,
0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6c,
0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74,
0x61, 0x74, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f,
0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f,
0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x22, 0x00, 0x32, 0xe8, 0x01, 0x0a,
0x1e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x42,
0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
0x62, 0x0a, 0x09, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x12, 0x28, 0x2e, 0x6d,
0x4e, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x4c, 0x55, 0x53,
0x48, 0x45, 0x44, 0x10, 0x02, 0x32, 0x89, 0x01, 0x0a, 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e,
0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x6d, 0x69, 0x6c, 0x76,
0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e,
0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74,
0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x69, 0x6c, 0x76,
0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e,
0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x22,
0x00, 0x32, 0xe8, 0x01, 0x0a, 0x1e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x43,
0x6f, 0x6f, 0x72, 0x64, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x12, 0x62, 0x0a, 0x09, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73,
0x74, 0x12, 0x28, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x72, 0x6f, 0x61, 0x64,
0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x69,
0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x03, 0x41, 0x63, 0x6b, 0x12,
0x2b, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73,
0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61,
0x73, 0x74, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6d,
0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e,
0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x03, 0x41, 0x63, 0x6b, 0x12, 0x2b, 0x2e, 0x6d, 0x69, 0x6c,
0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
0x69, 0x6e, 0x67, 0x2e, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x41, 0x63, 0x6b,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73,
0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x41,
0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0xa5, 0x01, 0x0a,
0x1f, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x41,
0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x12, 0x81, 0x01, 0x0a, 0x12, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x44,
0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x12, 0x31, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
0x2e, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0xa5, 0x01, 0x0a, 0x1f, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e,
0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x12,
0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76,
0x65, 0x72, 0x12, 0x31, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x41, 0x73, 0x73, 0x69,
0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x41,
0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65,
0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x32,
0xe1, 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64,
0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
0x60, 0x0a, 0x07, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x6d, 0x69, 0x6c,
0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f,
0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x6d, 0x69, 0x6c,
0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
0x69, 0x6e, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x64,
0x75, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30,
0x01, 0x12, 0x60, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x26, 0x2e, 0x6d,
0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6f,
0x6e, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28,
0x01, 0x30, 0x01, 0x32, 0xbe, 0x03, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x39,
0x69, 0x6e, 0x67, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x69,
0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x28, 0x01, 0x30, 0x01, 0x32, 0xe1, 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69,
0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x12, 0x60, 0x0a, 0x07, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x12,
0x26, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73,
0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
0x2e, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x60, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d,
0x65, 0x12, 0x26, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75,
0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6d, 0x69, 0x6c, 0x76,
0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69,
0x6e, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x32, 0xbe, 0x03, 0x0a, 0x1b, 0x53, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65,
0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x06, 0x41, 0x73, 0x73,
0x69, 0x67, 0x6e, 0x12, 0x39, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65,
0x72, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a,
0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74,
0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x41, 0x73, 0x73, 0x69,
0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6d, 0x69, 0x6c, 0x76,
0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69,
0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65,
0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x81, 0x01, 0x0a, 0x06, 0x52, 0x65, 0x6d, 0x6f,
0x76, 0x65, 0x12, 0x39, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x81, 0x01, 0x0a,
0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x39, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61,
0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e,
0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x65, 0x6d, 0x6f, 0x76,
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x96, 0x01, 0x0a, 0x0d,
0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x40, 0x2e,
0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6c, 0x6c, 0x65,
0x63, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x41, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x73,
0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69,
0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6c,
0x6c, 0x65, 0x63, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2d, 0x69, 0x6f, 0x2f, 0x6d, 0x69, 0x6c,
0x76, 0x75, 0x73, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x12, 0x96, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x53, 0x74, 0x61, 0x74,
0x75, 0x73, 0x12, 0x40, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74,
0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67,
0x65, 0x72, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2d, 0x69,
0x6f, 0x2f, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x76, 0x32, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x70,
0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@ -12,7 +12,6 @@ type (
)
const (
TxnStateBegin TxnState = messagespb.TxnState_TxnBegin
TxnStateInFlight TxnState = messagespb.TxnState_TxnInFlight
TxnStateOnCommit TxnState = messagespb.TxnState_TxnOnCommit
TxnStateCommitted TxnState = messagespb.TxnState_TxnCommitted

View File

@ -5,15 +5,34 @@ package walimplstest
import (
"context"
"math/rand"
"github.com/cockroachdb/errors"
"go.uber.org/atomic"
"github.com/milvus-io/milvus/pkg/v2/proto/streamingpb"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls"
"github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/helper"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)
var _ walimpls.WALImpls = &walImpls{}
var (
_ walimpls.WALImpls = &walImpls{}
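// fenced records the channels currently fenced for testing; enableFenceError toggles random transient append error injection.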
fenced = typeutil.NewConcurrentSet[string]()
enableFenceError = atomic.NewBool(true)
)
// EnableFenced enables fenced mode for the given channel.
func EnableFenced(channel string) {
fenced.Insert(channel)
}
// DisableFenced disables fenced mode for the given channel.
func DisableFenced(channel string) {
fenced.Remove(channel)
}
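A hedged sketch of how a test in this package could use the switches above to force the fenced error path; the test and channel names are illustrative and not part of this change:
func TestAppendOnFencedChannel(t *testing.T) {
	channel := "fenced-test-channel" // hypothetical channel name
	EnableFenced(channel)
	defer DisableFenced(channel)
	// Any Append on a walImpls bound to this channel now fails with an error
	// marked as walimpls.ErrFenced, detectable via errors.Is(err, walimpls.ErrFenced).
}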
type walImpls struct {
helper.WALHelper
@ -28,6 +47,12 @@ func (w *walImpls) Append(ctx context.Context, msg message.MutableMessage) (mess
if w.Channel().AccessMode != types.AccessModeRW {
panic("write on a wal that is not in read-write mode")
}
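// Appends to a fenced channel fail with an error marked as walimpls.ErrFenced, simulating a fenced WAL.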
if fenced.Contain(w.Channel().Name) {
return nil, errors.Mark(errors.New("err"), walimpls.ErrFenced)
}
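// When error injection is enabled, fail roughly 1 in 30 appends with a transient error to exercise the append retry path.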
if enableFenceError.Load() && rand.Int31n(30) == 0 {
return nil, errors.New("random error")
}
return w.datas.Append(ctx, msg)
}

View File

@ -7,5 +7,7 @@ import (
)
func TestWALImplsTest(t *testing.T) {
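// Disable random error injection so the shared WAL test framework runs deterministically.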
enableFenceError.Store(false)
defer enableFenceError.Store(true)
walimpls.NewWALImplsTestFramework(t, 100, &openerBuilder{}).Run()
}

View File

@ -252,6 +252,7 @@ queryNode:
cpu: "0.5"
memory: 512Mi
streamingNode:
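# run two streaming nodes in the e2e deployment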
replicas: 2
resources:
limits:
cpu: "2"