Flush by flush channels (#26859)

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>

parent 0ff4ddc76c
commit dd2cb1d44a
go.mod
@@ -21,7 +21,7 @@ require (
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
 	github.com/klauspost/compress v1.16.5
 	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d
-	github.com/milvus-io/milvus-proto/go-api/v2 v2.3.1-0.20230905091144-d8ce91954095
+	github.com/milvus-io/milvus-proto/go-api/v2 v2.3.1-0.20230907032509-23756009c643
 	github.com/milvus-io/milvus/pkg v0.0.1
 	github.com/minio/minio-go/v7 v7.0.56
 	github.com/prometheus/client_golang v1.14.0
go.sum
@@ -111,6 +111,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bigsheeper/milvus-proto/go-api/v2 v2.0.0-20230906082705-4e84b4cf314b h1:OPGVqhJrJMOAUJeEuboKGTIsrllhJb2+ZgQMDBEdbS0=
+github.com/bigsheeper/milvus-proto/go-api/v2 v2.0.0-20230906082705-4e84b4cf314b/go.mod h1:1OIl0v5PQeNxIJhCvY+K55CBUOYDZevw9g9380u1Wek=
 github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
 github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/bits-and-blooms/bloom/v3 v3.0.1 h1:Inlf0YXbgehxVjMPmCGv86iMCKMGPPrPSHtBF5yRHwA=
@@ -576,8 +578,8 @@ github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/le
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b h1:TfeY0NxYxZzUfIfYe5qYDBzt4ZYRqzUjTR6CvUzjat8=
 github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b/go.mod h1:iwW+9cWfIzzDseEBCCeDSN5SD16Tidvy8cwQ7ZY8Qj4=
-github.com/milvus-io/milvus-proto/go-api/v2 v2.3.1-0.20230905091144-d8ce91954095 h1:PyN9bVl/joOroIZKizJlDaI4wa9Zd84P6nNDm/wYUgY=
-github.com/milvus-io/milvus-proto/go-api/v2 v2.3.1-0.20230905091144-d8ce91954095/go.mod h1:1OIl0v5PQeNxIJhCvY+K55CBUOYDZevw9g9380u1Wek=
+github.com/milvus-io/milvus-proto/go-api/v2 v2.3.1-0.20230907032509-23756009c643 h1:3MXEYckliGnyepZeLDrhn+speelsoRKU1IwD8JrxXMo=
+github.com/milvus-io/milvus-proto/go-api/v2 v2.3.1-0.20230907032509-23756009c643/go.mod h1:1OIl0v5PQeNxIJhCvY+K55CBUOYDZevw9g9380u1Wek=
 github.com/milvus-io/pulsar-client-go v0.6.10 h1:eqpJjU+/QX0iIhEo3nhOqMNXL+TyInAs1IAHZCrCM/A=
 github.com/milvus-io/pulsar-client-go v0.6.10/go.mod h1:lQqCkgwDF8YFYjKA+zOheTk1tev2B+bKj5j7+nm8M1w=
 github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
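Note: the go-api bump above accompanies the flush-ts plumbing used throughout this commit; the .proto definitions themselves are not part of this diff. The sketch below only shows the shape of the two new request messages as they are constructed later in the commit — the field names are taken from that later usage, the helper name is illustrative, and it assumes the code sits inside the milvus module (datapb is an internal package).

package example

import "github.com/milvus-io/milvus/internal/proto/datapb"

// buildFlushRequests is illustrative only: the literals mirror how the new
// messages are built in the DataCoord and DataNode changes below.
func buildFlushRequests(flushTs uint64, collectionID int64, channels []string) (*datapb.FlushChannelsRequest, *datapb.GetFlushStateRequest) {
	// DataCoord -> DataNode: sync everything consumed up to flushTs on these channels.
	flushChannels := &datapb.FlushChannelsRequest{
		FlushTs:  flushTs,
		Channels: channels,
	}
	// Caller -> DataCoord: is the collection flushed at flushTs?
	// (SegmentIDs is still honored for the older segment-based check.)
	getState := &datapb.GetFlushStateRequest{
		CollectionID: collectionID,
		FlushTs:      flushTs,
	}
	return flushChannels, getState
}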
@@ -106,6 +106,29 @@ func (c *Cluster) Flush(ctx context.Context, nodeID int64, channel string,
 	return nil
 }
 
+func (c *Cluster) FlushChannels(ctx context.Context, nodeID int64, flushTs Timestamp, channels []string) error {
+	if len(channels) == 0 {
+		return nil
+	}
+
+	for _, channel := range channels {
+		if !c.channelManager.Match(nodeID, channel) {
+			return fmt.Errorf("channel %s is not watched on node %d", channel, nodeID)
+		}
+	}
+
+	req := &datapb.FlushChannelsRequest{
+		Base: commonpbutil.NewMsgBase(
+			commonpbutil.WithSourceID(paramtable.GetNodeID()),
+			commonpbutil.WithTargetID(nodeID),
+		),
+		FlushTs:  flushTs,
+		Channels: channels,
+	}
+
+	return c.sessionManager.FlushChannels(ctx, nodeID, req)
+}
+
 // Import sends import requests to DataNodes whose ID==nodeID.
 func (c *Cluster) Import(ctx context.Context, nodeID int64, it *datapb.ImportTaskRequest) {
 	c.sessionManager.Import(ctx, nodeID, it)
@@ -303,6 +303,10 @@ func (c *mockDataNodeClient) SyncSegments(ctx context.Context, req *datapb.SyncS
 	return &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil
 }
 
+func (c *mockDataNodeClient) FlushChannels(ctx context.Context, req *datapb.FlushChannelsRequest) (*commonpb.Status, error) {
+	return &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil
+}
+
 func (c *mockDataNodeClient) Stop() error {
 	c.state = commonpb.StateCode_Abnormal
 	return nil
@@ -3375,28 +3375,46 @@ func TestPostFlush(t *testing.T) {
 
 func TestGetFlushState(t *testing.T) {
 	t.Run("get flush state with all flushed segments", func(t *testing.T) {
-		svr := &Server{
-			meta: &meta{
-				segments: &SegmentsInfo{
-					segments: map[int64]*SegmentInfo{
-						1: {
+		meta, err := newMemoryMeta()
+		assert.NoError(t, err)
+		svr := newTestServerWithMeta(t, nil, meta)
+		defer closeTestServer(t, svr)
+
+		err = meta.AddSegment(&SegmentInfo{
 			SegmentInfo: &datapb.SegmentInfo{
 				ID:    1,
 				State: commonpb.SegmentState_Flushed,
 			},
-						},
-						2: {
+		})
+		assert.NoError(t, err)
+		err = meta.AddSegment(&SegmentInfo{
 			SegmentInfo: &datapb.SegmentInfo{
 				ID:    2,
 				State: commonpb.SegmentState_Flushed,
 			},
-						},
-					},
+		})
+		assert.NoError(t, err)
+
+		var (
+			vchannel   = "ch1"
+			collection = int64(0)
+		)
+
+		svr.channelManager = &ChannelManager{
+			store: &ChannelStore{
+				channelsInfo: map[int64]*NodeChannelInfo{
+					1: {NodeID: 1, Channels: []*channel{{Name: vchannel, CollectionID: collection}}},
 				},
 			},
 		}
-		svr.stateCode.Store(commonpb.StateCode_Healthy)
-		resp, err := svr.GetFlushState(context.TODO(), &milvuspb.GetFlushStateRequest{SegmentIDs: []int64{1, 2}})
+
+		err = svr.meta.UpdateChannelCheckpoint(vchannel, &msgpb.MsgPosition{
+			MsgID:     []byte{1},
+			Timestamp: 12,
+		})
+		assert.NoError(t, err)
+
+		resp, err := svr.GetFlushState(context.TODO(), &datapb.GetFlushStateRequest{SegmentIDs: []int64{1, 2}})
 		assert.NoError(t, err)
 		assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
 			Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
@@ -3405,29 +3423,46 @@ func TestGetFlushState(t *testing.T) {
 	})
 
 	t.Run("get flush state with unflushed segments", func(t *testing.T) {
-		svr := &Server{
-			meta: &meta{
-				segments: &SegmentsInfo{
-					segments: map[int64]*SegmentInfo{
-						1: {
+		meta, err := newMemoryMeta()
+		assert.NoError(t, err)
+		svr := newTestServerWithMeta(t, nil, meta)
+		defer closeTestServer(t, svr)
+
+		err = meta.AddSegment(&SegmentInfo{
 			SegmentInfo: &datapb.SegmentInfo{
 				ID:    1,
 				State: commonpb.SegmentState_Flushed,
 			},
-						},
-						2: {
+		})
+		assert.NoError(t, err)
+		err = meta.AddSegment(&SegmentInfo{
 			SegmentInfo: &datapb.SegmentInfo{
 				ID:    2,
 				State: commonpb.SegmentState_Sealed,
 			},
-						},
-					},
+		})
+		assert.NoError(t, err)
+
+		var (
+			vchannel   = "ch1"
+			collection = int64(0)
+		)
+
+		svr.channelManager = &ChannelManager{
+			store: &ChannelStore{
+				channelsInfo: map[int64]*NodeChannelInfo{
+					1: {NodeID: 1, Channels: []*channel{{Name: vchannel, CollectionID: collection}}},
 				},
 			},
 		}
-		svr.stateCode.Store(commonpb.StateCode_Healthy)
 
-		resp, err := svr.GetFlushState(context.TODO(), &milvuspb.GetFlushStateRequest{SegmentIDs: []int64{1, 2}})
+		err = svr.meta.UpdateChannelCheckpoint(vchannel, &msgpb.MsgPosition{
+			MsgID:     []byte{1},
+			Timestamp: 12,
+		})
+		assert.NoError(t, err)
+
+		resp, err := svr.GetFlushState(context.TODO(), &datapb.GetFlushStateRequest{SegmentIDs: []int64{1, 2}})
 		assert.NoError(t, err)
 		assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
 			Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
@@ -3436,35 +3471,145 @@ func TestGetFlushState(t *testing.T) {
 	})
 
 	t.Run("get flush state with compacted segments", func(t *testing.T) {
-		svr := &Server{
-			meta: &meta{
-				segments: &SegmentsInfo{
-					segments: map[int64]*SegmentInfo{
-						1: {
+		meta, err := newMemoryMeta()
+		assert.NoError(t, err)
+		svr := newTestServerWithMeta(t, nil, meta)
+		defer closeTestServer(t, svr)
+
+		err = meta.AddSegment(&SegmentInfo{
 			SegmentInfo: &datapb.SegmentInfo{
 				ID:    1,
 				State: commonpb.SegmentState_Flushed,
 			},
-						},
-						2: {
+		})
+		assert.NoError(t, err)
+		err = meta.AddSegment(&SegmentInfo{
 			SegmentInfo: &datapb.SegmentInfo{
 				ID:    2,
 				State: commonpb.SegmentState_Dropped,
 			},
-						},
-					},
+		})
+		assert.NoError(t, err)
+
+		var (
+			vchannel   = "ch1"
+			collection = int64(0)
+		)
+
+		svr.channelManager = &ChannelManager{
+			store: &ChannelStore{
+				channelsInfo: map[int64]*NodeChannelInfo{
+					1: {NodeID: 1, Channels: []*channel{{Name: vchannel, CollectionID: collection}}},
 				},
 			},
 		}
-		svr.stateCode.Store(commonpb.StateCode_Healthy)
-		resp, err := svr.GetFlushState(context.TODO(), &milvuspb.GetFlushStateRequest{SegmentIDs: []int64{1, 2}})
+
+		err = svr.meta.UpdateChannelCheckpoint(vchannel, &msgpb.MsgPosition{
+			MsgID:     []byte{1},
+			Timestamp: 12,
+		})
+		assert.NoError(t, err)
+
+		resp, err := svr.GetFlushState(context.TODO(), &datapb.GetFlushStateRequest{SegmentIDs: []int64{1, 2}})
 		assert.NoError(t, err)
 		assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
 			Status:  &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
 			Flushed: true,
 		}, resp)
 	})
 
+	t.Run("channel flushed", func(t *testing.T) {
+		meta, err := newMemoryMeta()
+		assert.NoError(t, err)
+		svr := newTestServerWithMeta(t, nil, meta)
+		defer closeTestServer(t, svr)
+
+		var (
+			vchannel   = "ch1"
+			collection = int64(0)
+		)
+
+		svr.channelManager = &ChannelManager{
+			store: &ChannelStore{
+				channelsInfo: map[int64]*NodeChannelInfo{
+					1: {NodeID: 1, Channels: []*channel{{Name: vchannel, CollectionID: collection}}},
+				},
+			},
+		}
+
+		err = svr.meta.UpdateChannelCheckpoint(vchannel, &msgpb.MsgPosition{
+			MsgID:     []byte{1},
+			Timestamp: 12,
+		})
+		assert.NoError(t, err)
+
+		resp, err := svr.GetFlushState(context.Background(), &datapb.GetFlushStateRequest{
+			FlushTs:      11,
+			CollectionID: collection,
+		})
+		assert.NoError(t, err)
+		assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
+			Status:  &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
+			Flushed: true,
+		}, resp)
+	})
+
+	t.Run("channel unflushed", func(t *testing.T) {
+		meta, err := newMemoryMeta()
+		assert.NoError(t, err)
+		svr := newTestServerWithMeta(t, nil, meta)
+		defer closeTestServer(t, svr)
+
+		var (
+			vchannel   = "ch1"
+			collection = int64(0)
+		)
+
+		svr.channelManager = &ChannelManager{
+			store: &ChannelStore{
+				channelsInfo: map[int64]*NodeChannelInfo{
+					1: {NodeID: 1, Channels: []*channel{{Name: vchannel, CollectionID: collection}}},
+				},
+			},
+		}
+
+		err = svr.meta.UpdateChannelCheckpoint(vchannel, &msgpb.MsgPosition{
+			MsgID:     []byte{1},
+			Timestamp: 10,
+		})
+		assert.NoError(t, err)
+
+		resp, err := svr.GetFlushState(context.Background(), &datapb.GetFlushStateRequest{
+			FlushTs:      11,
+			CollectionID: collection,
+		})
+		assert.NoError(t, err)
+		assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
+			Status:  &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
+			Flushed: false,
+		}, resp)
+	})
+
+	t.Run("no channels", func(t *testing.T) {
+		meta, err := newMemoryMeta()
+		assert.NoError(t, err)
+		svr := newTestServerWithMeta(t, nil, meta)
+		defer closeTestServer(t, svr)
+
+		var (
+			collection = int64(0)
+		)
+
+		resp, err := svr.GetFlushState(context.Background(), &datapb.GetFlushStateRequest{
+			FlushTs:      11,
+			CollectionID: collection,
+		})
+		assert.NoError(t, err)
+		assert.EqualValues(t, &milvuspb.GetFlushStateResponse{
+			Status:  &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
+			Flushed: false,
+		}, resp)
+	})
 }
 
 func TestGetFlushAllState(t *testing.T) {
@@ -42,6 +42,7 @@ import (
 	"github.com/milvus-io/milvus/pkg/util/merr"
 	"github.com/milvus-io/milvus/pkg/util/metricsinfo"
 	"github.com/milvus-io/milvus/pkg/util/paramtable"
+	"github.com/milvus-io/milvus/pkg/util/retry"
 	"github.com/milvus-io/milvus/pkg/util/tsoutil"
 	"github.com/milvus-io/milvus/pkg/util/typeutil"
 )
@@ -116,24 +117,46 @@ func (s *Server) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
 	flushSegmentIDs := make([]UniqueID, 0, len(segments))
 	for _, segment := range segments {
 		if segment != nil &&
-			(segment.GetState() == commonpb.SegmentState_Flushed ||
-				segment.GetState() == commonpb.SegmentState_Flushing) &&
+			(isFlushState(segment.GetState())) &&
 			!sealedSegmentsIDDict[segment.GetID()] {
 			flushSegmentIDs = append(flushSegmentIDs, segment.GetID())
 		}
 	}
 
+	err = retry.Do(ctx, func() error {
+		for _, channelInfo := range s.channelManager.GetChannels() {
+			nodeID := channelInfo.NodeID
+			channels := lo.Filter(channelInfo.Channels, func(channel *channel, _ int) bool {
+				return channel.CollectionID == req.GetCollectionID()
+			})
+			channelNames := lo.Map(channels, func(channel *channel, _ int) string {
+				return channel.Name
+			})
+			err = s.cluster.FlushChannels(ctx, nodeID, ts, channelNames)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		resp.Status = merr.Status(err)
+		return resp, nil
+	}
+
 	log.Info("flush response with segments",
 		zap.Int64("collectionID", req.GetCollectionID()),
 		zap.Int64s("sealSegments", sealedSegmentIDs),
 		zap.Int64s("flushSegments", flushSegmentIDs),
-		zap.Time("timeOfSeal", timeOfSeal))
+		zap.Time("timeOfSeal", timeOfSeal),
+		zap.Time("flushTs", tsoutil.PhysicalTime(ts)))
 	resp.Status.ErrorCode = commonpb.ErrorCode_Success
 	resp.DbID = req.GetDbID()
 	resp.CollectionID = req.GetCollectionID()
 	resp.SegmentIDs = sealedSegmentIDs
 	resp.TimeOfSeal = timeOfSeal.Unix()
 	resp.FlushSegmentIDs = flushSegmentIDs
+	resp.FlushTs = ts
 	return resp, nil
 }
 
@@ -1245,36 +1268,70 @@ func (s *Server) WatchChannels(ctx context.Context, req *datapb.WatchChannelsReq
 	return resp, nil
 }
 
-// GetFlushState gets the flush state of multiple segments
-func (s *Server) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
-	log := log.Ctx(ctx).WithRateGroup("dc.GetFlushState", 1, 60)
+// GetFlushState gets the flush state of the collection based on the provided flush ts and segment IDs.
+func (s *Server) GetFlushState(ctx context.Context, req *datapb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
+	log := log.Ctx(ctx).With(zap.Int64("collection", req.GetCollectionID()),
+		zap.Time("flushTs", tsoutil.PhysicalTime(req.GetFlushTs()))).
+		WithRateGroup("dc.GetFlushState", 1, 60)
 	resp := &milvuspb.GetFlushStateResponse{Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError}}
 	if s.isClosed() {
-		log.Warn("DataCoord receive GetFlushState request, server closed",
-			zap.Int64s("segmentIDs", req.GetSegmentIDs()), zap.Int("len", len(req.GetSegmentIDs())))
+		log.Warn("DataCoord receive GetFlushState request, server closed")
 		resp.Status.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
 		return resp, nil
 	}
 
+	if len(req.GetSegmentIDs()) > 0 {
 		var unflushed []UniqueID
 		for _, sid := range req.GetSegmentIDs() {
 			segment := s.meta.GetHealthySegment(sid)
-		// segment is nil if it was compacted or it's a empty segment and is set to dropped
-		if segment == nil || segment.GetState() == commonpb.SegmentState_Flushing ||
-			segment.GetState() == commonpb.SegmentState_Flushed {
+			// segment is nil if it was compacted, or it's an empty segment and is set to dropped
+			if segment == nil || isFlushState(segment.GetState()) {
 				continue
 			}
 			unflushed = append(unflushed, sid)
 		}
 
 		if len(unflushed) != 0 {
 			log.RatedInfo(10, "DataCoord receive GetFlushState request, Flushed is false", zap.Int64s("unflushed", unflushed), zap.Int("len", len(unflushed)))
 			resp.Flushed = false
-	} else {
-		log.Info("DataCoord receive GetFlushState request, Flushed is true", zap.Int64s("segmentIDs", req.GetSegmentIDs()), zap.Int("len", len(req.GetSegmentIDs())))
-		resp.Flushed = true
-	}
 			resp.Status.ErrorCode = commonpb.ErrorCode_Success
+			return resp, nil
+		}
+	}
+
+	channels := make([]string, 0)
+	for _, channelInfo := range s.channelManager.GetChannels() {
+		filtered := lo.Filter(channelInfo.Channels, func(channel *channel, _ int) bool {
+			return channel.CollectionID == req.GetCollectionID()
+		})
+		channelNames := lo.Map(filtered, func(channel *channel, _ int) string {
+			return channel.Name
+		})
+		channels = append(channels, channelNames...)
+	}
+
+	if len(channels) == 0 {
+		resp.Flushed = false
+		resp.Status.ErrorCode = commonpb.ErrorCode_Success
+		log.Warn("GetFlushState failed, no channels found")
+		return resp, nil
+	}
+
+	for _, channel := range channels {
+		cp := s.meta.GetChannelCheckpoint(channel)
+		if cp == nil || cp.GetTimestamp() < req.GetFlushTs() {
+			resp.Flushed = false
+			resp.Status.ErrorCode = commonpb.ErrorCode_Success
+			log.RatedInfo(10, "GetFlushState failed, channel unflushed", zap.String("channel", channel),
+				zap.Time("CP", tsoutil.PhysicalTime(cp.GetTimestamp())),
+				zap.Duration("lag", tsoutil.PhysicalTime(req.GetFlushTs()).Sub(tsoutil.PhysicalTime(cp.GetTimestamp()))))
+			return resp, nil
+		}
+	}
+
+	resp.Flushed = true
+	resp.Status.ErrorCode = commonpb.ErrorCode_Success
+	log.Info("GetFlushState all flushed")
+
 	return resp, nil
 }
 
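The two DataCoord changes above fit together: Flush now stamps a flush ts into its response, and GetFlushState can be asked about that ts instead of (or in addition to) concrete segment IDs. The following is a minimal caller-side sketch of that flow, not code from this commit — it assumes it compiles inside the milvus module (datapb is an internal package), and the helper name waitForFlush is illustrative.

package example

import (
	"context"
	"time"

	"github.com/milvus-io/milvus/internal/proto/datapb"
)

// waitForFlush calls Flush, takes the FlushTs from the response, and polls
// GetFlushState until DataCoord reports that every vchannel checkpoint of the
// collection has passed that ts.
func waitForFlush(ctx context.Context, dc datapb.DataCoordClient, collectionID int64) error {
	flushResp, err := dc.Flush(ctx, &datapb.FlushRequest{CollectionID: collectionID})
	if err != nil {
		return err
	}
	for {
		state, err := dc.GetFlushState(ctx, &datapb.GetFlushStateRequest{
			CollectionID: collectionID,
			FlushTs:      flushResp.GetFlushTs(),
		})
		if err == nil && state.GetFlushed() {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
}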
@@ -300,6 +300,24 @@ func (c *SessionManager) GetCompactionState() map[int64]*datapb.CompactionStateR
 	return rst
 }
 
+func (c *SessionManager) FlushChannels(ctx context.Context, nodeID int64, req *datapb.FlushChannelsRequest) error {
+	log := log.Ctx(ctx).With(zap.Int64("nodeID", nodeID))
+	cli, err := c.getClient(ctx, nodeID)
+	if err != nil {
+		log.Warn("failed to get client", zap.Error(err))
+		return err
+	}
+
+	resp, err := cli.FlushChannels(ctx, req)
+	err = VerifyResponse(resp, err)
+	if err != nil {
+		log.Warn("SessionManager.FlushChannels failed", zap.Error(err))
+		return err
+	}
+	log.Info("SessionManager.FlushChannels successfully")
+	return nil
+}
+
 func (c *SessionManager) getClient(ctx context.Context, nodeID int64) (types.DataNode, error) {
 	c.sessions.RLock()
 	session, ok := c.sessions.data[nodeID]
@@ -99,7 +99,11 @@ type Channel interface {
 
 	// getTotalMemorySize returns the sum of memory sizes of segments.
 	getTotalMemorySize() int64
-	forceToSync()
+	setIsHighMemory(b bool)
+	getIsHighMemory() bool
+
+	getFlushTs() Timestamp
+	setFlushTs(ts Timestamp)
 
 	close()
 }
@@ -114,7 +118,15 @@ type ChannelMeta struct {
 	segMu    sync.RWMutex
 	segments map[UniqueID]*Segment
 
-	needToSync *atomic.Bool
+	// isHighMemory is intended to trigger the syncing of segments
+	// when segment's buffer consumes a significant amount of memory.
+	isHighMemory *atomic.Bool
+
+	// flushTs is intended to trigger:
+	// 1. the syncing of segments when consumed ts exceeds flushTs;
+	// 2. the updating of channelCP when channelCP exceeds flushTs.
+	flushTs *atomic.Uint64
 
 	syncPolicies []segmentSyncPolicy
 
 	metaService *metaService
@@ -147,10 +159,12 @@ func newChannel(channelName string, collID UniqueID, schema *schemapb.Collection
 
 		segments: make(map[UniqueID]*Segment),
 
-		needToSync: atomic.NewBool(false),
+		isHighMemory: atomic.NewBool(false),
+		flushTs:      atomic.NewUint64(math.MaxUint64),
 		syncPolicies: []segmentSyncPolicy{
 			syncPeriodically(),
 			syncMemoryTooHigh(),
+			syncSegmentsAtTs(),
 		},
 
 		metaService: metaService,
@@ -279,7 +293,7 @@ func (c *ChannelMeta) listSegmentIDsToSync(ts Timestamp) []UniqueID {
 
 	segIDsToSync := typeutil.NewUniqueSet()
 	for _, policy := range c.syncPolicies {
-		segments := policy(validSegs, ts, c.needToSync)
+		segments := policy(validSegs, c, ts)
 		for _, segID := range segments {
 			segIDsToSync.Insert(segID)
 		}
@@ -797,6 +811,7 @@ func (c *ChannelMeta) listNotFlushedSegmentIDs() []UniqueID {
 }
 
 func (c *ChannelMeta) getChannelCheckpoint(ttPos *msgpb.MsgPosition) *msgpb.MsgPosition {
+	log := log.With().WithRateGroup("ChannelMeta", 1, 60)
 	c.segMu.RLock()
 	defer c.segMu.RUnlock()
 	channelCP := &msgpb.MsgPosition{Timestamp: math.MaxUint64}
@@ -818,8 +833,7 @@ func (c *ChannelMeta) getChannelCheckpoint(ttPos *msgpb.MsgPosition) *msgpb.MsgP
 				channelCP = db.startPos
 			}
 		}
-		// TODO: maybe too many logs would print
-		log.Debug("getChannelCheckpoint for segment", zap.Int64("segmentID", seg.segmentID),
+		log.RatedDebug(10, "getChannelCheckpoint for segment", zap.Int64("segmentID", seg.segmentID),
 			zap.Bool("isCurIBEmpty", seg.curInsertBuf == nil),
 			zap.Bool("isCurDBEmpty", seg.curDeleteBuf == nil),
 			zap.Int("len(hisIB)", len(seg.historyInsertBuf)),
@@ -926,8 +940,12 @@ func (c *ChannelMeta) evictHistoryDeleteBuffer(segmentID UniqueID, endPos *msgpb
 		log.Warn("cannot find segment when evictHistoryDeleteBuffer", zap.Int64("segmentID", segmentID))
 }
 
-func (c *ChannelMeta) forceToSync() {
-	c.needToSync.Store(true)
+func (c *ChannelMeta) setIsHighMemory(b bool) {
+	c.isHighMemory.Store(b)
+}
+
+func (c *ChannelMeta) getIsHighMemory() bool {
+	return c.isHighMemory.Load()
 }
 
 func (c *ChannelMeta) getTotalMemorySize() int64 {
@@ -940,6 +958,14 @@ func (c *ChannelMeta) getTotalMemorySize() int64 {
 	return res
 }
 
+func (c *ChannelMeta) getFlushTs() Timestamp {
+	return c.flushTs.Load()
+}
+
+func (c *ChannelMeta) setFlushTs(ts Timestamp) {
+	c.flushTs.Store(ts)
+}
+
 func (c *ChannelMeta) close() {
 	c.closed.Store(true)
 }
@@ -20,7 +20,9 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
+	"fmt"
 	"math"
+	"math/rand"
 	"os"
 	"testing"
 	"time"
@@ -203,7 +205,7 @@ func TestDataSyncService_Start(t *testing.T) {
 	defer cancel()
 
 	// init data node
-	insertChannelName := "by-dev-rootcoord-dml"
+	insertChannelName := fmt.Sprintf("by-dev-rootcoord-dml-%d", rand.Int())
 
 	Factory := &MetaFactory{}
 	collMeta := Factory.GetCollectionMeta(UniqueID(0), "coll1", schemapb.DataType_Int64)
@@ -288,7 +290,7 @@ func TestDataSyncService_Start(t *testing.T) {
 
 	timeRange := TimeRange{
 		timestampMin: 0,
-		timestampMax: math.MaxUint64,
+		timestampMax: math.MaxUint64 - 1,
 	}
 	dataFactory := NewDataFactory()
 	insertMessages := dataFactory.GetMsgStreamTsInsertMsgs(2, insertChannelName, tsoutil.GetCurrentTime())
@@ -318,7 +320,7 @@ func TestDataSyncService_Start(t *testing.T) {
 			Base: &commonpb.MsgBase{
 				MsgType:   commonpb.MsgType_TimeTick,
 				MsgID:     UniqueID(0),
-				Timestamp: math.MaxUint64,
+				Timestamp: math.MaxUint64 - 1,
 				SourceID:  0,
 			},
 		},
@@ -451,7 +451,7 @@ func (ibNode *insertBufferNode) FillInSyncTasks(fgMsg *flowGraphMsg, seg2Upload
 func (ibNode *insertBufferNode) Sync(fgMsg *flowGraphMsg, seg2Upload []UniqueID, endPosition *msgpb.MsgPosition) []UniqueID {
 	syncTasks := ibNode.FillInSyncTasks(fgMsg, seg2Upload)
 	segmentsToSync := make([]UniqueID, 0, len(syncTasks))
-	ibNode.channel.(*ChannelMeta).needToSync.Store(false)
+	ibNode.channel.setIsHighMemory(false)
 
 	for _, task := range syncTasks {
 		log := log.With(zap.Int64("segmentID", task.segmentID),
@@ -343,7 +343,7 @@ func TestFlowGraphInsertBufferNode_AutoFlush(t *testing.T) {
 	channel := &ChannelMeta{
 		collectionID: collMeta.ID,
 		segments:     make(map[UniqueID]*Segment),
-		needToSync:   atomic.NewBool(false),
+		isHighMemory: atomic.NewBool(false),
 	}
 
 	channel.metaService = newMetaService(mockRootCoord, collMeta.ID)
@@ -590,7 +590,7 @@ func TestInsertBufferNodeRollBF(t *testing.T) {
 	channel := &ChannelMeta{
 		collectionID: collMeta.ID,
 		segments:     make(map[UniqueID]*Segment),
-		needToSync:   atomic.NewBool(false),
+		isHighMemory: atomic.NewBool(false),
 	}
 
 	channel.metaService = newMetaService(mockRootCoord, collMeta.ID)
@@ -108,7 +108,7 @@ func (fm *flowgraphManager) execute(totalMemory uint64) {
 		return channels[i].bufferSize > channels[j].bufferSize
 	})
 	if fg, ok := fm.flowgraphs.Get(channels[0].channel); ok { // sync the first channel with the largest memory usage
-		fg.channel.forceToSync()
+		fg.channel.setIsHighMemory(true)
 		log.Info("notify flowgraph to sync",
 			zap.String("channel", channels[0].channel), zap.Int64("bufferSize", channels[0].bufferSize))
 	}
@@ -222,14 +222,14 @@ func TestFlowGraphManager(t *testing.T) {
 			err = fg.channel.addSegment(addSegmentReq{segID: 0})
 			assert.NoError(t, err)
 			fg.channel.getSegment(0).memorySize = memorySize
-			fg.channel.(*ChannelMeta).needToSync.Store(false)
+			fg.channel.setIsHighMemory(false)
 		}
 		fm.execute(test.totalMemory)
 		for i, needToSync := range test.expectNeedToSync {
 			vchannel := fmt.Sprintf("%s%d", channelPrefix, i)
 			fg, ok := fm.flowgraphs.Get(vchannel)
 			assert.True(t, ok)
-			assert.Equal(t, needToSync, fg.channel.(*ChannelMeta).needToSync.Load())
+			assert.Equal(t, needToSync, fg.channel.getIsHighMemory())
 		}
 	}
 })
@@ -19,6 +19,7 @@ package datanode
 import (
 	"context"
 	"fmt"
+	"math"
 	"reflect"
 	"time"
 
@@ -73,30 +74,40 @@ func (ttn *ttNode) Operate(in []Msg) []Msg {
 	fgMsg := in[0].(*flowGraphMsg)
 	if fgMsg.IsCloseMsg() {
 		if len(fgMsg.endPositions) > 0 {
+			channelPos := ttn.channel.getChannelCheckpoint(fgMsg.endPositions[0])
 			log.Info("flowgraph is closing, force update channel CP",
-				zap.Uint64("endTs", fgMsg.endPositions[0].GetTimestamp()),
-				zap.String("channel", fgMsg.endPositions[0].GetChannelName()))
-			ttn.updateChannelCP(fgMsg.endPositions[0])
+				zap.Time("cpTs", tsoutil.PhysicalTime(channelPos.GetTimestamp())),
+				zap.String("channel", channelPos.GetChannelName()))
+			ttn.updateChannelCP(channelPos)
 		}
 		return in
 	}
 
 	curTs, _ := tsoutil.ParseTS(fgMsg.timeRange.timestampMax)
+	channelPos := ttn.channel.getChannelCheckpoint(fgMsg.endPositions[0])
+	log := log.With(zap.String("channel", ttn.vChannelName),
+		zap.Time("cpTs", tsoutil.PhysicalTime(channelPos.GetTimestamp())))
 	if curTs.Sub(ttn.lastUpdateTime) >= updateChanCPInterval {
-		ttn.updateChannelCP(fgMsg.endPositions[0])
+		if err := ttn.updateChannelCP(channelPos); err == nil {
 			ttn.lastUpdateTime = curTs
+			log.Info("update channel cp periodically")
+			return []Msg{}
+		}
+	}
+
+	if channelPos.GetTimestamp() >= ttn.channel.getFlushTs() {
+		if err := ttn.updateChannelCP(channelPos); err == nil {
+			ttn.lastUpdateTime = curTs
+			log.Info("update channel cp at updateTs", zap.Time("updateTs", tsoutil.PhysicalTime(ttn.channel.getFlushTs())))
+			ttn.channel.setFlushTs(math.MaxUint64)
+		}
 	}
 
 	return []Msg{}
 }
 
-func (ttn *ttNode) updateChannelCP(ttPos *msgpb.MsgPosition) {
-	channelPos := ttn.channel.getChannelCheckpoint(ttPos)
-	if channelPos == nil || channelPos.MsgID == nil {
-		log.Warn("updateChannelCP failed, get nil check point", zap.String("vChannel", ttn.vChannelName))
-		return
-	}
-	channelCPTs, _ := tsoutil.ParseTS(channelPos.Timestamp)
+func (ttn *ttNode) updateChannelCP(channelPos *msgpb.MsgPosition) error {
+	channelCPTs, _ := tsoutil.ParseTS(channelPos.GetTimestamp())
 
 	ctx, cancel := context.WithTimeout(context.Background(), updateChanCPTimeout)
 	defer cancel()
@@ -110,13 +121,14 @@ func (ttn *ttNode) updateChannelCP(ttPos *msgpb.MsgPosition) {
 	if err = funcutil.VerifyResponse(resp, err); err != nil {
 		log.Warn("UpdateChannelCheckpoint failed", zap.String("channel", ttn.vChannelName),
 			zap.Time("channelCPTs", channelCPTs), zap.Error(err))
-		return
+		return err
 	}
 
 	log.Info("UpdateChannelCheckpoint success",
 		zap.String("channel", ttn.vChannelName),
-		zap.Uint64("cpTs", channelPos.Timestamp),
+		zap.Uint64("cpTs", channelPos.GetTimestamp()),
 		zap.Time("cpTime", channelCPTs))
+	return nil
 }
 
 func newTTNode(config *nodeConfig, dc types.DataCoord) (*ttNode, error) {
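Read together with the DataCoord-side GetFlushState change earlier in this commit, the handshake added here boils down to one comparison on the channel checkpoint: the ttNode forces a checkpoint update once the checkpoint reaches the channel's flushTs, and DataCoord answers Flushed=true once the stored checkpoint has reached the requested flush ts. A small illustrative restatement (assumed helper, not part of the commit):

package example

import "github.com/milvus-io/milvus-proto/go-api/v2/msgpb"

// channelFlushedAt restates the shared condition: a channel counts as flushed
// at flushTs once its checkpoint timestamp has reached flushTs.
func channelFlushedAt(cp *msgpb.MsgPosition, flushTs uint64) bool {
	return cp != nil && cp.GetTimestamp() >= flushTs
}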
@@ -23,18 +23,18 @@ import (
 
 	"github.com/milvus-io/milvus/pkg/log"
 	"github.com/milvus-io/milvus/pkg/util/tsoutil"
-	"go.uber.org/atomic"
+	"github.com/samber/lo"
 	"go.uber.org/zap"
 )
 
 const minSyncSize = 0.5 * 1024 * 1024
 
 // segmentsSyncPolicy sync policy applies to segments
-type segmentSyncPolicy func(segments []*Segment, ts Timestamp, needToSync *atomic.Bool) []UniqueID
+type segmentSyncPolicy func(segments []*Segment, c Channel, ts Timestamp) []UniqueID
 
 // syncPeriodically get segmentSyncPolicy with segments sync periodically.
 func syncPeriodically() segmentSyncPolicy {
-	return func(segments []*Segment, ts Timestamp, _ *atomic.Bool) []UniqueID {
+	return func(segments []*Segment, c Channel, ts Timestamp) []UniqueID {
 		segmentsToSync := make([]UniqueID, 0)
 		for _, seg := range segments {
 			endPosTime := tsoutil.PhysicalTime(ts)
@@ -45,7 +45,7 @@ func syncPeriodically() segmentSyncPolicy {
 			}
 		}
 		if len(segmentsToSync) > 0 {
-			log.Info("sync segment periodically", zap.Int64s("segmentID", segmentsToSync))
+			log.Info("sync segment periodically", zap.Int64s("segmentIDs", segmentsToSync))
 		}
 		return segmentsToSync
 	}
@@ -53,8 +53,8 @@ func syncPeriodically() segmentSyncPolicy {
 
 // syncMemoryTooHigh force sync the largest segment.
 func syncMemoryTooHigh() segmentSyncPolicy {
-	return func(segments []*Segment, ts Timestamp, needToSync *atomic.Bool) []UniqueID {
-		if len(segments) == 0 || !needToSync.Load() {
+	return func(segments []*Segment, c Channel, _ Timestamp) []UniqueID {
+		if len(segments) == 0 || !c.getIsHighMemory() {
 			return nil
 		}
 		sort.Slice(segments, func(i, j int) bool {
@@ -74,3 +74,22 @@ func syncMemoryTooHigh() segmentSyncPolicy {
 		return syncSegments
 	}
 }
+
+// syncSegmentsAtTs returns a new segmentSyncPolicy, sync segments when ts exceeds ChannelMeta.flushTs
+func syncSegmentsAtTs() segmentSyncPolicy {
+	return func(segments []*Segment, c Channel, ts Timestamp) []UniqueID {
+		flushTs := c.getFlushTs()
+		if flushTs != 0 && ts >= flushTs {
+			segmentsWithBuffer := lo.Filter(segments, func(segment *Segment, _ int) bool {
+				return !segment.isBufferEmpty()
+			})
+			segmentIDs := lo.Map(segmentsWithBuffer, func(segment *Segment, _ int) UniqueID {
+				return segment.segmentID
+			})
+			log.Info("sync segment at ts", zap.Int64s("segmentIDs", segmentIDs),
+				zap.Time("ts", tsoutil.PhysicalTime(ts)), zap.Time("flushTs", tsoutil.PhysicalTime(flushTs)))
+			return segmentIDs
+		}
+		return nil
+	}
+}
@@ -18,14 +18,13 @@ package datanode
 
 import (
 	"fmt"
+	"math"
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/assert"
-	"go.uber.org/atomic"
-
 	"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
 	"github.com/milvus-io/milvus/pkg/util/tsoutil"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestSyncPeriodically(t *testing.T) {
@@ -55,7 +54,7 @@ func TestSyncPeriodically(t *testing.T) {
 			if test.isBufferEmpty {
 				segment.curInsertBuf = nil
 			}
-			res := policy([]*Segment{segment}, tsoutil.ComposeTSByTime(test.endPosTs, 0), nil)
+			res := policy([]*Segment{segment}, nil, tsoutil.ComposeTSByTime(test.endPosTs, 0))
 			assert.Equal(t, test.shouldSyncNum, len(res))
 		})
 	}
@@ -65,7 +64,7 @@ func TestSyncMemoryTooHigh(t *testing.T) {
 	tests := []struct {
 		testName        string
 		syncSegmentNum  int
-		needToSync      bool
+		isHighMemory    bool
 		memorySizesInMB []float64
 		shouldSyncSegs  []UniqueID
 	}{
@@ -75,7 +74,7 @@ func TestSyncMemoryTooHigh(t *testing.T) {
 			[]float64{1, 2, 3, 4, 5}, []UniqueID{5, 4}},
 		{"test normal 3", 5, true,
 			[]float64{1, 2, 3, 4, 5}, []UniqueID{5, 4, 3, 2, 1}},
-		{"test needToSync false", 3, false,
+		{"test isHighMemory false", 3, false,
 			[]float64{1, 2, 3, 4, 5}, []UniqueID{}},
 		{"test syncSegmentNum 1", 1, true,
 			[]float64{1, 2, 3, 4, 5}, []UniqueID{5}},
@@ -85,6 +84,8 @@ func TestSyncMemoryTooHigh(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.testName, func(t *testing.T) {
+			channel := newChannel("channel", 0, nil, nil, nil)
+			channel.setIsHighMemory(test.isHighMemory)
 			Params.Save(Params.DataNodeCfg.MemoryForceSyncSegmentNum.Key, fmt.Sprintf("%d", test.syncSegmentNum))
 			policy := syncMemoryTooHigh()
 			segments := make([]*Segment, len(test.memorySizesInMB))
@@ -93,8 +94,39 @@ func TestSyncMemoryTooHigh(t *testing.T) {
 					segmentID: UniqueID(i + 1), memorySize: int64(test.memorySizesInMB[i] * 1024 * 1024),
 				}
 			}
-			segs := policy(segments, 0, atomic.NewBool(test.needToSync))
+			segs := policy(segments, channel, 0)
 			assert.ElementsMatch(t, segs, test.shouldSyncSegs)
 		})
 	}
 }
+
+func TestSyncSegmentsAtTs(t *testing.T) {
+	tests := []struct {
+		testName      string
+		ts            Timestamp
+		flushTs       Timestamp
+		shouldSyncNum int
+	}{
+		{"test ts < flushTs", 100, 200, 0},
+		{"test ts > flushTs", 300, 200, 1},
+		{"test ts = flushTs", 100, 100, 1},
+		{"test flushTs = 0", 100, 0, 0},
+		{"test flushTs = maxUint64", 100, math.MaxUint64, 0},
+	}
+
+	for _, test := range tests {
+		t.Run(test.testName, func(t *testing.T) {
+			channel := newChannel("channel", 0, nil, nil, nil)
+			channel.setFlushTs(test.flushTs)
+
+			segment := &Segment{}
+			segment.setInsertBuffer(&BufferData{
+				startPos: &msgpb.MsgPosition{},
+			})
+
+			policy := syncSegmentsAtTs()
+			res := policy([]*Segment{segment}, channel, test.ts)
+			assert.Equal(t, test.shouldSyncNum, len(res))
+		})
+	}
+}
@@ -520,6 +520,30 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
 	return resp, nil
 }
 
+func (node *DataNode) FlushChannels(ctx context.Context, req *datapb.FlushChannelsRequest) (*commonpb.Status, error) {
+	log := log.Ctx(ctx).With(zap.Int64("nodeId", paramtable.GetNodeID()),
+		zap.Time("flushTs", tsoutil.PhysicalTime(req.GetFlushTs())),
+		zap.Int("len(channels)", len(req.GetChannels())))
+
+	log.Info("DataNode receives FlushChannels request")
+
+	if !node.isHealthy() {
+		err := merr.WrapErrServiceNotReady(node.GetStateCode().String())
+		log.Warn("DataNode.FlushChannels failed", zap.Error(err))
+		return merr.Status(err), nil
+	}
+
+	for _, channel := range req.GetChannels() {
+		fg, ok := node.flowgraphManager.getFlowgraphService(channel)
+		if !ok {
+			return merr.Status(merr.WrapErrChannelNotFound(channel)), nil
+		}
+		fg.channel.setFlushTs(req.GetFlushTs())
+	}
+
+	return merr.Status(nil), nil
+}
+
 func (node *DataNode) getPartitions(ctx context.Context, dbName string, collectionName string) (map[string]int64, error) {
 	req := &milvuspb.ShowPartitionsRequest{
 		Base: commonpbutil.NewMsgBase(
@@ -807,3 +807,36 @@ func (s *DataNodeServicesSuite) TestResendSegmentStats() {
 	s.Assert().True(merr.Ok(resp.GetStatus()))
 	s.Assert().ElementsMatch([]UniqueID{0, 1, 2}, resp.GetSegResent())
 }
+
+func (s *DataNodeServicesSuite) TestFlushChannels() {
+	dmChannelName := "fake-by-dev-rootcoord-dml-channel-TestFlushChannels"
+
+	vChan := &datapb.VchannelInfo{
+		CollectionID:        1,
+		ChannelName:         dmChannelName,
+		UnflushedSegmentIds: []int64{},
+		FlushedSegmentIds:   []int64{},
+	}
+
+	err := s.node.flowgraphManager.addAndStart(s.node, vChan, nil, genTestTickler())
+	s.Require().NoError(err)
+
+	fgService, ok := s.node.flowgraphManager.getFlowgraphService(dmChannelName)
+	s.Require().True(ok)
+
+	flushTs := Timestamp(100)
+
+	req := &datapb.FlushChannelsRequest{
+		Base: &commonpb.MsgBase{
+			TargetID: s.node.GetSession().ServerID,
+		},
+		FlushTs:  flushTs,
+		Channels: []string{dmChannelName},
+	}
+
+	status, err := s.node.FlushChannels(s.ctx, req)
+	s.Assert().NoError(err)
+	s.Assert().True(merr.Ok(status))
+
+	s.Assert().True(fgService.channel.getFlushTs() == flushTs)
+}
@@ -442,8 +442,8 @@ func (c *Client) WatchChannels(ctx context.Context, req *datapb.WatchChannelsReq
 	})
 }
 
-// GetFlushState gets the flush state of multiple segments
-func (c *Client) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
+// GetFlushState gets the flush state of the collection based on the provided flush ts and segment IDs.
+func (c *Client) GetFlushState(ctx context.Context, req *datapb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
 	return wrapGrpcCall(ctx, c, func(client datapb.DataCoordClient) (*milvuspb.GetFlushStateResponse, error) {
 		return client.GetFlushState(ctx, req)
 	})
@@ -368,8 +368,8 @@ func (s *Server) WatchChannels(ctx context.Context, req *datapb.WatchChannelsReq
 	return s.dataCoord.WatchChannels(ctx, req)
 }
 
-// GetFlushState gets the flush state of multiple segments
-func (s *Server) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
+// GetFlushState gets the flush state of the collection based on the provided flush ts and segment IDs.
+func (s *Server) GetFlushState(ctx context.Context, req *datapb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
 	return s.dataCoord.GetFlushState(ctx, req)
 }
 
@@ -207,7 +207,7 @@ func (m *MockDataCoord) WatchChannels(ctx context.Context, req *datapb.WatchChan
 	return m.watchChannelsResp, m.err
 }
 
-func (m *MockDataCoord) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
+func (m *MockDataCoord) GetFlushState(ctx context.Context, req *datapb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
 	return m.getFlushStateResp, m.err
 }
 
@@ -227,3 +227,10 @@ func (c *Client) SyncSegments(ctx context.Context, req *datapb.SyncSegmentsReque
 		return client.SyncSegments(ctx, req)
 	})
 }
+
+// FlushChannels notifies DataNode to sync all the segments belongs to the target channels.
+func (c *Client) FlushChannels(ctx context.Context, req *datapb.FlushChannelsRequest) (*commonpb.Status, error) {
+	return wrapGrpcCall(ctx, c, func(client datapb.DataNodeClient) (*commonpb.Status, error) {
+		return client.FlushChannels(ctx, req)
+	})
+}
@@ -400,3 +400,7 @@ func (s *Server) AddImportSegment(ctx context.Context, request *datapb.AddImport
 func (s *Server) SyncSegments(ctx context.Context, request *datapb.SyncSegmentsRequest) (*commonpb.Status, error) {
 	return s.datanode.SyncSegments(ctx, request)
 }
+
+func (s *Server) FlushChannels(ctx context.Context, req *datapb.FlushChannelsRequest) (*commonpb.Status, error) {
+	return s.datanode.FlushChannels(ctx, req)
+}
@ -148,6 +148,10 @@ func (m *MockDataNode) SyncSegments(ctx context.Context, req *datapb.SyncSegment
 	return m.status, m.err
 }
 
+func (m *MockDataNode) FlushChannels(ctx context.Context, req *datapb.FlushChannelsRequest) (*commonpb.Status, error) {
+    return m.status, m.err
+}
+
 // /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 type mockDataCoord struct {
 	types.DataCoord
@ -924,7 +924,7 @@ func (s *Server) GetCompactionStateWithPlans(ctx context.Context, req *milvuspb.
 	return s.proxy.GetCompactionStateWithPlans(ctx, req)
 }
 
-// GetFlushState gets the flush state of multiple segments
+// GetFlushState gets the flush state of the collection based on the provided flush ts and segment IDs.
 func (s *Server) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
 	return s.proxy.GetFlushState(ctx, req)
 }
@ -430,7 +430,7 @@ func (m *MockDataCoord) WatchChannels(ctx context.Context, req *datapb.WatchChan
 	return nil, nil
 }
 
-func (m *MockDataCoord) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
+func (m *MockDataCoord) GetFlushState(ctx context.Context, req *datapb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
 	return nil, nil
 }
 
@ -806,15 +806,15 @@ func (_c *MockDataCoord_GetFlushAllState_Call) RunAndReturn(run func(context.Con
 }
 
 // GetFlushState provides a mock function with given fields: ctx, req
-func (_m *MockDataCoord) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
+func (_m *MockDataCoord) GetFlushState(ctx context.Context, req *datapb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
 	ret := _m.Called(ctx, req)
 
 	var r0 *milvuspb.GetFlushStateResponse
 	var r1 error
-	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error)); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, *datapb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error)); ok {
 		return rf(ctx, req)
 	}
-	if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.GetFlushStateRequest) *milvuspb.GetFlushStateResponse); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, *datapb.GetFlushStateRequest) *milvuspb.GetFlushStateResponse); ok {
 		r0 = rf(ctx, req)
 	} else {
 		if ret.Get(0) != nil {
@ -822,7 +822,7 @@ func (_m *MockDataCoord) GetFlushState(ctx context.Context, req *milvuspb.GetFlu
 		}
 	}
 
-	if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.GetFlushStateRequest) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, *datapb.GetFlushStateRequest) error); ok {
 		r1 = rf(ctx, req)
 	} else {
 		r1 = ret.Error(1)
@ -838,14 +838,14 @@ type MockDataCoord_GetFlushState_Call struct {
 
 // GetFlushState is a helper method to define mock.On call
 // - ctx context.Context
-// - req *milvuspb.GetFlushStateRequest
+// - req *datapb.GetFlushStateRequest
 func (_e *MockDataCoord_Expecter) GetFlushState(ctx interface{}, req interface{}) *MockDataCoord_GetFlushState_Call {
 	return &MockDataCoord_GetFlushState_Call{Call: _e.mock.On("GetFlushState", ctx, req)}
 }
 
-func (_c *MockDataCoord_GetFlushState_Call) Run(run func(ctx context.Context, req *milvuspb.GetFlushStateRequest)) *MockDataCoord_GetFlushState_Call {
+func (_c *MockDataCoord_GetFlushState_Call) Run(run func(ctx context.Context, req *datapb.GetFlushStateRequest)) *MockDataCoord_GetFlushState_Call {
 	_c.Call.Run(func(args mock.Arguments) {
-		run(args[0].(context.Context), args[1].(*milvuspb.GetFlushStateRequest))
+		run(args[0].(context.Context), args[1].(*datapb.GetFlushStateRequest))
 	})
 	return _c
 }
@ -855,7 +855,7 @@ func (_c *MockDataCoord_GetFlushState_Call) Return(_a0 *milvuspb.GetFlushStateRe
 	return _c
 }
 
-func (_c *MockDataCoord_GetFlushState_Call) RunAndReturn(run func(context.Context, *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error)) *MockDataCoord_GetFlushState_Call {
+func (_c *MockDataCoord_GetFlushState_Call) RunAndReturn(run func(context.Context, *datapb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error)) *MockDataCoord_GetFlushState_Call {
 	_c.Call.Return(run)
 	return _c
 }
@ -142,6 +142,61 @@ func (_c *MockDataNode_Compaction_Call) RunAndReturn(run func(context.Context, *
 	return _c
 }
 
+// FlushChannels provides a mock function with given fields: ctx, req
+func (_m *MockDataNode) FlushChannels(ctx context.Context, req *datapb.FlushChannelsRequest) (*commonpb.Status, error) {
+    ret := _m.Called(ctx, req)
+
+    var r0 *commonpb.Status
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushChannelsRequest) (*commonpb.Status, error)); ok {
+        return rf(ctx, req)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, *datapb.FlushChannelsRequest) *commonpb.Status); ok {
+        r0 = rf(ctx, req)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*commonpb.Status)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, *datapb.FlushChannelsRequest) error); ok {
+        r1 = rf(ctx, req)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockDataNode_FlushChannels_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FlushChannels'
+type MockDataNode_FlushChannels_Call struct {
+    *mock.Call
+}
+
+// FlushChannels is a helper method to define mock.On call
+// - ctx context.Context
+// - req *datapb.FlushChannelsRequest
+func (_e *MockDataNode_Expecter) FlushChannels(ctx interface{}, req interface{}) *MockDataNode_FlushChannels_Call {
+    return &MockDataNode_FlushChannels_Call{Call: _e.mock.On("FlushChannels", ctx, req)}
+}
+
+func (_c *MockDataNode_FlushChannels_Call) Run(run func(ctx context.Context, req *datapb.FlushChannelsRequest)) *MockDataNode_FlushChannels_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(*datapb.FlushChannelsRequest))
+    })
+    return _c
+}
+
+func (_c *MockDataNode_FlushChannels_Call) Return(_a0 *commonpb.Status, _a1 error) *MockDataNode_FlushChannels_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockDataNode_FlushChannels_Call) RunAndReturn(run func(context.Context, *datapb.FlushChannelsRequest) (*commonpb.Status, error)) *MockDataNode_FlushChannels_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
 // FlushSegments provides a mock function with given fields: ctx, req
 func (_m *MockDataNode) FlushSegments(ctx context.Context, req *datapb.FlushSegmentsRequest) (*commonpb.Status, error) {
 	ret := _m.Called(ctx, req)
@ -54,7 +54,7 @@ service DataCoord {
   rpc GetCompactionStateWithPlans(milvus.GetCompactionPlansRequest) returns (milvus.GetCompactionPlansResponse) {}
 
   rpc WatchChannels(WatchChannelsRequest) returns (WatchChannelsResponse) {}
-  rpc GetFlushState(milvus.GetFlushStateRequest) returns (milvus.GetFlushStateResponse) {}
+  rpc GetFlushState(GetFlushStateRequest) returns (milvus.GetFlushStateResponse) {}
   rpc DropVirtualChannel(DropVirtualChannelRequest) returns (DropVirtualChannelResponse) {}
 
   rpc SetSegmentState(SetSegmentStateRequest) returns (SetSegmentStateResponse) {}
@ -108,6 +108,8 @@ service DataNode {
   rpc ResendSegmentStats(ResendSegmentStatsRequest) returns(ResendSegmentStatsResponse) {}
 
   rpc AddImportSegment(AddImportSegmentRequest) returns(AddImportSegmentResponse) {}
+
+  rpc FlushChannels(FlushChannelsRequest) returns(common.Status) {}
 }
 
 message FlushRequest {
@ -125,6 +127,13 @@ message FlushResponse {
   repeated int64 segmentIDs = 4; // newly sealed segments
   repeated int64 flushSegmentIDs = 5; // old flushed segment
   int64 timeOfSeal = 6;
+  uint64 flush_ts = 7;
+}
+
+message FlushChannelsRequest {
+  common.MsgBase base = 1;
+  uint64 flush_ts = 2;
+  repeated string channels = 3;
 }
 
 message SegmentIDRequest {
@ -677,3 +686,11 @@ message ReportDataNodeTtMsgsRequest {
   common.MsgBase base = 1;
   repeated msg.DataNodeTtMsg msgs = 2; // -1 means whole collection.
 }
+
+message GetFlushStateRequest {
+  repeated int64 segmentIDs = 1;
+  uint64 flush_ts = 2;
+  string db_name = 3;
+  string collection_name = 4;
+  int64 collectionID = 5;
+}
File diff suppressed because it is too large
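To show how the two new messages fit together, here is a small sketch of the internal call the proxy now makes to DataCoord, mirroring the proxy change further down in this diff; the datapb and types import paths are assumptions.

package example

import (
    "context"

    "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
    "github.com/milvus-io/milvus/internal/proto/datapb" // assumed import path
    "github.com/milvus-io/milvus/internal/types"        // assumed import path
)

// getFlushState forwards the user-supplied segment IDs and flush_ts to
// DataCoord together with the resolved collection ID, matching the new
// DataCoord-side GetFlushStateRequest above.
func getFlushState(ctx context.Context, dc types.DataCoord, segIDs []int64, flushTs uint64, collectionID int64) (*milvuspb.GetFlushStateResponse, error) {
    return dc.GetFlushState(ctx, &datapb.GetFlushStateRequest{
        SegmentIDs:   segIDs,
        FlushTs:      flushTs,
        CollectionID: collectionID,
    })
}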
@ -254,7 +254,7 @@ func (coord *DataCoordMock) WatchChannels(ctx context.Context, req *datapb.Watch
 	return &datapb.WatchChannelsResponse{}, nil
 }
 
-func (coord *DataCoordMock) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
+func (coord *DataCoordMock) GetFlushState(ctx context.Context, req *datapb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
 	return &milvuspb.GetFlushStateResponse{}, nil
 }
 
@ -3600,7 +3600,7 @@ func (node *Proxy) GetCompactionStateWithPlans(ctx context.Context, req *milvusp
 	return resp, err
 }
 
-// GetFlushState gets the flush state of multiple segments
+// GetFlushState gets the flush state of the collection based on the provided flush ts and segment IDs.
 func (node *Proxy) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
 	ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-GetFlushState")
 	defer sp.End()
@ -3617,11 +3617,28 @@ func (node *Proxy) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStat
 		return resp, nil
 	}
 
-	resp, err = node.dataCoord.GetFlushState(ctx, req)
+	if err = validateCollectionName(req.GetCollectionName()); err != nil {
+        resp.Status = merr.Status(err)
+        return resp, nil
+    }
+    collectionID, err := globalMetaCache.GetCollectionID(ctx, req.GetDbName(), req.GetCollectionName())
+    if err != nil {
+        resp.Status = merr.Status(err)
+        return resp, nil
+    }
+
+    stateReq := &datapb.GetFlushStateRequest{
+        SegmentIDs: req.GetSegmentIDs(),
+        FlushTs:    req.GetFlushTs(),
+    }
+    stateReq.CollectionID = collectionID
+
+    resp, err = node.dataCoord.GetFlushState(ctx, stateReq)
 	if err != nil {
 		log.Warn("failed to get flush state response",
 			zap.Error(err))
-		return nil, err
+		resp.Status = merr.Status(err)
+        return resp, nil
 	}
 	log.Debug("received get flush state response",
 		zap.Any("response", resp))
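From the client side, the public GetFlushState request now carries the database name, collection name and the flush_ts returned by Flush. A minimal polling sketch, modeled on the integration helper updated later in this change; the types import path is an assumption.

package example

import (
    "context"
    "time"

    "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
    "github.com/milvus-io/milvus/internal/types" // assumed import path
)

// waitUntilFlushed polls Proxy.GetFlushState with the collection name and the
// flush_ts obtained from Flush until the proxy reports Flushed or ctx ends.
func waitUntilFlushed(ctx context.Context, p types.ProxyComponent, dbName, collection string, segIDs []int64, flushTs uint64) error {
    for {
        resp, err := p.GetFlushState(ctx, &milvuspb.GetFlushStateRequest{
            SegmentIDs:     segIDs,
            FlushTs:        flushTs,
            DbName:         dbName,
            CollectionName: collection,
        })
        if err == nil && resp.GetFlushed() {
            return nil
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(500 * time.Millisecond):
        }
    }
}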
@ -4041,10 +4041,24 @@ func Test_GetCompactionStateWithPlans(t *testing.T) {
 
 func Test_GetFlushState(t *testing.T) {
 	t.Run("normal test", func(t *testing.T) {
+        originCache := globalMetaCache
+        m := NewMockCache(t)
+        m.On("GetCollectionID",
+            mock.Anything,
+            mock.AnythingOfType("string"),
+            mock.AnythingOfType("string"),
+        ).Return(UniqueID(1), nil)
+        globalMetaCache = m
+        defer func() {
+            globalMetaCache = originCache
+        }()
+
 		datacoord := &DataCoordMock{}
 		proxy := &Proxy{dataCoord: datacoord}
 		proxy.stateCode.Store(commonpb.StateCode_Healthy)
-		resp, err := proxy.GetFlushState(context.TODO(), nil)
+		resp, err := proxy.GetFlushState(context.TODO(), &milvuspb.GetFlushStateRequest{
+            CollectionName: "coll",
+        })
 		assert.EqualValues(t, &milvuspb.GetFlushStateResponse{}, resp)
 		assert.NoError(t, err)
 	})
@ -1286,6 +1286,7 @@ func (ft *flushTask) Execute(ctx context.Context) error {
 	coll2Segments := make(map[string]*schemapb.LongArray)
 	flushColl2Segments := make(map[string]*schemapb.LongArray)
 	coll2SealTimes := make(map[string]int64)
+	coll2FlushTs := make(map[string]Timestamp)
 	for _, collName := range ft.CollectionNames {
 		collID, err := globalMetaCache.GetCollectionID(ctx, ft.GetDbName(), collName)
 		if err != nil {
@ -1309,6 +1310,7 @@ func (ft *flushTask) Execute(ctx context.Context) error {
 		coll2Segments[collName] = &schemapb.LongArray{Data: resp.GetSegmentIDs()}
 		flushColl2Segments[collName] = &schemapb.LongArray{Data: resp.GetFlushSegmentIDs()}
 		coll2SealTimes[collName] = resp.GetTimeOfSeal()
+		coll2FlushTs[collName] = resp.GetFlushTs()
 	}
 	ft.result = &milvuspb.FlushResponse{
 		Status: merr.Status(nil),
@ -1316,6 +1318,7 @@ func (ft *flushTask) Execute(ctx context.Context) error {
 		CollSegIDs:      coll2Segments,
 		FlushCollSegIDs: flushColl2Segments,
 		CollSealTimes:   coll2SealTimes,
+		CollFlushTs:     coll2FlushTs,
 	}
 	return nil
 }
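Callers read the new per-collection flush timestamp out of the Flush response alongside the sealed segment IDs. A small sketch, using only getters that appear in the tests below:

package example

import "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"

// flushInfo pulls the per-collection results a caller needs for the new
// GetFlushState call: the sealed segment IDs and the flush timestamp.
func flushInfo(resp *milvuspb.FlushResponse, collection string) (segIDs []int64, flushTs uint64, ok bool) {
    segs, hasSegs := resp.GetCollSegIDs()[collection]
    flushTs, hasTs := resp.GetCollFlushTs()[collection]
    if !hasSegs || !hasTs {
        return nil, 0, false
    }
    return segs.GetData(), flushTs, true
}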
@ -102,6 +102,9 @@ type DataNode interface {
 
 	// AddImportSegment puts the given import segment to current DataNode's flow graph.
 	AddImportSegment(ctx context.Context, req *datapb.AddImportSegmentRequest) (*datapb.AddImportSegmentResponse, error)
+
+	// FlushChannels notifies DataNode to sync all the segments belongs to the target channels.
+	FlushChannels(ctx context.Context, req *datapb.FlushChannelsRequest) (*commonpb.Status, error)
 }
 
 // DataNodeComponent is used by grpc server of DataNode
@ -147,7 +150,8 @@ type DataCoord interface {
 	Component
 	TimeTickProvider
 
-	// Flush notifies DataCoord to flush all current growing segments of specified Collection
+	// Flush notifies DataCoord to flush all current growing segments of specified Collection,
+	// and flush all the buffers in DataNode and MsgStream into storage.
 	// ctx is the context to control request deadline and cancellation
 	// req contains the request params, which are database name(not used for now) and collection id
 	//
@ -302,8 +306,8 @@ type DataCoord interface {
 
 	// WatchChannels notifies DataCoord to watch vchannels of a collection
 	WatchChannels(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error)
-	// GetFlushState gets the flush state of multiple segments
-	GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error)
+	// GetFlushState gets the flush state of the collection based on the provided flush ts and segment IDs.
+	GetFlushState(ctx context.Context, req *datapb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error)
 	// GetFlushAllState checks if all DML messages before `FlushAllTs` have been flushed.
 	GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushAllStateRequest) (*milvuspb.GetFlushAllStateResponse, error)
 	// SetSegmentState updates a segment's state explicitly.
@ -1360,7 +1364,7 @@ type ProxyComponent interface {
 	GetCompactionState(ctx context.Context, req *milvuspb.GetCompactionStateRequest) (*milvuspb.GetCompactionStateResponse, error)
 	ManualCompaction(ctx context.Context, req *milvuspb.ManualCompactionRequest) (*milvuspb.ManualCompactionResponse, error)
 	GetCompactionStateWithPlans(ctx context.Context, req *milvuspb.GetCompactionPlansRequest) (*milvuspb.GetCompactionPlansResponse, error)
-	// GetFlushState gets the flush state of multiple segments
+	// GetFlushState gets the flush state of the collection based on the provided flush ts and segment IDs.
 	GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error)
 	// GetFlushAllState checks if all DML messages before `FlushAllTs` have been flushed.
 	GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushAllStateRequest) (*milvuspb.GetFlushAllStateResponse, error)
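A minimal sketch of the added DataNode method as an implementer sees it; the stub below is illustrative only (the DataNode test at the top of this section suggests a real implementation records req.GetFlushTs() on the matching flow-graph channel), and the datapb import path is an assumption.

package example

import (
    "context"

    "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
    "github.com/milvus-io/milvus/internal/proto/datapb" // assumed import path
)

// noopDataNode sketches the shape of the method a DataNode implementation now
// needs; it accepts the request and reports success without doing any work.
type noopDataNode struct{}

func (n *noopDataNode) FlushChannels(ctx context.Context, req *datapb.FlushChannelsRequest) (*commonpb.Status, error) {
    _ = req.GetChannels() // vchannels to flush, up to req.GetFlushTs()
    return &commonpb.Status{}, nil
}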
@ -112,7 +112,7 @@ func (m *DataCoordClient) GetCompactionStateWithPlans(ctx context.Context, req *
 func (m *DataCoordClient) WatchChannels(ctx context.Context, req *datapb.WatchChannelsRequest, opts ...grpc.CallOption) (*datapb.WatchChannelsResponse, error) {
 	return &datapb.WatchChannelsResponse{}, m.Err
 }
-func (m *DataCoordClient) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest, opts ...grpc.CallOption) (*milvuspb.GetFlushStateResponse, error) {
+func (m *DataCoordClient) GetFlushState(ctx context.Context, req *datapb.GetFlushStateRequest, opts ...grpc.CallOption) (*milvuspb.GetFlushStateResponse, error) {
 	return &milvuspb.GetFlushStateResponse{}, m.Err
 }
 
@ -136,7 +136,7 @@ func (m *GrpcDataCoordClient) WatchChannels(ctx context.Context, req *datapb.Wat
 	return &datapb.WatchChannelsResponse{}, m.Err
 }
 
-func (m *GrpcDataCoordClient) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest, opts ...grpc.CallOption) (*milvuspb.GetFlushStateResponse, error) {
+func (m *GrpcDataCoordClient) GetFlushState(ctx context.Context, req *datapb.GetFlushStateRequest, opts ...grpc.CallOption) (*milvuspb.GetFlushStateResponse, error) {
 	return &milvuspb.GetFlushStateResponse{}, m.Err
 }
 
@ -80,3 +80,7 @@ func (m *GrpcDataNodeClient) AddImportSegment(ctx context.Context, in *datapb.Ad
 func (m *GrpcDataNodeClient) SyncSegments(ctx context.Context, in *datapb.SyncSegmentsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
 	return &commonpb.Status{}, m.Err
 }
+
+func (m *GrpcDataNodeClient) FlushChannels(ctx context.Context, in *datapb.FlushChannelsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
+    return &commonpb.Status{}, m.Err
+}
@ -152,12 +152,14 @@ func (s *TestGetVectorSuite) run() {
 	ids := segmentIDs.GetData()
 	s.Require().NotEmpty(segmentIDs)
 	s.Require().True(has)
+	flushTs, has := flushResp.GetCollFlushTs()[collection]
+	s.Require().True(has)
 
 	segments, err := s.Cluster.MetaWatcher.ShowSegments()
 	s.Require().NoError(err)
 	s.Require().NotEmpty(segments)
 
-	s.WaitForFlush(ctx, ids)
+	s.WaitForFlush(ctx, ids, flushTs, s.dbName, collection)
 
 	// create index
 	_, err = s.Cluster.Proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
@ -94,7 +94,9 @@ func (s *HelloMilvusSuite) TestHelloMilvus() {
 	s.NoError(err)
 	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
 	ids := segmentIDs.GetData()
-	s.NotEmpty(segmentIDs)
+	s.Require().NotEmpty(segmentIDs)
+	s.Require().True(has)
+	flushTs, has := flushResp.GetCollFlushTs()[collectionName]
 	s.True(has)
 
 	segments, err := c.MetaWatcher.ShowSegments()
@ -103,7 +105,7 @@ func (s *HelloMilvusSuite) TestHelloMilvus() {
 	for _, segment := range segments {
 		log.Info("ShowSegments result", zap.String("segment", segment.String()))
 	}
-	s.WaitForFlush(ctx, ids)
+	s.WaitForFlush(ctx, ids, flushTs, dbName, collectionName)
 
 	// create index
 	createIndexStatus, err := c.Proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
@ -68,9 +68,11 @@ func (s *GetIndexStatisticsSuite) TestGetIndexStatistics() {
 	s.NoError(err)
 	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
 	ids := segmentIDs.GetData()
-	s.NotEmpty(segmentIDs)
+	s.Require().NotEmpty(segmentIDs)
+	s.Require().True(has)
+	flushTs, has := flushResp.GetCollFlushTs()[collectionName]
 	s.Equal(true, has)
-	s.WaitForFlush(ctx, ids)
+	s.WaitForFlush(ctx, ids, flushTs, dbName, collectionName)
 
 	// create index
 	indexName := "_default"
@ -24,7 +24,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/cockroachdb/errors"
 	"github.com/milvus-io/milvus/pkg/common"
 	"github.com/milvus-io/milvus/tests/integration"
 	"github.com/stretchr/testify/suite"
@ -733,7 +732,10 @@ func (s *JSONExprSuite) insertFlushIndexLoad(ctx context.Context, dbName, collec
 	s.NoError(err)
 	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
 	ids := segmentIDs.GetData()
-	s.NotEmpty(segmentIDs)
+	s.Require().NotEmpty(segmentIDs)
+	s.Require().True(has)
+	flushTs, has := flushResp.GetCollFlushTs()[collectionName]
+	s.True(has)
 
 	segments, err := s.Cluster.MetaWatcher.ShowSegments()
 	s.NoError(err)
@ -741,28 +743,7 @@ func (s *JSONExprSuite) insertFlushIndexLoad(ctx context.Context, dbName, collec
 	for _, segment := range segments {
 		log.Info("ShowSegments result", zap.String("segment", segment.String()))
 	}
-
-	if has && len(ids) > 0 {
-		flushed := func() bool {
-			resp, err := s.Cluster.Proxy.GetFlushState(ctx, &milvuspb.GetFlushStateRequest{
-				SegmentIDs: ids,
-			})
-			if err != nil {
-				//panic(errors.New("GetFlushState failed"))
-				return false
-			}
-			return resp.GetFlushed()
-		}
-		for !flushed() {
-			// respect context deadline/cancel
-			select {
-			case <-ctx.Done():
-				panic(errors.New("deadline exceeded"))
-			default:
-			}
-			time.Sleep(500 * time.Millisecond)
-		}
-	}
+	s.WaitForFlush(ctx, ids, flushTs, dbName, collectionName)
 
 	// create index
 	createIndexStatus, err := s.Cluster.Proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
@ -22,7 +22,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/cockroachdb/errors"
 	"github.com/golang/protobuf/proto"
 	"github.com/stretchr/testify/suite"
 	"go.uber.org/zap"
@ -226,9 +225,6 @@ func (s *MetaWatcherSuite) TestShowReplicas() {
 		CollectionNames: []string{collectionName},
 	})
 	s.NoError(err)
-	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
-	ids := segmentIDs.GetData()
-	s.NotEmpty(segmentIDs)
 
 	segments, err := c.MetaWatcher.ShowSegments()
 	s.NoError(err)
@ -236,28 +232,13 @@ func (s *MetaWatcherSuite) TestShowReplicas() {
 	for _, segment := range segments {
 		log.Info("ShowSegments result", zap.String("segment", segment.String()))
 	}
-
-	if has && len(ids) > 0 {
-		flushed := func() bool {
-			resp, err := c.Proxy.GetFlushState(ctx, &milvuspb.GetFlushStateRequest{
-				SegmentIDs: ids,
-			})
-			if err != nil {
-				//panic(errors.New("GetFlushState failed"))
-				return false
-			}
-			return resp.GetFlushed()
-		}
-		for !flushed() {
-			// respect context deadline/cancel
-			select {
-			case <-ctx.Done():
-				panic(errors.New("deadline exceeded"))
-			default:
-			}
-			time.Sleep(500 * time.Millisecond)
-		}
-	}
+	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
+	ids := segmentIDs.GetData()
+	s.Require().NotEmpty(segmentIDs)
+	s.Require().True(has)
+	flushTs, has := flushResp.GetCollFlushTs()[collectionName]
+	s.True(has)
+	s.WaitForFlush(ctx, ids, flushTs, dbName, collectionName)
 
 	// create index
 	createIndexStatus, err := c.Proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
|||||||
@ -92,9 +92,11 @@ func (s *RangeSearchSuite) TestRangeSearchIP() {
|
|||||||
})
|
})
|
||||||
s.NoError(err)
|
s.NoError(err)
|
||||||
segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
|
segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
|
||||||
s.True(has)
|
|
||||||
ids := segmentIDs.GetData()
|
ids := segmentIDs.GetData()
|
||||||
s.NotEmpty(segmentIDs)
|
s.Require().NotEmpty(segmentIDs)
|
||||||
|
s.Require().True(has)
|
||||||
|
flushTs, has := flushResp.GetCollFlushTs()[collectionName]
|
||||||
|
s.True(has)
|
||||||
|
|
||||||
segments, err := c.MetaWatcher.ShowSegments()
|
segments, err := c.MetaWatcher.ShowSegments()
|
||||||
s.NoError(err)
|
s.NoError(err)
|
||||||
@ -102,7 +104,7 @@ func (s *RangeSearchSuite) TestRangeSearchIP() {
|
|||||||
for _, segment := range segments {
|
for _, segment := range segments {
|
||||||
log.Info("ShowSegments result", zap.String("segment", segment.String()))
|
log.Info("ShowSegments result", zap.String("segment", segment.String()))
|
||||||
}
|
}
|
||||||
s.WaitForFlush(ctx, ids)
|
s.WaitForFlush(ctx, ids, flushTs, dbName, collectionName)
|
||||||
|
|
||||||
// create index
|
// create index
|
||||||
createIndexStatus, err := c.Proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
|
createIndexStatus, err := c.Proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
|
||||||
@ -240,9 +242,11 @@ func (s *RangeSearchSuite) TestRangeSearchL2() {
 	})
 	s.NoError(err)
 	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
-	s.True(has)
 	ids := segmentIDs.GetData()
-	s.NotEmpty(segmentIDs)
+	s.Require().NotEmpty(segmentIDs)
+	s.Require().True(has)
+	flushTs, has := flushResp.GetCollFlushTs()[collectionName]
+	s.True(has)
 
 	segments, err := c.MetaWatcher.ShowSegments()
 	s.NoError(err)
@ -250,7 +254,7 @@ func (s *RangeSearchSuite) TestRangeSearchL2() {
 	for _, segment := range segments {
 		log.Info("ShowSegments result", zap.String("segment", segment.String()))
 	}
-	s.WaitForFlush(ctx, ids)
+	s.WaitForFlush(ctx, ids, flushTs, dbName, collectionName)
 
 	// create index
 	createIndexStatus, err := c.Proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
@ -118,11 +118,13 @@ func (s *RefreshConfigSuite) TestRefreshDefaultIndexName() {
 	})
 	s.NoError(err)
 	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
-	s.True(has)
 	ids := segmentIDs.GetData()
-	s.NotEmpty(segmentIDs)
+	s.Require().NotEmpty(segmentIDs)
+	s.Require().True(has)
+	flushTs, has := flushResp.GetCollFlushTs()[collectionName]
+	s.True(has)
 
-	s.WaitForFlush(ctx, ids)
+	s.WaitForFlush(ctx, ids, flushTs, dbName, collectionName)
 
 	_, err = c.Proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
 		CollectionName: collectionName,
@ -92,9 +92,11 @@ func (s *UpsertSuite) TestUpsert() {
 	})
 	s.NoError(err)
 	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
-	s.True(has)
 	ids := segmentIDs.GetData()
-	s.NotEmpty(segmentIDs)
+	s.Require().NotEmpty(segmentIDs)
+	s.Require().True(has)
+	flushTs, has := flushResp.GetCollFlushTs()[collectionName]
+	s.True(has)
 
 	segments, err := c.MetaWatcher.ShowSegments()
 	s.NoError(err)
@ -102,7 +104,7 @@ func (s *UpsertSuite) TestUpsert() {
 	for _, segment := range segments {
 		log.Info("ShowSegments result", zap.String("segment", segment.String()))
 	}
-	s.WaitForFlush(ctx, ids)
+	s.WaitForFlush(ctx, ids, flushTs, dbName, collectionName)
 
 	// create index
 	createIndexStatus, err := c.Proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
@ -26,10 +26,13 @@ import (
 	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
 )
 
-func (s *MiniClusterSuite) WaitForFlush(ctx context.Context, segIDs []int64) {
+func (s *MiniClusterSuite) WaitForFlush(ctx context.Context, segIDs []int64, flushTs uint64, dbName, collectionName string) {
 	flushed := func() bool {
 		resp, err := s.Cluster.Proxy.GetFlushState(ctx, &milvuspb.GetFlushStateRequest{
 			SegmentIDs: segIDs,
+			FlushTs:        flushTs,
+			DbName:         dbName,
+			CollectionName: collectionName,
 		})
 		if err != nil {
 			return false
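Putting the helper to use, the updated suites all follow the same pattern; a sketch, with flushAndWait as a hypothetical wrapper name:

package example

import (
    "context"

    "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
    "github.com/milvus-io/milvus/tests/integration"
)

// flushAndWait flushes one collection and blocks until the proxy reports the
// resulting segments as flushed, passing the new flush_ts through.
func flushAndWait(ctx context.Context, s *integration.MiniClusterSuite, dbName, collectionName string) error {
    flushResp, err := s.Cluster.Proxy.Flush(ctx, &milvuspb.FlushRequest{
        DbName:          dbName,
        CollectionNames: []string{collectionName},
    })
    if err != nil {
        return err
    }
    ids := flushResp.GetCollSegIDs()[collectionName].GetData()
    flushTs := flushResp.GetCollFlushTs()[collectionName]
    s.WaitForFlush(ctx, ids, flushTs, dbName, collectionName)
    return nil
}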
@ -12,7 +12,7 @@ allure-pytest==2.7.0
 pytest-print==0.2.1
 pytest-level==0.1.1
 pytest-xdist==2.5.0
-pymilvus==2.3.0b0.post1.dev127
+pymilvus==2.3.0.post1.dev4
 pytest-rerunfailures==9.1.1
 git+https://github.com/Projectplace/pytest-tags
 ndg-httpsclient