Mirror of https://gitee.com/milvus-io/milvus.git, synced 2025-12-07 01:28:27 +08:00
update target (#19296)
Signed-off-by: Wei Liu <wei.liu@zilliz.com>
parent 21b54709a2
commit c5cd92d36e
@@ -111,5 +111,4 @@ type QueryCoordCatalog interface {
 	ReleasePartition(collection int64, partitions ...int64) error
 	ReleaseReplicas(collectionID int64) error
 	ReleaseReplica(collection, replica int64) error
-	RemoveHandoffEvent(segmentInfo *querypb.SegmentInfo) error
 }
@@ -218,6 +218,7 @@ message SegmentLoadInfo {
   repeated FieldIndexInfo index_infos = 11;
   int64 segment_size = 12;
   string insert_channel = 13;
+  internal.MsgPosition start_position = 14;
 }
 
 message FieldIndexInfo {
@@ -434,6 +435,7 @@ message LeaderView {
   string channel = 2;
   map<int64, SegmentDist> segment_dist = 3;
   repeated int64 growing_segmentIDs = 4;
+  map<int64, internal.MsgPosition> growing_segments = 5;
 }
 
 message SegmentDist {
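Note (not part of this commit): a minimal consumer-side sketch of reading the two fields added above through the generated Go getters that this change introduces further below; the querypb import path is the usual generated-package location and the helper itself is hypothetical.

package example

import "github.com/milvus-io/milvus/internal/proto/querypb"

// describeNewFields is a hypothetical helper touching only the accessors added in this diff.
func describeNewFields(info *querypb.SegmentLoadInfo, view *querypb.LeaderView) (int, bool) {
	// start_position = 14: nil when the producer did not set it, so check before use.
	hasStart := info.GetStartPosition() != nil
	// growing_segments = 5: growing segment ID -> checkpoint position of that segment.
	growing := view.GetGrowingSegments()
	return len(growing), hasStart
}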
@@ -1440,6 +1440,7 @@ type SegmentLoadInfo struct {
 	IndexInfos []*FieldIndexInfo `protobuf:"bytes,11,rep,name=index_infos,json=indexInfos,proto3" json:"index_infos,omitempty"`
 	SegmentSize int64 `protobuf:"varint,12,opt,name=segment_size,json=segmentSize,proto3" json:"segment_size,omitempty"`
 	InsertChannel string `protobuf:"bytes,13,opt,name=insert_channel,json=insertChannel,proto3" json:"insert_channel,omitempty"`
+	StartPosition *internalpb.MsgPosition `protobuf:"bytes,14,opt,name=start_position,json=startPosition,proto3" json:"start_position,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized []byte `json:"-"`
 	XXX_sizecache int32 `json:"-"`
@@ -1561,6 +1562,13 @@ func (m *SegmentLoadInfo) GetInsertChannel() string {
 	return ""
 }
 
+func (m *SegmentLoadInfo) GetStartPosition() *internalpb.MsgPosition {
+	if m != nil {
+		return m.StartPosition
+	}
+	return nil
+}
+
 type FieldIndexInfo struct {
 	FieldID int64 `protobuf:"varint,1,opt,name=fieldID,proto3" json:"fieldID,omitempty"`
 	// deprecated
@@ -3079,6 +3087,7 @@ type LeaderView struct {
 	Channel string `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel,omitempty"`
 	SegmentDist map[int64]*SegmentDist `protobuf:"bytes,3,rep,name=segment_dist,json=segmentDist,proto3" json:"segment_dist,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	GrowingSegmentIDs []int64 `protobuf:"varint,4,rep,packed,name=growing_segmentIDs,json=growingSegmentIDs,proto3" json:"growing_segmentIDs,omitempty"`
+	GrowingSegments map[int64]*internalpb.MsgPosition `protobuf:"bytes,5,rep,name=growing_segments,json=growingSegments,proto3" json:"growing_segments,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized []byte `json:"-"`
 	XXX_sizecache int32 `json:"-"`
@@ -3137,6 +3146,13 @@ func (m *LeaderView) GetGrowingSegmentIDs() []int64 {
 	return nil
 }
 
+func (m *LeaderView) GetGrowingSegments() map[int64]*internalpb.MsgPosition {
+	if m != nil {
+		return m.GrowingSegments
+	}
+	return nil
+}
+
 type SegmentDist struct {
 	NodeID int64 `protobuf:"varint,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
 	Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
@@ -3692,6 +3708,7 @@ func init() {
 	proto.RegisterType((*GetDataDistributionRequest)(nil), "milvus.proto.query.GetDataDistributionRequest")
 	proto.RegisterType((*GetDataDistributionResponse)(nil), "milvus.proto.query.GetDataDistributionResponse")
 	proto.RegisterType((*LeaderView)(nil), "milvus.proto.query.LeaderView")
+	proto.RegisterMapType((map[int64]*internalpb.MsgPosition)(nil), "milvus.proto.query.LeaderView.GrowingSegmentsEntry")
 	proto.RegisterMapType((map[int64]*SegmentDist)(nil), "milvus.proto.query.LeaderView.SegmentDistEntry")
 	proto.RegisterType((*SegmentDist)(nil), "milvus.proto.query.SegmentDist")
 	proto.RegisterType((*SegmentVersionInfo)(nil), "milvus.proto.query.SegmentVersionInfo")
@@ -3708,237 +3725,241 @@ func init() {
 func init() { proto.RegisterFile("query_coord.proto", fileDescriptor_aab7cc9a69ed26e8) }
 
 var fileDescriptor_aab7cc9a69ed26e8 = []byte{
-	// 3669 bytes of a gzipped FileDescriptorProto
+	// 3730 bytes of a gzipped FileDescriptorProto
[... regenerated gzipped FileDescriptorProto byte data omitted ...]
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -25,6 +25,7 @@ import (
 	. "github.com/milvus-io/milvus/internal/querycoordv2/params"
 	"github.com/milvus-io/milvus/internal/querycoordv2/task"
 	"github.com/milvus-io/milvus/internal/querycoordv2/utils"
+	"github.com/milvus-io/milvus/internal/util/typeutil"
 	"go.uber.org/zap"
 )
 
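Note (not part of this commit): the typeutil import added above backs the set-based channel bookkeeping used in getDmChannelDiff below. A minimal sketch, assuming typeutil.Set behaves as this diff uses it (NewSet/Insert plus map-style comma-ok lookups); the helper names are hypothetical.

package example

import "github.com/milvus-io/milvus/internal/util/typeutil"

// channelNameSet collects channel names into a typeutil.Set, mirroring how the
// checker records the channels currently distributed on the replica's nodes.
func channelNameSet(names []string) typeutil.Set[string] {
	set := typeutil.NewSet[string]()
	for _, name := range names {
		set.Insert(name)
	}
	return set
}

// missingFrom returns the target names absent from dist; the Set is map-backed,
// so the comma-ok index form used in the checker works directly.
func missingFrom(target []string, dist typeutil.Set[string]) []string {
	lacks := make([]string, 0)
	for _, name := range target {
		if _, ok := dist[name]; !ok {
			lacks = append(lacks, name)
		}
	}
	return lacks
}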
@@ -73,16 +74,14 @@ func (c *ChannelChecker) Check(ctx context.Context) []task.Task {
 
 func (c *ChannelChecker) checkReplica(ctx context.Context, replica *meta.Replica) []task.Task {
 	ret := make([]task.Task, 0)
-	targets := c.targetMgr.GetDmChannelsByCollection(replica.GetCollectionID())
-	dists := c.getChannelDist(replica)
 
-	lacks, redundancies := diffChannels(targets, dists)
+	lacks, redundancies := c.getDmChannelDiff(c.targetMgr, c.dist, c.meta, replica.GetCollectionID(), replica.GetID())
 	tasks := c.createChannelLoadTask(ctx, lacks, replica)
 	ret = append(ret, tasks...)
 	tasks = c.createChannelReduceTasks(ctx, redundancies, replica.GetID())
 	ret = append(ret, tasks...)
 
-	repeated := findRepeatedChannels(dists)
+	repeated := c.findRepeatedChannels(c.dist, c.meta, replica.GetID())
 	tasks = c.createChannelReduceTasks(ctx, repeated, replica.GetID())
 	ret = append(ret, tasks...)
 
@@ -91,38 +90,69 @@ func (c *ChannelChecker) checkReplica(ctx context.Context, replica *meta.Replica
 	return ret
 }
 
-func (c *ChannelChecker) getChannelDist(replica *meta.Replica) []*meta.DmChannel {
-	dists := make([]*meta.DmChannel, 0)
-	for _, nodeID := range replica.Nodes.Collect() {
-		dists = append(dists, c.dist.ChannelDistManager.GetByCollectionAndNode(replica.GetCollectionID(), nodeID)...)
+// GetDmChannelDiff get channel diff between target and dist
+func (c *ChannelChecker) getDmChannelDiff(targetMgr *meta.TargetManager,
+	distMgr *meta.DistributionManager,
+	metaInfo *meta.Meta,
+	collectionID int64,
+	replicaID int64) (toLoad, toRelease []*meta.DmChannel) {
+	replica := metaInfo.Get(replicaID)
+	if replica == nil {
+		log.Info("replica does not exist, skip it")
+		return
 	}
-	return dists
-}
 
-func diffChannels(targets, dists []*meta.DmChannel) (lacks, redundancies []*meta.DmChannel) {
-	distMap := make(map[string]struct{})
-	targetMap := make(map[string]struct{})
-	for _, ch := range targets {
-		targetMap[ch.GetChannelName()] = struct{}{}
+	dist := c.getChannelDist(distMgr, replica)
+	distMap := typeutil.NewSet[string]()
+	for _, ch := range dist {
+		distMap.Insert(ch.GetChannelName())
 	}
-	for _, ch := range dists {
-		distMap[ch.GetChannelName()] = struct{}{}
-		if _, ok := targetMap[ch.GetChannelName()]; !ok {
-			redundancies = append(redundancies, ch)
+
+	nextTargetMap := targetMgr.GetDmChannelsByCollection(collectionID, meta.NextTarget)
+	currentTargetMap := targetMgr.GetDmChannelsByCollection(collectionID, meta.CurrentTarget)
+
+	// get channels which exists on dist, but not exist on current and next
+	for _, ch := range dist {
+		_, existOnCurrent := currentTargetMap[ch.GetChannelName()]
+		_, existOnNext := nextTargetMap[ch.GetChannelName()]
+		if !existOnNext && !existOnCurrent {
+			toRelease = append(toRelease, ch)
 		}
 	}
-	for _, ch := range targets {
-		if _, ok := distMap[ch.GetChannelName()]; !ok {
-			lacks = append(lacks, ch)
+
+	//get channels which exists on next target, but not on dist
+	for name, channel := range nextTargetMap {
+		_, existOnDist := distMap[name]
+		if !existOnDist {
+			toLoad = append(toLoad, channel)
 		}
 	}
 
 	return
 }
 
-func findRepeatedChannels(dists []*meta.DmChannel) []*meta.DmChannel {
+func (c *ChannelChecker) getChannelDist(distMgr *meta.DistributionManager, replica *meta.Replica) []*meta.DmChannel {
+	dist := make([]*meta.DmChannel, 0)
+	for _, nodeID := range replica.Nodes.Collect() {
+		dist = append(dist, distMgr.ChannelDistManager.GetByCollectionAndNode(replica.GetCollectionID(), nodeID)...)
+	}
+	return dist
+}
+
+func (c *ChannelChecker) findRepeatedChannels(distMgr *meta.DistributionManager,
+	metaInfo *meta.Meta,
+	replicaID int64) []*meta.DmChannel {
+	replica := metaInfo.Get(replicaID)
 	ret := make([]*meta.DmChannel, 0)
+	if replica == nil {
+		log.Info("replica does not exist, skip it")
+		return ret
+	}
+	dist := c.getChannelDist(distMgr, replica)
+
 	versionsMap := make(map[string]*meta.DmChannel)
-	for _, ch := range dists {
+	for _, ch := range dist {
 		maxVer, ok := versionsMap[ch.GetChannelName()]
 		if !ok {
 			versionsMap[ch.GetChannelName()] = ch
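A note on the shape of the new diff logic above: channels served on some node are compared against two target snapshots at once, and only channels missing from both are released, while channels in the next target that no node serves yet are loaded. A minimal, self-contained sketch of that rule with plain maps standing in for the target and distribution managers (all names below are illustrative, not the project's API):

package main

import "fmt"

// diffChannels mirrors the current/next-target comparison using plain sets.
func diffChannels(dist []string, current, next map[string]bool) (toLoad, toRelease []string) {
	distSet := make(map[string]bool, len(dist))
	for _, ch := range dist {
		distSet[ch] = true
	}
	// Release what is served but expected by neither target.
	for _, ch := range dist {
		if !current[ch] && !next[ch] {
			toRelease = append(toRelease, ch)
		}
	}
	// Load what the next target expects but nothing serves yet.
	for ch := range next {
		if !distSet[ch] {
			toLoad = append(toLoad, ch)
		}
	}
	return
}

func main() {
	dist := []string{"dml-0", "dml-stale"}
	current := map[string]bool{"dml-0": true}
	next := map[string]bool{"dml-0": true, "dml-1": true}
	toLoad, toRelease := diffChannels(dist, current, next)
	fmt.Println(toLoad, toRelease) // [dml-1] [dml-stale]
}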
@@ -20,21 +20,25 @@ import (
 	"context"
 	"testing"
 
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/suite"
+
 	etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
+	"github.com/milvus-io/milvus/internal/proto/datapb"
 	"github.com/milvus-io/milvus/internal/querycoordv2/balance"
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
 	. "github.com/milvus-io/milvus/internal/querycoordv2/params"
 	"github.com/milvus-io/milvus/internal/querycoordv2/task"
 	"github.com/milvus-io/milvus/internal/querycoordv2/utils"
 	"github.com/milvus-io/milvus/internal/util/etcd"
-	"github.com/stretchr/testify/mock"
-	"github.com/stretchr/testify/suite"
 )
 
 type ChannelCheckerTestSuite struct {
 	suite.Suite
 	kv      *etcdkv.EtcdKV
 	checker *ChannelChecker
+	meta    *meta.Meta
+	broker  *meta.MockBroker
 }
 
 func (suite *ChannelCheckerTestSuite) SetupSuite() {
@@ -51,13 +55,14 @@ func (suite *ChannelCheckerTestSuite) SetupTest() {
 	// meta
 	store := meta.NewMetaStore(suite.kv)
 	idAllocator := RandomIncrementIDAllocator()
-	testMeta := meta.NewMeta(idAllocator, store)
+	suite.meta = meta.NewMeta(idAllocator, store)
+	suite.broker = meta.NewMockBroker(suite.T())
+	targetManager := meta.NewTargetManager(suite.broker, suite.meta)
 
 	distManager := meta.NewDistributionManager()
-	targetManager := meta.NewTargetManager()
 
 	balancer := suite.createMockBalancer()
-	suite.checker = NewChannelChecker(testMeta, distManager, targetManager, balancer)
+	suite.checker = NewChannelChecker(suite.meta, distManager, targetManager, balancer)
 }
 
 func (suite *ChannelCheckerTestSuite) TearDownTest() {
@@ -87,7 +92,16 @@ func (suite *ChannelCheckerTestSuite) TestLoadChannel() {
 	checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
 	checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1}))
 
-	checker.targetMgr.AddDmChannel(utils.CreateTestChannel(1, 1, 1, "test-insert-channel"))
+	channels := []*datapb.VchannelInfo{
+		{
+			CollectionID: 1,
+			ChannelName:  "test-insert-channel",
+		},
+	}
+
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
+		channels, nil, nil)
+	checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
 
 	tasks := checker.Check(context.TODO())
 	suite.Len(tasks, 1)
@@ -119,9 +133,27 @@ func (suite *ChannelCheckerTestSuite) TestReduceChannel() {
 
 func (suite *ChannelCheckerTestSuite) TestRepeatedChannels() {
 	checker := suite.checker
-	checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
-	checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
-	checker.targetMgr.AddDmChannel(utils.CreateTestChannel(1, 1, 1, "test-insert-channel"))
+	err := checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
+	suite.NoError(err)
+	err = checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
+	suite.NoError(err)
+
+	segments := []*datapb.SegmentBinlogs{
+		{
+			SegmentID:     1,
+			InsertChannel: "test-insert-channel",
+		},
+	}
+
+	channels := []*datapb.VchannelInfo{
+		{
+			CollectionID: 1,
+			ChannelName:  "test-insert-channel",
+		},
+	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
+		channels, segments, nil)
+	checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
 	checker.dist.ChannelDistManager.Update(1, utils.CreateTestChannel(1, 1, 1, "test-insert-channel"))
 	checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 2, "test-insert-channel"))
 
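The repeated-channel case this test constructs (the same channel on node 1 with version 1 and on node 2 with version 2) is resolved by keeping the newest copy and releasing the rest. A self-contained sketch of that keep-the-newest rule, with a toy struct standing in for the DmChannel/Segment types (illustrative only):

package main

import "fmt"

type replicaCopy struct {
	Name    string
	Node    int64
	Version int64
}

// repeatedCopies returns every copy except the highest-version one per name.
func repeatedCopies(dist []replicaCopy) []replicaCopy {
	newest := make(map[string]replicaCopy)
	repeated := make([]replicaCopy, 0)
	for _, c := range dist {
		cur, ok := newest[c.Name]
		if !ok {
			newest[c.Name] = c
			continue
		}
		if cur.Version <= c.Version {
			repeated = append(repeated, cur)
			newest[c.Name] = c
		} else {
			repeated = append(repeated, c)
		}
	}
	return repeated
}

func main() {
	dist := []replicaCopy{
		{Name: "test-insert-channel", Node: 1, Version: 1},
		{Name: "test-insert-channel", Node: 2, Version: 2},
	}
	fmt.Println(repeatedCopies(dist)) // [{test-insert-channel 1 1}]
}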
@@ -76,11 +76,9 @@ func (c *SegmentChecker) Check(ctx context.Context) []task.Task {
 
 func (c *SegmentChecker) checkReplica(ctx context.Context, replica *meta.Replica) []task.Task {
 	ret := make([]task.Task, 0)
-	targets := c.targetMgr.GetSegmentsByCollection(replica.CollectionID)
-	dists := c.getSegmentsDist(replica)
 
 	// compare with targets to find the lack and redundancy of segments
-	lacks, redundancies := diffSegments(targets, dists)
+	lacks, redundancies := c.getHistoricalSegmentDiff(c.targetMgr, c.dist, c.meta, replica.GetCollectionID(), replica.GetID())
 	tasks := c.createSegmentLoadTasks(ctx, lacks, replica)
 	ret = append(ret, tasks...)
 
@@ -88,68 +86,146 @@ func (c *SegmentChecker) checkReplica(ctx context.Context, replica *meta.Replica
 	ret = append(ret, tasks...)
 
 	// compare inner dists to find repeated loaded segments
-	redundancies = findRepeatedSegments(dists)
+	redundancies = c.findRepeatedHistoricalSegments(c.dist, c.meta, replica.GetID())
 	redundancies = c.filterExistedOnLeader(replica, redundancies)
 	tasks = c.createSegmentReduceTasks(ctx, redundancies, replica.GetID(), querypb.DataScope_All)
 	ret = append(ret, tasks...)
 
-	// release redundant growing segments
-	leaderRedundancies := c.findNeedReleasedGrowingSegments(replica)
-	redundancies = make([]*meta.Segment, 0)
-	for _, segments := range leaderRedundancies {
-		redundancies = append(redundancies, segments...)
-	}
+	// compare with target to find the lack and redundancy of segments
+	_, redundancies = c.getStreamingSegmentDiff(c.targetMgr, c.dist, c.meta, replica.GetCollectionID(), replica.GetID())
 	tasks = c.createSegmentReduceTasks(ctx, redundancies, replica.GetID(), querypb.DataScope_Streaming)
 	ret = append(ret, tasks...)
 
 	return ret
 }
 
-func (c *SegmentChecker) getSegmentsDist(replica *meta.Replica) []*meta.Segment {
+// GetStreamingSegmentDiff get streaming segment diff between leader view and target
+func (c *SegmentChecker) getStreamingSegmentDiff(targetMgr *meta.TargetManager,
+	distMgr *meta.DistributionManager,
+	metaInfo *meta.Meta,
+	collectionID int64,
+	replicaID int64) (toLoad []*datapb.SegmentInfo, toRelease []*meta.Segment) {
+	replica := metaInfo.Get(replicaID)
+	if replica == nil {
+		log.Info("replica does not exist, skip it")
+		return
+	}
+	dist := c.getStreamingSegmentsDist(distMgr, replica)
+	distMap := typeutil.NewUniqueSet()
+	for _, s := range dist {
+		distMap.Insert(s.GetID())
+	}
+
+	nextTargetSegmentIDs := targetMgr.GetStreamingSegmentsByCollection(collectionID, meta.NextTarget)
+	currentTargetSegmentIDs := targetMgr.GetStreamingSegmentsByCollection(collectionID, meta.CurrentTarget)
+	currentTargetChannelMap := targetMgr.GetDmChannelsByCollection(collectionID, meta.CurrentTarget)
+
+	// get segment which exist on dist, but not on current target and next target
+	for _, segment := range dist {
+		if !currentTargetSegmentIDs.Contain(segment.GetID()) && !nextTargetSegmentIDs.Contain(segment.GetID()) {
+			if channel, ok := currentTargetChannelMap[segment.InsertChannel]; ok {
+				timestampInSegment := segment.GetStartPosition().GetTimestamp()
+				timestampInTarget := channel.GetSeekPosition().GetTimestamp()
+				// filter toRelease which seekPosition is newer than next target dmChannel
+				if timestampInSegment < timestampInTarget {
+					log.Info("growing segment not exist in target, so release it",
+						zap.Int64("segmentID", segment.GetID()),
+					)
+					toRelease = append(toRelease, segment)
+				}
+			}
+		}
+	}
+
+	return
+}
+
+func (c *SegmentChecker) getStreamingSegmentsDist(distMgr *meta.DistributionManager, replica *meta.Replica) map[int64]*meta.Segment {
+	segments := make(map[int64]*meta.Segment, 0)
+	for _, node := range replica.Nodes.Collect() {
+		segmentsOnNodes := distMgr.LeaderViewManager.GetGrowingSegmentDistByCollectionAndNode(replica.CollectionID, node)
+		for k, v := range segmentsOnNodes {
+			segments[k] = v
+		}
+	}
+
+	return segments
+}
+
+// GetHistoricalSegmentDiff get historical segment diff between target and dist
+func (c *SegmentChecker) getHistoricalSegmentDiff(targetMgr *meta.TargetManager,
+	distMgr *meta.DistributionManager,
+	metaInfo *meta.Meta,
+	collectionID int64,
+	replicaID int64) (toLoad []*datapb.SegmentInfo, toRelease []*meta.Segment) {
+	replica := metaInfo.Get(replicaID)
+	if replica == nil {
+		log.Info("replica does not exist, skip it")
+		return
+	}
+	dist := c.getHistoricalSegmentsDist(distMgr, replica)
+	distMap := typeutil.NewUniqueSet()
+	for _, s := range dist {
+		distMap.Insert(s.GetID())
+	}
+
+	nextTargetMap := targetMgr.GetHistoricalSegmentsByCollection(collectionID, meta.NextTarget)
+	currentTargetMap := targetMgr.GetHistoricalSegmentsByCollection(collectionID, meta.CurrentTarget)
+
+	//get segment which exist on next target, but not on dist
+	for segmentID, segment := range nextTargetMap {
+		if !distMap.Contain(segmentID) {
+			toLoad = append(toLoad, segment)
+		}
+	}
+
+	// get segment which exist on dist, but not on current target and next target
+	for _, segment := range dist {
+		_, existOnCurrent := currentTargetMap[segment.GetID()]
+		_, existOnNext := nextTargetMap[segment.GetID()]
+
+		if !existOnNext && !existOnCurrent {
+			toRelease = append(toRelease, segment)
+		}
+	}
+
+	return
+}
+
+func (c *SegmentChecker) getHistoricalSegmentsDist(distMgr *meta.DistributionManager, replica *meta.Replica) []*meta.Segment {
 	ret := make([]*meta.Segment, 0)
 	for _, node := range replica.Nodes.Collect() {
-		ret = append(ret, c.dist.SegmentDistManager.GetByCollectionAndNode(replica.CollectionID, node)...)
+		ret = append(ret, distMgr.SegmentDistManager.GetByCollectionAndNode(replica.CollectionID, node)...)
 	}
 	return ret
 }
 
-func diffSegments(targets []*datapb.SegmentInfo, dists []*meta.Segment) (lacks []*datapb.SegmentInfo, redundancies []*meta.Segment) {
-	distMap := typeutil.NewUniqueSet()
-	targetMap := typeutil.NewUniqueSet()
-	for _, s := range targets {
-		targetMap.Insert(s.GetID())
+func (c *SegmentChecker) findRepeatedHistoricalSegments(distMgr *meta.DistributionManager,
+	metaInfo *meta.Meta,
+	replicaID int64) []*meta.Segment {
+	segments := make([]*meta.Segment, 0)
+	replica := metaInfo.Get(replicaID)
+	if replica == nil {
+		log.Info("replica does not exist, skip it")
+		return segments
 	}
-	for _, s := range dists {
-		distMap.Insert(s.GetID())
-		if !targetMap.Contain(s.GetID()) {
-			redundancies = append(redundancies, s)
-		}
-	}
-	for _, s := range targets {
-		if !distMap.Contain(s.GetID()) {
-			lacks = append(lacks, s)
-		}
-	}
-	return
-}
-
-func findRepeatedSegments(dists []*meta.Segment) []*meta.Segment {
-	ret := make([]*meta.Segment, 0)
+	dist := c.getHistoricalSegmentsDist(distMgr, replica)
 	versions := make(map[int64]*meta.Segment)
-	for _, s := range dists {
+	for _, s := range dist {
 		maxVer, ok := versions[s.GetID()]
 		if !ok {
 			versions[s.GetID()] = s
			continue
 		}
 		if maxVer.Version <= s.Version {
-			ret = append(ret, maxVer)
+			segments = append(segments, maxVer)
 			versions[s.GetID()] = s
 		} else {
-			ret = append(ret, s)
+			segments = append(segments, s)
 		}
 	}
-	return ret
+
+	return segments
 }
 
 func (c *SegmentChecker) filterExistedOnLeader(replica *meta.Replica, segments []*meta.Segment) []*meta.Segment {
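Worth spelling out the guard in getStreamingSegmentDiff above: a growing segment that is absent from both targets is only released when its start position is older than the seek position of its channel in the current target (the updated growing-segment test later in this diff uses start timestamps 2, 3 and 11 against a channel seek position of 10). A reduced, self-contained sketch of that decision, as I read the rule (names are illustrative):

package main

import "fmt"

// shouldReleaseGrowing mirrors the timestamp guard with plain values: keep the
// segment if either target still expects it, or if it started after the
// channel checkpoint recorded in the current target.
func shouldReleaseGrowing(inCurrentTarget, inNextTarget bool, segmentStartTs, channelSeekTs uint64) bool {
	if inCurrentTarget || inNextTarget {
		return false
	}
	return segmentStartTs < channelSeekTs
}

func main() {
	fmt.Println(shouldReleaseGrowing(false, false, 2, 10))  // true: started before the checkpoint
	fmt.Println(shouldReleaseGrowing(false, false, 11, 10)) // false: newer than the checkpoint, keep it
	fmt.Println(shouldReleaseGrowing(true, false, 2, 10))   // false: still part of a target
}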
@@ -177,55 +253,6 @@ func (c *SegmentChecker) filterExistedOnLeader(replica *meta.Replica, segments [
 	return filtered
 }
 
-func (c *SegmentChecker) findNeedReleasedGrowingSegments(replica *meta.Replica) map[int64][]*meta.Segment {
-	ret := make(map[int64][]*meta.Segment, 0) // leaderID -> segment ids
-	leaders := c.dist.ChannelDistManager.GetShardLeadersByReplica(replica)
-	for shard, leaderID := range leaders {
-		leaderView := c.dist.LeaderViewManager.GetLeaderShardView(leaderID, shard)
-		if leaderView == nil {
-			continue
-		}
-		// find growing segments from leaderview's sealed segments
-		// because growing segments should be released only after loading the compaction created segment successfully.
-		for sid := range leaderView.Segments {
-			segment := c.targetMgr.GetSegment(sid)
-			if segment == nil {
-				continue
-			}
-
-			sources := append(segment.GetCompactionFrom(), segment.GetID())
-			for _, source := range sources {
-				if leaderView.GrowingSegments.Contain(source) {
-					ret[leaderView.ID] = append(ret[leaderView.ID], &meta.Segment{
-						SegmentInfo: &datapb.SegmentInfo{
-							ID:            source,
-							CollectionID:  replica.GetCollectionID(),
-							InsertChannel: leaderView.Channel,
-						},
-						Node: leaderID,
-					})
-				}
-			}
-		}
-	}
-	return ret
-}
-
-func packSegments(segmentIDs []int64, nodeID int64, collectionID int64) []*meta.Segment {
-	ret := make([]*meta.Segment, 0, len(segmentIDs))
-	for _, id := range segmentIDs {
-		segment := &meta.Segment{
-			SegmentInfo: &datapb.SegmentInfo{
-				ID:           id,
-				CollectionID: collectionID,
-			},
-			Node: nodeID,
-		}
-		ret = append(ret, segment)
-	}
-	return ret
-}
-
 func (c *SegmentChecker) createSegmentLoadTasks(ctx context.Context, segments []*datapb.SegmentInfo, replica *meta.Replica) []task.Task {
 	if len(segments) == 0 {
 		return nil
@@ -18,23 +18,29 @@ package checkers
 
 import (
 	"context"
+	"sort"
 	"testing"
 
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/suite"
+
 	etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
+	"github.com/milvus-io/milvus/internal/proto/datapb"
+	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/querycoordv2/balance"
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
 	. "github.com/milvus-io/milvus/internal/querycoordv2/params"
 	"github.com/milvus-io/milvus/internal/querycoordv2/task"
 	"github.com/milvus-io/milvus/internal/querycoordv2/utils"
 	"github.com/milvus-io/milvus/internal/util/etcd"
-	"github.com/stretchr/testify/mock"
-	"github.com/stretchr/testify/suite"
 )
 
 type SegmentCheckerTestSuite struct {
 	suite.Suite
 	kv      *etcdkv.EtcdKV
 	checker *SegmentChecker
+	meta    *meta.Meta
+	broker  *meta.MockBroker
 }
 
 func (suite *SegmentCheckerTestSuite) SetupSuite() {
@@ -51,13 +57,13 @@ func (suite *SegmentCheckerTestSuite) SetupTest() {
 	// meta
 	store := meta.NewMetaStore(suite.kv)
 	idAllocator := RandomIncrementIDAllocator()
-	testMeta := meta.NewMeta(idAllocator, store)
+	suite.meta = meta.NewMeta(idAllocator, store)
 
 	distManager := meta.NewDistributionManager()
-	targetManager := meta.NewTargetManager()
+	suite.broker = meta.NewMockBroker(suite.T())
+	targetManager := meta.NewTargetManager(suite.broker, suite.meta)
 
 	balancer := suite.createMockBalancer()
-	suite.checker = NewSegmentChecker(testMeta, distManager, targetManager, balancer)
+	suite.checker = NewSegmentChecker(suite.meta, distManager, targetManager, balancer)
 }
 
 func (suite *SegmentCheckerTestSuite) TearDownTest() {
@@ -89,11 +95,19 @@ func (suite *SegmentCheckerTestSuite) TestLoadSegments() {
 	checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
 
 	// set target
-	checker.targetMgr.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
+	segments := []*datapb.SegmentBinlogs{
+		{
+			SegmentID:     1,
+			InsertChannel: "test-insert-channel",
+		},
+	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
+		nil, segments, nil)
+	checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
 
 	// set dist
 	checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
-	checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, []int64{}))
+	checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, map[int64]*meta.Segment{}))
 
 	tasks := checker.Check(context.TODO())
 	suite.Len(tasks, 1)
@@ -114,7 +128,7 @@ func (suite *SegmentCheckerTestSuite) TestReleaseSegments() {
 
 	// set dist
 	checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
-	checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, []int64{}))
+	checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, map[int64]*meta.Segment{}))
 	checker.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 2, 1, 1, "test-insert-channel"))
 
 	tasks := checker.Check(context.TODO())
@@ -134,11 +148,19 @@ func (suite *SegmentCheckerTestSuite) TestReleaseRepeatedSegments() {
 	checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
 
 	// set target
-	checker.targetMgr.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
+	segments := []*datapb.SegmentBinlogs{
+		{
+			SegmentID:     1,
+			InsertChannel: "test-insert-channel",
+		},
+	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
+		nil, segments, nil)
+	checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
 
 	// set dist
 	checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
-	checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{1: 2}, []int64{}))
+	checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{1: 2}, map[int64]*meta.Segment{}))
 	checker.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 1, 1, "test-insert-channel"))
 	checker.dist.SegmentDistManager.Update(2, utils.CreateTestSegment(1, 1, 1, 1, 2, "test-insert-channel"))
 
@@ -153,7 +175,7 @@ func (suite *SegmentCheckerTestSuite) TestReleaseRepeatedSegments() {
 	suite.EqualValues(1, action.Node())
 
 	// test less version exist on leader
-	checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{1: 1}, []int64{}))
+	checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{1: 1}, map[int64]*meta.Segment{}))
 	tasks = checker.Check(context.TODO())
 	suite.Len(tasks, 0)
 }
@@ -165,16 +187,43 @@ func (suite *SegmentCheckerTestSuite) TestReleaseGrowingSegments() {
 	checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
 	checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
 
-	segment := utils.CreateTestSegmentInfo(1, 1, 3, "test-insert-channel")
-	segment.CompactionFrom = append(segment.CompactionFrom, 2)
-	checker.targetMgr.AddSegment(segment)
+	segments := []*datapb.SegmentBinlogs{
+		{
+			SegmentID:     3,
+			InsertChannel: "test-insert-channel",
+		},
+	}
+	channels := []*datapb.VchannelInfo{
+		{
+			CollectionID: 1,
+			ChannelName:  "test-insert-channel",
+			SeekPosition: &internalpb.MsgPosition{Timestamp: 10},
+		},
+	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
+		channels, segments, nil)
+	checker.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
+	checker.targetMgr.UpdateCollectionCurrentTarget(int64(1), int64(1))
 
-	checker.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
-	checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{3: 2}, []int64{2, 3}))
-	checker.dist.SegmentDistManager.Update(2, utils.CreateTestSegment(1, 1, 3, 2, 1, "test-insert-channel"))
+	growingSegments := make(map[int64]*meta.Segment)
+	growingSegments[2] = utils.CreateTestSegment(1, 1, 2, 2, 0, "test-insert-channel")
+	growingSegments[2].SegmentInfo.StartPosition = &internalpb.MsgPosition{Timestamp: 2}
+	growingSegments[3] = utils.CreateTestSegment(1, 1, 3, 2, 1, "test-insert-channel")
+	growingSegments[3].SegmentInfo.StartPosition = &internalpb.MsgPosition{Timestamp: 3}
+	growingSegments[4] = utils.CreateTestSegment(1, 1, 4, 2, 1, "test-insert-channel")
+	growingSegments[4].SegmentInfo.StartPosition = &internalpb.MsgPosition{Timestamp: 11}
+
+	dmChannel := utils.CreateTestChannel(1, 2, 1, "test-insert-channel")
+	dmChannel.UnflushedSegmentIds = []int64{2, 3}
+	checker.dist.ChannelDistManager.Update(2, dmChannel)
+	checker.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{3: 2}, growingSegments))
+	checker.dist.SegmentDistManager.Update(2, utils.CreateTestSegment(1, 1, 3, 2, 2, "test-insert-channel"))
 
 	tasks := checker.Check(context.TODO())
 	suite.Len(tasks, 2)
+	sort.Slice(tasks, func(i, j int) bool {
+		return tasks[i].Actions()[0].(*task.SegmentAction).SegmentID() < tasks[j].Actions()[0].(*task.SegmentAction).SegmentID()
+	})
 	suite.Len(tasks[0].Actions(), 1)
 	action, ok := tasks[0].Actions()[0].(*task.SegmentAction)
 	suite.True(ok)
@@ -21,15 +21,18 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/suite"
+	"go.uber.org/atomic"
+
+	etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
 	. "github.com/milvus-io/milvus/internal/querycoordv2/params"
 	"github.com/milvus-io/milvus/internal/querycoordv2/session"
 	"github.com/milvus-io/milvus/internal/querycoordv2/task"
+	"github.com/milvus-io/milvus/internal/util/etcd"
 	"github.com/milvus-io/milvus/internal/util/typeutil"
-	"github.com/stretchr/testify/mock"
-	"github.com/stretchr/testify/suite"
-	"go.uber.org/atomic"
 )
 
 type DistControllerTestSuite struct {
@@ -37,19 +40,39 @@ type DistControllerTestSuite struct {
 	controller    *Controller
 	mockCluster   *session.MockCluster
 	mockScheduler *task.MockScheduler
 
+	kv     *etcdkv.EtcdKV
+	meta   *meta.Meta
+	broker *meta.MockBroker
 }
 
 func (suite *DistControllerTestSuite) SetupTest() {
 	Params.Init()
 
+	var err error
+	config := GenerateEtcdConfig()
+	cli, err := etcd.GetEtcdClient(&config)
+	suite.Require().NoError(err)
+	suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath)
+
+	// meta
+	store := meta.NewMetaStore(suite.kv)
+	idAllocator := RandomIncrementIDAllocator()
+	suite.meta = meta.NewMeta(idAllocator, store)
+
 	suite.mockCluster = session.NewMockCluster(suite.T())
 	nodeManager := session.NewNodeManager()
 	distManager := meta.NewDistributionManager()
-	targetManager := meta.NewTargetManager()
+	suite.broker = meta.NewMockBroker(suite.T())
+	targetManager := meta.NewTargetManager(suite.broker, suite.meta)
 	suite.mockScheduler = task.NewMockScheduler(suite.T())
 	suite.controller = NewDistController(suite.mockCluster, nodeManager, distManager, targetManager, suite.mockScheduler)
 }
 
+func (suite *DistControllerTestSuite) TearDownSuite() {
+	suite.kv.Close()
+}
+
 func (suite *DistControllerTestSuite) TestStart() {
 	dispatchCalled := atomic.NewBool(false)
 	suite.mockCluster.EXPECT().GetDataDistribution(mock.Anything, mock.Anything, mock.Anything).Return(
internal/querycoordv2/dist/dist_handler.go
@@ -32,7 +32,6 @@ import (
 	"github.com/milvus-io/milvus/internal/querycoordv2/session"
 	"github.com/milvus-io/milvus/internal/querycoordv2/task"
 	"github.com/milvus-io/milvus/internal/util/commonpbutil"
-	"github.com/milvus-io/milvus/internal/util/typeutil"
 	"go.uber.org/zap"
 )
 
@@ -128,7 +127,12 @@ func (dh *distHandler) handleDistResp(resp *querypb.GetDataDistributionResponse)
 func (dh *distHandler) updateSegmentsDistribution(resp *querypb.GetDataDistributionResponse) {
 	updates := make([]*meta.Segment, 0, len(resp.GetSegments()))
 	for _, s := range resp.GetSegments() {
-		segmentInfo := dh.target.GetSegment(s.GetID())
+		// for collection which is already loaded
+		segmentInfo := dh.target.GetHistoricalSegment(s.GetCollection(), s.GetID(), meta.CurrentTarget)
+		if segmentInfo == nil {
+			// for collection which is loading
+			segmentInfo = dh.target.GetHistoricalSegment(s.GetCollection(), s.GetID(), meta.NextTarget)
+		}
 		var segment *meta.Segment
 		if segmentInfo == nil {
 			segment = &meta.Segment{
@@ -157,7 +161,7 @@ func (dh *distHandler) updateSegmentsDistribution(resp *querypb.GetDataDistribut
 func (dh *distHandler) updateChannelsDistribution(resp *querypb.GetDataDistributionResponse) {
 	updates := make([]*meta.DmChannel, 0, len(resp.GetChannels()))
 	for _, ch := range resp.GetChannels() {
-		channelInfo := dh.target.GetDmChannel(ch.GetChannel())
+		channelInfo := dh.target.GetDmChannel(ch.GetCollection(), ch.GetChannel(), meta.CurrentTarget)
 		var channel *meta.DmChannel
 		if channelInfo == nil {
 			channel = &meta.DmChannel{
@@ -180,12 +184,26 @@ func (dh *distHandler) updateChannelsDistribution(resp *querypb.GetDataDistribut
 func (dh *distHandler) updateLeaderView(resp *querypb.GetDataDistributionResponse) {
 	updates := make([]*meta.LeaderView, 0, len(resp.GetLeaderViews()))
 	for _, lview := range resp.GetLeaderViews() {
+		segments := make(map[int64]*meta.Segment)
+
+		for ID, position := range lview.GrowingSegments {
+			segments[ID] = &meta.Segment{
+				SegmentInfo: &datapb.SegmentInfo{
+					ID:            ID,
+					CollectionID:  lview.GetCollection(),
+					StartPosition: position,
+					InsertChannel: lview.GetChannel(),
+				},
+				Node: resp.NodeID,
+			}
+		}
+
 		view := &meta.LeaderView{
 			ID:           resp.GetNodeID(),
 			CollectionID: lview.GetCollection(),
 			Channel:      lview.GetChannel(),
 			Segments:     lview.GetSegmentDist(),
-			GrowingSegments: typeutil.NewUniqueSet(lview.GetGrowingSegmentIDs()...),
+			GrowingSegments: segments,
 		}
 		updates = append(updates, view)
 	}
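With the leader view now reporting growing segments as a map from segment ID to start position instead of a bare ID list, updateLeaderView folds each entry into a per-segment record that keeps collection, channel, node and start position together. A self-contained sketch of that fold with simplified stand-in types (not the actual meta structs):

package main

import "fmt"

type MsgPosition struct{ Timestamp uint64 }

type SegmentRecord struct {
	ID            int64
	CollectionID  int64
	InsertChannel string
	StartPosition *MsgPosition
	Node          int64
}

// foldGrowing turns a growing-segment position map into per-segment records,
// mirroring what updateLeaderView does for meta.Segment.
func foldGrowing(collection int64, channel string, node int64, growing map[int64]*MsgPosition) map[int64]SegmentRecord {
	out := make(map[int64]SegmentRecord, len(growing))
	for id, pos := range growing {
		out[id] = SegmentRecord{
			ID:            id,
			CollectionID:  collection,
			InsertChannel: channel,
			StartPosition: pos,
			Node:          node,
		}
	}
	return out
}

func main() {
	growing := map[int64]*MsgPosition{2: {Timestamp: 2}, 3: {Timestamp: 3}}
	fmt.Println(len(foldGrowing(1, "test-insert-channel", 2, growing))) // 2
}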
@@ -287,7 +287,7 @@ func (s *Server) tryGetNodesMetrics(ctx context.Context, req *milvuspb.GetMetric
 func (s *Server) fillReplicaInfo(replica *meta.Replica, withShardNodes bool) (*milvuspb.ReplicaInfo, error) {
 	info := utils.Replica2ReplicaInfo(replica.Replica)
 
-	channels := s.targetMgr.GetDmChannelsByCollection(replica.GetCollectionID())
+	channels := s.targetMgr.GetDmChannelsByCollection(replica.GetCollectionID(), meta.CurrentTarget)
 	if len(channels) == 0 {
 		msg := "failed to get channels, collection not loaded"
 		log.Warn(msg)
@@ -21,15 +21,15 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/samber/lo"
+	"go.uber.org/zap"
+
 	"github.com/milvus-io/milvus/internal/log"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
-	"github.com/milvus-io/milvus/internal/querycoordv2/observers"
 	"github.com/milvus-io/milvus/internal/querycoordv2/session"
 	"github.com/milvus-io/milvus/internal/querycoordv2/utils"
 	"github.com/milvus-io/milvus/internal/util/typeutil"
-	"github.com/samber/lo"
-	"go.uber.org/zap"
 )
 
 // Job is request of loading/releasing collection/partitions,
@@ -109,7 +109,6 @@ type LoadCollectionJob struct {
 	targetMgr *meta.TargetManager
 	broker    meta.Broker
 	nodeMgr   *session.NodeManager
-	handoffObserver *observers.HandoffObserver
 }
 
 func NewLoadCollectionJob(
@@ -120,7 +119,6 @@ func NewLoadCollectionJob(
 	targetMgr *meta.TargetManager,
 	broker meta.Broker,
 	nodeMgr *session.NodeManager,
-	handoffObserver *observers.HandoffObserver,
 ) *LoadCollectionJob {
 	return &LoadCollectionJob{
 		BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
@@ -130,7 +128,6 @@ func NewLoadCollectionJob(
 		targetMgr: targetMgr,
 		broker:    broker,
 		nodeMgr:   nodeMgr,
-		handoffObserver: handoffObserver,
 	}
 }
 
@@ -200,25 +197,19 @@ func (job *LoadCollectionJob) Execute() error {
 	}
 
 	// Fetch channels and segments from DataCoord
-	partitions, err := job.broker.GetPartitions(job.ctx, req.GetCollectionID())
+	partitionIDs, err := job.broker.GetPartitions(job.ctx, req.GetCollectionID())
 	if err != nil {
 		msg := "failed to get partitions from RootCoord"
 		log.Error(msg, zap.Error(err))
 		return utils.WrapError(msg, err)
 	}
 
-	job.handoffObserver.Register(job.CollectionID())
-	err = utils.RegisterTargets(job.ctx,
-		job.targetMgr,
-		job.broker,
-		req.GetCollectionID(),
-		partitions)
+	err = job.targetMgr.UpdateCollectionNextTargetWithPartitions(req.GetCollectionID(), partitionIDs...)
 	if err != nil {
-		msg := "failed to register channels and segments"
+		msg := "failed to update next targets for collection"
 		log.Error(msg, zap.Error(err))
 		return utils.WrapError(msg, err)
 	}
-	job.handoffObserver.StartHandoff(job.CollectionID())
 
 	err = job.meta.CollectionManager.PutCollection(&meta.Collection{
 		CollectionLoadInfo: &querypb.CollectionLoadInfo{
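The load path above is now a single call that refreshes the collection's next target from the broker; promoting that snapshot to the current target is a separate step, which the updated tests perform explicitly via UpdateCollectionCurrentTarget. A toy sketch of that two-phase target bookkeeping, under the assumption that promotion happens once the next target is observed to be fully served (the TargetSet type below is illustrative, not the project's API):

package main

import "fmt"

type TargetSet struct {
	current map[int64]bool // sealed segment IDs the collection should serve
	next    map[int64]bool
}

// UpdateNext replaces the next target, e.g. with segments reported by GetRecoveryInfo.
func (t *TargetSet) UpdateNext(segmentIDs ...int64) {
	t.next = make(map[int64]bool, len(segmentIDs))
	for _, id := range segmentIDs {
		t.next[id] = true
	}
}

// PromoteNext makes the next target the current one once it is considered ready.
func (t *TargetSet) PromoteNext() {
	t.current = t.next
	t.next = nil
}

func main() {
	ts := &TargetSet{}
	ts.UpdateNext(1, 2, 3) // like UpdateCollectionNextTargetWithPartitions
	ts.PromoteNext()       // like UpdateCollectionCurrentTarget
	fmt.Println(len(ts.current), len(ts.next)) // 3 0
}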
@@ -242,7 +233,6 @@ func (job *LoadCollectionJob) Execute() error {
 func (job *LoadCollectionJob) PostExecute() {
 	if job.Error() != nil && !job.meta.Exist(job.CollectionID()) {
 		job.meta.ReplicaManager.RemoveCollection(job.CollectionID())
-		job.handoffObserver.Unregister(job.ctx)
 		job.targetMgr.RemoveCollection(job.req.GetCollectionID())
 	}
 }
@@ -253,7 +243,6 @@ type ReleaseCollectionJob struct {
 	dist      *meta.DistributionManager
 	meta      *meta.Meta
 	targetMgr *meta.TargetManager
-	handoffObserver *observers.HandoffObserver
 }
 
 func NewReleaseCollectionJob(ctx context.Context,
@@ -261,7 +250,6 @@ func NewReleaseCollectionJob(ctx context.Context,
 	dist *meta.DistributionManager,
 	meta *meta.Meta,
 	targetMgr *meta.TargetManager,
-	handoffObserver *observers.HandoffObserver,
 ) *ReleaseCollectionJob {
 	return &ReleaseCollectionJob{
 		BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
@@ -269,7 +257,6 @@ func NewReleaseCollectionJob(ctx context.Context,
 		dist:      dist,
 		meta:      meta,
 		targetMgr: targetMgr,
-		handoffObserver: handoffObserver,
 	}
 }
 
@@ -290,8 +277,6 @@ func (job *ReleaseCollectionJob) Execute() error {
 		return utils.WrapError(msg, err)
 	}
 
-	job.handoffObserver.Unregister(job.ctx, job.CollectionID())
-
 	err = job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID())
 	if err != nil {
 		msg := "failed to remove replicas"
@@ -312,7 +297,6 @@ type LoadPartitionJob struct {
 	targetMgr *meta.TargetManager
 	broker    meta.Broker
 	nodeMgr   *session.NodeManager
-	handoffObserver *observers.HandoffObserver
 }
 
 func NewLoadPartitionJob(
@@ -323,7 +307,6 @@ func NewLoadPartitionJob(
 	targetMgr *meta.TargetManager,
 	broker meta.Broker,
 	nodeMgr *session.NodeManager,
-	handoffObserver *observers.HandoffObserver,
 ) *LoadPartitionJob {
 	return &LoadPartitionJob{
 		BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
@@ -333,7 +316,6 @@ func NewLoadPartitionJob(
 		targetMgr: targetMgr,
 		broker:    broker,
 		nodeMgr:   nodeMgr,
-		handoffObserver: handoffObserver,
 	}
 }
 
@@ -393,6 +375,7 @@ func (job *LoadPartitionJob) Execute() error {
 	req := job.req
 	log := log.Ctx(job.ctx).With(
 		zap.Int64("collectionID", req.GetCollectionID()),
+		zap.Int64s("partitionIDs", req.GetPartitionIDs()),
 	)
 
 	// Create replicas
@@ -412,19 +395,14 @@ func (job *LoadPartitionJob) Execute() error {
 			zap.Int64s("nodes", replica.GetNodes()))
 	}
 
-	job.handoffObserver.Register(job.CollectionID())
-	err = utils.RegisterTargets(job.ctx,
-		job.targetMgr,
-		job.broker,
-		req.GetCollectionID(),
-		req.GetPartitionIDs())
+	err = job.targetMgr.UpdateCollectionNextTargetWithPartitions(req.GetCollectionID(), req.GetPartitionIDs()...)
 	if err != nil {
-		msg := "failed to register channels and segments"
-		log.Error(msg, zap.Error(err))
+		msg := "failed to update next targets for collection"
+		log.Error(msg,
+			zap.Int64s("partitionIDs", req.GetPartitionIDs()),
+			zap.Error(err))
 		return utils.WrapError(msg, err)
 	}
-	job.handoffObserver.StartHandoff(job.CollectionID())
 
 	partitions := lo.Map(req.GetPartitionIDs(), func(partition int64, _ int) *meta.Partition {
 		return &meta.Partition{
 			PartitionLoadInfo: &querypb.PartitionLoadInfo{
@@ -450,7 +428,6 @@ func (job *LoadPartitionJob) Execute() error {
 func (job *LoadPartitionJob) PostExecute() {
 	if job.Error() != nil && !job.meta.Exist(job.CollectionID()) {
 		job.meta.ReplicaManager.RemoveCollection(job.CollectionID())
-		job.handoffObserver.Unregister(job.ctx, job.CollectionID())
 		job.targetMgr.RemoveCollection(job.req.GetCollectionID())
 	}
 }
@@ -461,7 +438,6 @@ type ReleasePartitionJob struct {
 	dist      *meta.DistributionManager
 	meta      *meta.Meta
 	targetMgr *meta.TargetManager
-	handoffObserver *observers.HandoffObserver
 }
 
 func NewReleasePartitionJob(ctx context.Context,
@@ -469,7 +445,6 @@ func NewReleasePartitionJob(ctx context.Context,
 	dist *meta.DistributionManager,
 	meta *meta.Meta,
 	targetMgr *meta.TargetManager,
-	handoffObserver *observers.HandoffObserver,
 ) *ReleasePartitionJob {
 	return &ReleasePartitionJob{
 		BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
@@ -477,7 +452,6 @@ func NewReleasePartitionJob(ctx context.Context,
 		dist:      dist,
 		meta:      meta,
 		targetMgr: targetMgr,
-		handoffObserver: handoffObserver,
 	}
 }
 
@@ -520,7 +494,6 @@ func (job *ReleasePartitionJob) Execute() error {
 			log.Warn(msg, zap.Error(err))
 			return utils.WrapError(msg, err)
 		}
-		job.handoffObserver.Unregister(job.ctx, job.CollectionID())
 		err = job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID())
 		if err != nil {
 			log.Warn("failed to remove replicas", zap.Error(err))
@@ -534,9 +507,7 @@ func (job *ReleasePartitionJob) Execute() error {
 			log.Warn(msg, zap.Error(err))
 			return utils.WrapError(msg, err)
 		}
-		for _, partition := range toRelease {
-			job.targetMgr.RemovePartition(partition)
-		}
+		job.targetMgr.RemovePartition(req.GetCollectionID(), toRelease...)
 		waitCollectionReleased(job.dist, req.GetCollectionID(), toRelease...)
 	}
 	return nil
@@ -21,17 +21,17 @@ import (
 	"errors"
 	"testing"
 
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/suite"
+
 	"github.com/milvus-io/milvus/internal/kv"
 	etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
 	"github.com/milvus-io/milvus/internal/proto/datapb"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
-	"github.com/milvus-io/milvus/internal/querycoordv2/observers"
 	. "github.com/milvus-io/milvus/internal/querycoordv2/params"
 	"github.com/milvus-io/milvus/internal/querycoordv2/session"
 	"github.com/milvus-io/milvus/internal/util/etcd"
-	"github.com/stretchr/testify/mock"
-	"github.com/stretchr/testify/suite"
 )
 
 const (
@@ -57,7 +57,6 @@ type JobSuite struct {
 	targetMgr *meta.TargetManager
 	broker    *meta.MockBroker
 	nodeMgr   *session.NodeManager
-	handoffObserver *observers.HandoffObserver
 
 	// Test objects
 	scheduler *Scheduler
@@ -124,16 +123,9 @@ func (suite *JobSuite) SetupTest() {
 	suite.store = meta.NewMetaStore(suite.kv)
 	suite.dist = meta.NewDistributionManager()
 	suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), suite.store)
-	suite.targetMgr = meta.NewTargetManager()
+	suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta)
 	suite.nodeMgr = session.NewNodeManager()
 	suite.nodeMgr.Add(&session.NodeInfo{})
-	suite.handoffObserver = observers.NewHandoffObserver(
-		suite.store,
-		suite.meta,
-		suite.dist,
-		suite.targetMgr,
-		suite.broker,
-	)
 	suite.scheduler = NewScheduler()
 
 	suite.scheduler.Start(context.Background())
@@ -180,12 +172,12 @@ func (suite *JobSuite) TestLoadCollection() {
 			suite.targetMgr,
 			suite.broker,
 			suite.nodeMgr,
-			suite.handoffObserver,
 		)
 		suite.scheduler.Add(job)
 		err := job.Wait()
 		suite.NoError(err)
 		suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
+		suite.targetMgr.UpdateCollectionCurrentTarget(collection)
 		suite.assertLoaded(collection)
 	}
 
@@ -205,7 +197,6 @@ func (suite *JobSuite) TestLoadCollection() {
 			suite.targetMgr,
 			suite.broker,
 			suite.nodeMgr,
-			suite.handoffObserver,
 		)
 		suite.scheduler.Add(job)
 		err := job.Wait()
@@ -229,7 +220,6 @@ func (suite *JobSuite) TestLoadCollection() {
 			suite.targetMgr,
 			suite.broker,
 			suite.nodeMgr,
-			suite.handoffObserver,
 		)
 		suite.scheduler.Add(job)
 		err := job.Wait()
@@ -255,7 +245,6 @@ func (suite *JobSuite) TestLoadCollection() {
 			suite.targetMgr,
 			suite.broker,
 			suite.nodeMgr,
-			suite.handoffObserver,
 		)
 		suite.scheduler.Add(job)
 		err := job.Wait()
@@ -284,7 +273,6 @@ func (suite *JobSuite) TestLoadCollectionWithReplicas() {
 			suite.targetMgr,
 			suite.broker,
 			suite.nodeMgr,
-			suite.handoffObserver,
 		)
 		suite.scheduler.Add(job)
 		err := job.Wait()
@@ -315,12 +303,12 @@ func (suite *JobSuite) TestLoadCollectionWithDiffIndex() {
 			suite.targetMgr,
 			suite.broker,
 			suite.nodeMgr,
-			suite.handoffObserver,
 		)
 		suite.scheduler.Add(job)
 		err := job.Wait()
 		suite.NoError(err)
 		suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...)
|
||||||
suite.assertLoaded(collection)
|
suite.assertLoaded(collection)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -343,7 +331,6 @@ func (suite *JobSuite) TestLoadCollectionWithDiffIndex() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -363,7 +350,7 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||||||
req := &querypb.LoadPartitionsRequest{
|
req := &querypb.LoadPartitionsRequest{
|
||||||
CollectionID: collection,
|
CollectionID: collection,
|
||||||
PartitionIDs: suite.partitions[collection],
|
PartitionIDs: suite.partitions[collection],
|
||||||
// ReplicaNumber: 1,
|
ReplicaNumber: 1,
|
||||||
}
|
}
|
||||||
job := NewLoadPartitionJob(
|
job := NewLoadPartitionJob(
|
||||||
ctx,
|
ctx,
|
||||||
@ -373,12 +360,12 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
suite.NoError(err)
|
suite.NoError(err)
|
||||||
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
||||||
|
suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...)
|
||||||
suite.assertLoaded(collection)
|
suite.assertLoaded(collection)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -401,7 +388,6 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -427,7 +413,6 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -453,7 +438,6 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -478,7 +462,6 @@ func (suite *JobSuite) TestLoadPartition() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -508,7 +491,6 @@ func (suite *JobSuite) TestLoadPartitionWithReplicas() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -540,12 +522,12 @@ func (suite *JobSuite) TestLoadPartitionWithDiffIndex() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
suite.NoError(err)
|
suite.NoError(err)
|
||||||
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
||||||
|
suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...)
|
||||||
suite.assertLoaded(collection)
|
suite.assertLoaded(collection)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -570,7 +552,6 @@ func (suite *JobSuite) TestLoadPartitionWithDiffIndex() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -594,7 +575,6 @@ func (suite *JobSuite) TestReleaseCollection() {
|
|||||||
suite.dist,
|
suite.dist,
|
||||||
suite.meta,
|
suite.meta,
|
||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -613,7 +593,6 @@ func (suite *JobSuite) TestReleaseCollection() {
|
|||||||
suite.dist,
|
suite.dist,
|
||||||
suite.meta,
|
suite.meta,
|
||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -639,7 +618,6 @@ func (suite *JobSuite) TestReleasePartition() {
|
|||||||
suite.dist,
|
suite.dist,
|
||||||
suite.meta,
|
suite.meta,
|
||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -664,7 +642,6 @@ func (suite *JobSuite) TestReleasePartition() {
|
|||||||
suite.dist,
|
suite.dist,
|
||||||
suite.meta,
|
suite.meta,
|
||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -691,7 +668,6 @@ func (suite *JobSuite) TestReleasePartition() {
|
|||||||
suite.dist,
|
suite.dist,
|
||||||
suite.meta,
|
suite.meta,
|
||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -737,7 +713,6 @@ func (suite *JobSuite) TestLoadCollectionStoreFailed() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
loadErr := job.Wait()
|
loadErr := job.Wait()
|
||||||
@ -771,7 +746,6 @@ func (suite *JobSuite) TestLoadPartitionStoreFailed() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
loadErr := job.Wait()
|
loadErr := job.Wait()
|
||||||
@ -794,7 +768,6 @@ func (suite *JobSuite) TestLoadCreateReplicaFailed() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -817,7 +790,6 @@ func (suite *JobSuite) loadAll() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -825,6 +797,7 @@ func (suite *JobSuite) loadAll() {
|
|||||||
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
||||||
suite.True(suite.meta.Exist(collection))
|
suite.True(suite.meta.Exist(collection))
|
||||||
suite.NotNil(suite.meta.GetCollection(collection))
|
suite.NotNil(suite.meta.GetCollection(collection))
|
||||||
|
suite.targetMgr.UpdateCollectionCurrentTarget(collection)
|
||||||
} else {
|
} else {
|
||||||
req := &querypb.LoadPartitionsRequest{
|
req := &querypb.LoadPartitionsRequest{
|
||||||
CollectionID: collection,
|
CollectionID: collection,
|
||||||
@ -838,7 +811,6 @@ func (suite *JobSuite) loadAll() {
|
|||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.broker,
|
suite.broker,
|
||||||
suite.nodeMgr,
|
suite.nodeMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -846,6 +818,7 @@ func (suite *JobSuite) loadAll() {
|
|||||||
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
suite.EqualValues(1, suite.meta.GetReplicaNumber(collection))
|
||||||
suite.True(suite.meta.Exist(collection))
|
suite.True(suite.meta.Exist(collection))
|
||||||
suite.NotNil(suite.meta.GetPartitionsByCollection(collection))
|
suite.NotNil(suite.meta.GetPartitionsByCollection(collection))
|
||||||
|
suite.targetMgr.UpdateCollectionCurrentTarget(collection)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -862,7 +835,6 @@ func (suite *JobSuite) releaseAll() {
|
|||||||
suite.dist,
|
suite.dist,
|
||||||
suite.meta,
|
suite.meta,
|
||||||
suite.targetMgr,
|
suite.targetMgr,
|
||||||
suite.handoffObserver,
|
|
||||||
)
|
)
|
||||||
suite.scheduler.Add(job)
|
suite.scheduler.Add(job)
|
||||||
err := job.Wait()
|
err := job.Wait()
|
||||||
@ -874,11 +846,11 @@ func (suite *JobSuite) releaseAll() {
|
|||||||
func (suite *JobSuite) assertLoaded(collection int64) {
|
func (suite *JobSuite) assertLoaded(collection int64) {
|
||||||
suite.True(suite.meta.Exist(collection))
|
suite.True(suite.meta.Exist(collection))
|
||||||
for _, channel := range suite.channels[collection] {
|
for _, channel := range suite.channels[collection] {
|
||||||
suite.NotNil(suite.targetMgr.GetDmChannel(channel))
|
suite.NotNil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget))
|
||||||
}
|
}
|
||||||
for _, partitions := range suite.segments[collection] {
|
for _, partitions := range suite.segments[collection] {
|
||||||
for _, segment := range partitions {
|
for _, segment := range partitions {
|
||||||
suite.NotNil(suite.targetMgr.GetSegment(segment))
|
suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -886,11 +858,11 @@ func (suite *JobSuite) assertLoaded(collection int64) {
|
|||||||
func (suite *JobSuite) assertReleased(collection int64) {
|
func (suite *JobSuite) assertReleased(collection int64) {
|
||||||
suite.False(suite.meta.Exist(collection))
|
suite.False(suite.meta.Exist(collection))
|
||||||
for _, channel := range suite.channels[collection] {
|
for _, channel := range suite.channels[collection] {
|
||||||
suite.Nil(suite.targetMgr.GetDmChannel(channel))
|
suite.Nil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget))
|
||||||
}
|
}
|
||||||
for _, partitions := range suite.segments[collection] {
|
for _, partitions := range suite.segments[collection] {
|
||||||
for _, segment := range partitions {
|
for _, segment := range partitions {
|
||||||
suite.Nil(suite.targetMgr.GetSegment(segment))
|
suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
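A note on the assertion changes in this test file: reads now go through the scoped target API, and a load only becomes visible after the next target is promoted to the current one, which is why the tests call `UpdateCollectionCurrentTarget` before asserting. A hedged sketch of the pattern (variable names follow the suite; the channel and segment values are placeholders):

```go
// Promote whatever the load job pulled into the next target.
suite.targetMgr.UpdateCollectionCurrentTarget(collection)

// Reads are collection-scoped and take an explicit target scope.
ch := suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget)
seg := suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget)
suite.NotNil(ch)  // nil if the channel is not in the current target
suite.NotNil(seg) // nil if the segment is not in the current target
```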
|
|||||||
@ -20,6 +20,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||||
. "github.com/milvus-io/milvus/internal/util/typeutil"
|
. "github.com/milvus-io/milvus/internal/util/typeutil"
|
||||||
)
|
)
|
||||||
|
|||||||
@ -19,8 +19,9 @@ package meta
|
|||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"github.com/samber/lo"
|
||||||
|
|
||||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type LeaderView struct {
|
type LeaderView struct {
|
||||||
@ -28,7 +29,7 @@ type LeaderView struct {
|
|||||||
CollectionID int64
|
CollectionID int64
|
||||||
Channel string
|
Channel string
|
||||||
Segments map[int64]*querypb.SegmentDist
|
Segments map[int64]*querypb.SegmentDist
|
||||||
GrowingSegments typeutil.UniqueSet
|
GrowingSegments map[int64]*Segment
|
||||||
}
|
}
|
||||||
|
|
||||||
func (view *LeaderView) Clone() *LeaderView {
|
func (view *LeaderView) Clone() *LeaderView {
|
||||||
@ -36,7 +37,11 @@ func (view *LeaderView) Clone() *LeaderView {
|
|||||||
for k, v := range view.Segments {
|
for k, v := range view.Segments {
|
||||||
segments[k] = v
|
segments[k] = v
|
||||||
}
|
}
|
||||||
growings := typeutil.NewUniqueSet(view.GrowingSegments.Collect()...)
|
|
||||||
|
growings := make(map[int64]*Segment)
|
||||||
|
for k, v := range view.GrowingSegments {
|
||||||
|
growings[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
return &LeaderView{
|
return &LeaderView{
|
||||||
ID: view.ID,
|
ID: view.ID,
|
||||||
@ -75,7 +80,7 @@ func (mgr *LeaderViewManager) GetSegmentByNode(nodeID int64) []int64 {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if leaderID == nodeID {
|
if leaderID == nodeID {
|
||||||
segments = append(segments, view.GrowingSegments.Collect()...)
|
segments = append(segments, lo.Keys(view.GrowingSegments)...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -104,7 +109,7 @@ func (mgr *LeaderViewManager) GetSegmentDist(segmentID int64) []int64 {
|
|||||||
if ok {
|
if ok {
|
||||||
nodes = append(nodes, version.NodeID)
|
nodes = append(nodes, version.NodeID)
|
||||||
}
|
}
|
||||||
if view.GrowingSegments.Contain(segmentID) {
|
if _, ok := view.GrowingSegments[segmentID]; ok {
|
||||||
nodes = append(nodes, leaderID)
|
nodes = append(nodes, leaderID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -135,7 +140,7 @@ func (mgr *LeaderViewManager) GetGrowingSegmentDist(segmentID int64) []int64 {
|
|||||||
nodes := make([]int64, 0)
|
nodes := make([]int64, 0)
|
||||||
for leaderID, views := range mgr.views {
|
for leaderID, views := range mgr.views {
|
||||||
for _, view := range views {
|
for _, view := range views {
|
||||||
if view.GrowingSegments.Contain(segmentID) {
|
if _, ok := view.GrowingSegments[segmentID]; ok {
|
||||||
nodes = append(nodes, leaderID)
|
nodes = append(nodes, leaderID)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -151,7 +156,7 @@ func (mgr *LeaderViewManager) GetLeadersByGrowingSegment(segmentID int64) *Leade
|
|||||||
|
|
||||||
for _, views := range mgr.views {
|
for _, views := range mgr.views {
|
||||||
for _, view := range views {
|
for _, view := range views {
|
||||||
if view.GrowingSegments.Contain(segmentID) {
|
if _, ok := view.GrowingSegments[segmentID]; ok {
|
||||||
return view
|
return view
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -159,6 +164,25 @@ func (mgr *LeaderViewManager) GetLeadersByGrowingSegment(segmentID int64) *Leade
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetGrowingSegmentDistByCollectionAndNode returns all growing segments on the given node that belong to the given collection.

|
||||||
|
func (mgr *LeaderViewManager) GetGrowingSegmentDistByCollectionAndNode(collectionID, nodeID int64) map[int64]*Segment {
|
||||||
|
mgr.rwmutex.RLock()
|
||||||
|
defer mgr.rwmutex.RUnlock()
|
||||||
|
|
||||||
|
segments := make(map[int64]*Segment, 0)
|
||||||
|
if viewsOnNode, ok := mgr.views[nodeID]; ok {
|
||||||
|
for _, view := range viewsOnNode {
|
||||||
|
if view.CollectionID == collectionID {
|
||||||
|
for ID, segment := range view.GrowingSegments {
|
||||||
|
segments[ID] = segment
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return segments
|
||||||
|
}
|
||||||
|
|
||||||
// GetSegmentDist returns the list of nodes the given segment on
|
// GetSegmentDist returns the list of nodes the given segment on
|
||||||
func (mgr *LeaderViewManager) GetChannelDist(channel string) []int64 {
|
func (mgr *LeaderViewManager) GetChannelDist(channel string) []int64 {
|
||||||
mgr.rwmutex.RLock()
|
mgr.rwmutex.RLock()
|
||||||
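For context on the `GrowingSegments` change in this file: the field moves from a `typeutil.UniqueSet` of IDs to a `map[int64]*Segment`, so membership checks become plain map lookups and a leader view can carry per-segment metadata instead of bare IDs. A self-contained sketch of the lookup pattern, with a simplified local type standing in for `meta.Segment`:

```go
package main

import "fmt"

// Segment is a stand-in for meta.Segment; only the ID matters for this sketch.
type Segment struct{ ID int64 }

// LeaderView mirrors the new shape: growing segments keyed by segment ID.
type LeaderView struct {
	GrowingSegments map[int64]*Segment
}

func main() {
	view := &LeaderView{GrowingSegments: map[int64]*Segment{7: {ID: 7}}}

	// Old style: view.GrowingSegments.Contain(7)
	// New style: a plain map membership test, as used throughout this file.
	if _, ok := view.GrowingSegments[7]; ok {
		fmt.Println("segment 7 is growing on this leader")
	}
}
```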
|
|||||||
@ -19,10 +19,11 @@ package meta
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
|
||||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
|
||||||
"github.com/samber/lo"
|
"github.com/samber/lo"
|
||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
|
|
||||||
|
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||||
|
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
type LeaderViewManagerSuite struct {
|
type LeaderViewManagerSuite struct {
|
||||||
@ -70,7 +71,7 @@ func (suite *LeaderViewManagerSuite) SetupSuite() {
|
|||||||
ID: int64(j),
|
ID: int64(j),
|
||||||
CollectionID: collection,
|
CollectionID: collection,
|
||||||
Channel: channel,
|
Channel: channel,
|
||||||
GrowingSegments: typeutil.NewUniqueSet(suite.growingSegments[collection][channel]),
|
GrowingSegments: map[int64]*Segment{suite.growingSegments[collection][channel]: nil},
|
||||||
Segments: make(map[int64]*querypb.SegmentDist),
|
Segments: make(map[int64]*querypb.SegmentDist),
|
||||||
}
|
}
|
||||||
for k, segment := range suite.segments[collection] {
|
for k, segment := range suite.segments[collection] {
|
||||||
@ -163,8 +164,8 @@ func (suite *LeaderViewManagerSuite) AssertSegmentDist(segment int64, nodes []in
|
|||||||
for _, view := range views {
|
for _, view := range views {
|
||||||
version, ok := view.Segments[segment]
|
version, ok := view.Segments[segment]
|
||||||
if ok {
|
if ok {
|
||||||
if !suite.True(nodeSet.Contain(version.NodeID) ||
|
_, ok = view.GrowingSegments[version.NodeID]
|
||||||
version.NodeID == leader && view.GrowingSegments.Contain(version.NodeID)) {
|
if !suite.True(nodeSet.Contain(version.NodeID) || version.NodeID == leader && ok) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
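A small note on the fixture above: since `GrowingSegments` is now a map, tests that only care about membership can key the map by segment ID and leave the value nil, exactly as the suite does. A sketch (the identifiers are placeholders standing in for the suite's fields):

```go
view := &LeaderView{
	ID:              int64(nodeID),
	CollectionID:    collection,
	Channel:         channel,
	GrowingSegments: map[int64]*Segment{growingSegmentID: nil}, // value unused by membership checks
	Segments:        make(map[int64]*querypb.SegmentDist),
}
```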
|
|||||||
@ -3,10 +3,8 @@
|
|||||||
package meta
|
package meta
|
||||||
|
|
||||||
import (
|
import (
|
||||||
mock "github.com/stretchr/testify/mock"
|
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
|
||||||
|
|
||||||
querypb "github.com/milvus-io/milvus/internal/proto/querypb"
|
querypb "github.com/milvus-io/milvus/internal/proto/querypb"
|
||||||
|
mock "github.com/stretchr/testify/mock"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockStore is an autogenerated mock type for the Store type
|
// MockStore is an autogenerated mock type for the Store type
|
||||||
@ -157,67 +155,6 @@ func (_c *MockStore_GetReplicas_Call) Return(_a0 []*querypb.Replica, _a1 error)
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadHandoffWithRevision provides a mock function with given fields:
|
|
||||||
func (_m *MockStore) LoadHandoffWithRevision() ([]string, []string, int64, error) {
|
|
||||||
ret := _m.Called()
|
|
||||||
|
|
||||||
var r0 []string
|
|
||||||
if rf, ok := ret.Get(0).(func() []string); ok {
|
|
||||||
r0 = rf()
|
|
||||||
} else {
|
|
||||||
if ret.Get(0) != nil {
|
|
||||||
r0 = ret.Get(0).([]string)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var r1 []string
|
|
||||||
if rf, ok := ret.Get(1).(func() []string); ok {
|
|
||||||
r1 = rf()
|
|
||||||
} else {
|
|
||||||
if ret.Get(1) != nil {
|
|
||||||
r1 = ret.Get(1).([]string)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var r2 int64
|
|
||||||
if rf, ok := ret.Get(2).(func() int64); ok {
|
|
||||||
r2 = rf()
|
|
||||||
} else {
|
|
||||||
r2 = ret.Get(2).(int64)
|
|
||||||
}
|
|
||||||
|
|
||||||
var r3 error
|
|
||||||
if rf, ok := ret.Get(3).(func() error); ok {
|
|
||||||
r3 = rf()
|
|
||||||
} else {
|
|
||||||
r3 = ret.Error(3)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0, r1, r2, r3
|
|
||||||
}
|
|
||||||
|
|
||||||
// MockStore_LoadHandoffWithRevision_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadHandoffWithRevision'
|
|
||||||
type MockStore_LoadHandoffWithRevision_Call struct {
|
|
||||||
*mock.Call
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadHandoffWithRevision is a helper method to define mock.On call
|
|
||||||
func (_e *MockStore_Expecter) LoadHandoffWithRevision() *MockStore_LoadHandoffWithRevision_Call {
|
|
||||||
return &MockStore_LoadHandoffWithRevision_Call{Call: _e.mock.On("LoadHandoffWithRevision")}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (_c *MockStore_LoadHandoffWithRevision_Call) Run(run func()) *MockStore_LoadHandoffWithRevision_Call {
|
|
||||||
_c.Call.Run(func(args mock.Arguments) {
|
|
||||||
run()
|
|
||||||
})
|
|
||||||
return _c
|
|
||||||
}
|
|
||||||
|
|
||||||
func (_c *MockStore_LoadHandoffWithRevision_Call) Return(_a0 []string, _a1 []string, _a2 int64, _a3 error) *MockStore_LoadHandoffWithRevision_Call {
|
|
||||||
_c.Call.Return(_a0, _a1, _a2, _a3)
|
|
||||||
return _c
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReleaseCollection provides a mock function with given fields: id
|
// ReleaseCollection provides a mock function with given fields: id
|
||||||
func (_m *MockStore) ReleaseCollection(id int64) error {
|
func (_m *MockStore) ReleaseCollection(id int64) error {
|
||||||
ret := _m.Called(id)
|
ret := _m.Called(id)
|
||||||
@ -382,43 +319,6 @@ func (_c *MockStore_ReleaseReplicas_Call) Return(_a0 error) *MockStore_ReleaseRe
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveHandoffEvent provides a mock function with given fields: segmentInfo
|
|
||||||
func (_m *MockStore) RemoveHandoffEvent(segmentInfo *querypb.SegmentInfo) error {
|
|
||||||
ret := _m.Called(segmentInfo)
|
|
||||||
|
|
||||||
var r0 error
|
|
||||||
if rf, ok := ret.Get(0).(func(*querypb.SegmentInfo) error); ok {
|
|
||||||
r0 = rf(segmentInfo)
|
|
||||||
} else {
|
|
||||||
r0 = ret.Error(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0
|
|
||||||
}
|
|
||||||
|
|
||||||
// MockStore_RemoveHandoffEvent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveHandoffEvent'
|
|
||||||
type MockStore_RemoveHandoffEvent_Call struct {
|
|
||||||
*mock.Call
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveHandoffEvent is a helper method to define mock.On call
|
|
||||||
// - segmentInfo *querypb.SegmentInfo
|
|
||||||
func (_e *MockStore_Expecter) RemoveHandoffEvent(segmentInfo interface{}) *MockStore_RemoveHandoffEvent_Call {
|
|
||||||
return &MockStore_RemoveHandoffEvent_Call{Call: _e.mock.On("RemoveHandoffEvent", segmentInfo)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (_c *MockStore_RemoveHandoffEvent_Call) Run(run func(segmentInfo *querypb.SegmentInfo)) *MockStore_RemoveHandoffEvent_Call {
|
|
||||||
_c.Call.Run(func(args mock.Arguments) {
|
|
||||||
run(args[0].(*querypb.SegmentInfo))
|
|
||||||
})
|
|
||||||
return _c
|
|
||||||
}
|
|
||||||
|
|
||||||
func (_c *MockStore_RemoveHandoffEvent_Call) Return(_a0 error) *MockStore_RemoveHandoffEvent_Call {
|
|
||||||
_c.Call.Return(_a0)
|
|
||||||
return _c
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveCollection provides a mock function with given fields: info
|
// SaveCollection provides a mock function with given fields: info
|
||||||
func (_m *MockStore) SaveCollection(info *querypb.CollectionLoadInfo) error {
|
func (_m *MockStore) SaveCollection(info *querypb.CollectionLoadInfo) error {
|
||||||
ret := _m.Called(info)
|
ret := _m.Called(info)
|
||||||
@ -543,45 +443,6 @@ func (_c *MockStore_SaveReplica_Call) Return(_a0 error) *MockStore_SaveReplica_C
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
// WatchHandoffEvent provides a mock function with given fields: revision
|
|
||||||
func (_m *MockStore) WatchHandoffEvent(revision int64) clientv3.WatchChan {
|
|
||||||
ret := _m.Called(revision)
|
|
||||||
|
|
||||||
var r0 clientv3.WatchChan
|
|
||||||
if rf, ok := ret.Get(0).(func(int64) clientv3.WatchChan); ok {
|
|
||||||
r0 = rf(revision)
|
|
||||||
} else {
|
|
||||||
if ret.Get(0) != nil {
|
|
||||||
r0 = ret.Get(0).(clientv3.WatchChan)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0
|
|
||||||
}
|
|
||||||
|
|
||||||
// MockStore_WatchHandoffEvent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WatchHandoffEvent'
|
|
||||||
type MockStore_WatchHandoffEvent_Call struct {
|
|
||||||
*mock.Call
|
|
||||||
}
|
|
||||||
|
|
||||||
// WatchHandoffEvent is a helper method to define mock.On call
|
|
||||||
// - revision int64
|
|
||||||
func (_e *MockStore_Expecter) WatchHandoffEvent(revision interface{}) *MockStore_WatchHandoffEvent_Call {
|
|
||||||
return &MockStore_WatchHandoffEvent_Call{Call: _e.mock.On("WatchHandoffEvent", revision)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (_c *MockStore_WatchHandoffEvent_Call) Run(run func(revision int64)) *MockStore_WatchHandoffEvent_Call {
|
|
||||||
_c.Call.Run(func(args mock.Arguments) {
|
|
||||||
run(args[0].(int64))
|
|
||||||
})
|
|
||||||
return _c
|
|
||||||
}
|
|
||||||
|
|
||||||
func (_c *MockStore_WatchHandoffEvent_Call) Return(_a0 clientv3.WatchChan) *MockStore_WatchHandoffEvent_Call {
|
|
||||||
_c.Call.Return(_a0)
|
|
||||||
return _c
|
|
||||||
}
|
|
||||||
|
|
||||||
type mockConstructorTestingTNewMockStore interface {
|
type mockConstructorTestingTNewMockStore interface {
|
||||||
mock.TestingT
|
mock.TestingT
|
||||||
Cleanup(func())
|
Cleanup(func())
|
||||||
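With the handoff watch/load/remove methods gone, `MockStore` now only mocks the remaining `QueryCoordCatalog` surface. For readers unfamiliar with these generated mocks, stubbing still works through the standard testify calls; a hedged example, assuming the generated `NewMockStore` constructor that the `mockConstructorTestingTNewMockStore` type implies:

```go
store := NewMockStore(suite.T())
store.On("SaveCollection", mock.Anything).Return(nil)
store.On("ReleaseCollection", mock.Anything).Return(nil)
```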
|
|||||||
@ -48,8 +48,6 @@ type WatchStoreChan = clientv3.WatchChan
|
|||||||
// Store is used to save and get from object storage.
|
// Store is used to save and get from object storage.
|
||||||
type Store interface {
|
type Store interface {
|
||||||
metastore.QueryCoordCatalog
|
metastore.QueryCoordCatalog
|
||||||
WatchHandoffEvent(revision int64) WatchStoreChan
|
|
||||||
LoadHandoffWithRevision() ([]string, []string, int64, error)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type metaStore struct {
|
type metaStore struct {
|
||||||
@ -195,19 +193,6 @@ func (s metaStore) ReleaseReplica(collection, replica int64) error {
|
|||||||
return s.cli.Remove(key)
|
return s.cli.Remove(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s metaStore) WatchHandoffEvent(revision int64) WatchStoreChan {
|
|
||||||
return s.cli.WatchWithRevision(util.HandoffSegmentPrefix, revision)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s metaStore) RemoveHandoffEvent(info *querypb.SegmentInfo) error {
|
|
||||||
key := encodeHandoffEventKey(info.CollectionID, info.PartitionID, info.SegmentID)
|
|
||||||
return s.cli.Remove(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s metaStore) LoadHandoffWithRevision() ([]string, []string, int64, error) {
|
|
||||||
return s.cli.LoadWithRevision(util.HandoffSegmentPrefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeCollectionLoadInfoKey(collection int64) string {
|
func encodeCollectionLoadInfoKey(collection int64) string {
|
||||||
return fmt.Sprintf("%s/%d", CollectionLoadInfoPrefix, collection)
|
return fmt.Sprintf("%s/%d", CollectionLoadInfoPrefix, collection)
|
||||||
}
|
}
|
||||||
|
|||||||
internal/querycoordv2/meta/target.go (new file, 79 lines)
@ -0,0 +1,79 @@
|
|||||||
|
// Licensed to the LF AI & Data foundation under one
|
||||||
|
// or more contributor license agreements. See the NOTICE file
|
||||||
|
// distributed with this work for additional information
|
||||||
|
// regarding copyright ownership. The ASF licenses this file
|
||||||
|
// to you under the Apache License, Version 2.0 (the
|
||||||
|
// "License"); you may not use this file except in compliance
|
||||||
|
// with the License. You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/samber/lo"
|
||||||
|
|
||||||
|
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CollectionTarget is immutable once built; it holds a collection's sealed segments and DM channels.
|
||||||
|
type CollectionTarget struct {
|
||||||
|
segments map[int64]*datapb.SegmentInfo
|
||||||
|
dmChannels map[string]*DmChannel
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewCollectionTarget(segments map[int64]*datapb.SegmentInfo, dmChannels map[string]*DmChannel) *CollectionTarget {
|
||||||
|
return &CollectionTarget{
|
||||||
|
segments: segments,
|
||||||
|
dmChannels: dmChannels,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *CollectionTarget) GetAllSegments() map[int64]*datapb.SegmentInfo {
|
||||||
|
return p.segments
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *CollectionTarget) GetAllDmChannels() map[string]*DmChannel {
|
||||||
|
return p.dmChannels
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *CollectionTarget) GetAllSegmentIDs() []int64 {
|
||||||
|
return lo.Keys(p.segments)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *CollectionTarget) GetAllDmChannelNames() []string {
|
||||||
|
return lo.Keys(p.dmChannels)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *CollectionTarget) IsEmpty() bool {
|
||||||
|
return len(p.dmChannels) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type target struct {
|
||||||
|
// just maintain target at collection level
|
||||||
|
collectionTargetMap map[int64]*CollectionTarget
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTarget() *target {
|
||||||
|
return &target{
|
||||||
|
collectionTargetMap: make(map[int64]*CollectionTarget),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *target) updateCollectionTarget(collectionID int64, target *CollectionTarget) {
|
||||||
|
t.collectionTargetMap[collectionID] = target
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *target) removeCollectionTarget(collectionID int64) {
|
||||||
|
delete(t.collectionTargetMap, collectionID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *target) getCollectionTarget(collectionID int64) *CollectionTarget {
|
||||||
|
return t.collectionTargetMap[collectionID]
|
||||||
|
}
|
||||||
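To make the new file's role concrete: a `CollectionTarget` is an immutable pairing of sealed-segment info and DM channels that gets built once per pull and then swapped wholesale between the `next` and `current` maps. A minimal usage sketch, written as if inside the `meta` package (the IDs and channel name are made up for illustration):

```go
segments := map[int64]*datapb.SegmentInfo{
	100: {ID: 100, CollectionID: 1, PartitionID: 10},
}
channels := map[string]*DmChannel{
	"dml_0": DmChannelFromVChannel(&datapb.VchannelInfo{CollectionID: 1, ChannelName: "dml_0"}),
}

target := NewCollectionTarget(segments, channels)
_ = target.GetAllSegmentIDs()     // []int64{100}
_ = target.GetAllDmChannelNames() // []string{"dml_0"}
_ = target.IsEmpty()              // false: the target has at least one DM channel
```

Note that `IsEmpty` only looks at the DM channels; a target with channels but no sealed segments still counts as non-empty.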
@ -17,192 +17,386 @@
|
|||||||
package meta
|
package meta
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/milvus-io/milvus/internal/log"
|
"github.com/milvus-io/milvus/internal/log"
|
||||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||||
"github.com/milvus-io/milvus/internal/util/tsoutil"
|
"github.com/samber/lo"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
type TargetManager struct {
|
type TargetScope = int32
|
||||||
rwmutex sync.RWMutex
|
|
||||||
|
|
||||||
segments map[int64]*datapb.SegmentInfo
|
const (
|
||||||
dmChannels map[string]*DmChannel
|
CurrentTarget TargetScope = iota + 1
|
||||||
|
NextTarget
|
||||||
|
)
|
||||||
|
|
||||||
|
type TargetManager struct {
|
||||||
|
rwMutex sync.RWMutex
|
||||||
|
broker Broker
|
||||||
|
meta *Meta
|
||||||
|
|
||||||
|
// all read segment/channel operations happen on current -> only the current target is visible to the outside
|
||||||
|
// all add segment/channel operations happen on next -> changes can only happen on the next target
|
||||||
|
// all remove segment/channel operations happen on both current and next -> delete status should stay consistent
|
||||||
|
current *target
|
||||||
|
next *target
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewTargetManager() *TargetManager {
|
func NewTargetManager(broker Broker, meta *Meta) *TargetManager {
|
||||||
return &TargetManager{
|
return &TargetManager{
|
||||||
segments: make(map[int64]*datapb.SegmentInfo),
|
broker: broker,
|
||||||
dmChannels: make(map[string]*DmChannel),
|
meta: meta,
|
||||||
|
current: newTarget(),
|
||||||
|
next: newTarget(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (mgr *TargetManager) UpdateCollectionCurrentTarget(collectionID int64, partitionIDs ...int64) {
|
||||||
|
mgr.rwMutex.Lock()
|
||||||
|
defer mgr.rwMutex.Unlock()
|
||||||
|
log := log.With(zap.Int64("collectionID", collectionID),
|
||||||
|
zap.Int64s("PartitionIDs", partitionIDs))
|
||||||
|
|
||||||
|
log.Info("start to update current target for collection")
|
||||||
|
|
||||||
|
newTarget := mgr.next.getCollectionTarget(collectionID)
|
||||||
|
if newTarget == nil || newTarget.IsEmpty() {
|
||||||
|
log.Info("next target does not exist, skip it")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
mgr.current.updateCollectionTarget(collectionID, newTarget)
|
||||||
|
mgr.next.removeCollectionTarget(collectionID)
|
||||||
|
|
||||||
|
log.Info("finish to update current target for collection",
|
||||||
|
zap.Int64s("segments", newTarget.GetAllSegmentIDs()),
|
||||||
|
zap.Strings("channels", newTarget.GetAllDmChannelNames()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateCollectionNextTargetWithPartitions is for loading requests, which provide the partitionIDs from outside
|
||||||
|
func (mgr *TargetManager) UpdateCollectionNextTargetWithPartitions(collectionID int64, partitionIDs ...int64) error {
|
||||||
|
mgr.rwMutex.Lock()
|
||||||
|
defer mgr.rwMutex.Unlock()
|
||||||
|
|
||||||
|
if len(partitionIDs) == 0 {
|
||||||
|
msg := "failed to update collection next target, due to no partition specified"
|
||||||
|
log.Warn(msg,
|
||||||
|
zap.Int64("collectionID", collectionID),
|
||||||
|
zap.Int64s("partitionIDs", partitionIDs))
|
||||||
|
return errors.New(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
return mgr.updateCollectionNextTarget(collectionID, partitionIDs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateCollectionNextTarget is for already-loaded collections, which take the partition info from meta or the broker
|
||||||
|
func (mgr *TargetManager) UpdateCollectionNextTarget(collectionID int64) error {
|
||||||
|
mgr.rwMutex.Lock()
|
||||||
|
defer mgr.rwMutex.Unlock()
|
||||||
|
|
||||||
|
partitionIDs := make([]int64, 0)
|
||||||
|
collection := mgr.meta.GetCollection(collectionID)
|
||||||
|
if collection != nil {
|
||||||
|
var err error
|
||||||
|
partitionIDs, err = mgr.broker.GetPartitions(context.Background(), collectionID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
partitions := mgr.meta.GetPartitionsByCollection(collectionID)
|
||||||
|
if partitions != nil {
|
||||||
|
partitionIDs = lo.Map(partitions, func(partition *Partition, i int) int64 {
|
||||||
|
return partition.PartitionID
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return mgr.updateCollectionNextTarget(collectionID, partitionIDs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mgr *TargetManager) updateCollectionNextTarget(collectionID int64, partitionIDs ...int64) error {
|
||||||
|
log := log.With(zap.Int64("collectionID", collectionID))
|
||||||
|
|
||||||
|
log.Info("start to update next targets for collection")
|
||||||
|
newTarget, err := mgr.PullNextTarget(mgr.broker, collectionID, partitionIDs...)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("failed to get next targets for collection",
|
||||||
|
zap.Error(err))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
mgr.next.updateCollectionTarget(collectionID, newTarget)
|
||||||
|
|
||||||
|
log.Info("finish to update next targets for collection",
|
||||||
|
zap.Int64s("segments", newTarget.GetAllSegmentIDs()),
|
||||||
|
zap.Strings("channels", newTarget.GetAllDmChannelNames()))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mgr *TargetManager) PullNextTarget(broker Broker, collectionID int64, partitionIDs ...int64) (*CollectionTarget, error) {
|
||||||
|
log.Info("start to pull next targets for partition",
|
||||||
|
zap.Int64("collectionID", collectionID),
|
||||||
|
zap.Int64s("partitionIDs", partitionIDs))
|
||||||
|
|
||||||
|
channelInfos := make(map[string][]*datapb.VchannelInfo)
|
||||||
|
segments := make(map[int64]*datapb.SegmentInfo, 0)
|
||||||
|
for _, partitionID := range partitionIDs {
|
||||||
|
log.Debug("get recovery info...",
|
||||||
|
zap.Int64("collectionID", collectionID),
|
||||||
|
zap.Int64("partitionID", partitionID))
|
||||||
|
vChannelInfos, binlogs, err := broker.GetRecoveryInfo(context.TODO(), collectionID, partitionID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, binlog := range binlogs {
|
||||||
|
segments[binlog.GetSegmentID()] = &datapb.SegmentInfo{
|
||||||
|
ID: binlog.GetSegmentID(),
|
||||||
|
CollectionID: collectionID,
|
||||||
|
PartitionID: partitionID,
|
||||||
|
InsertChannel: binlog.GetInsertChannel(),
|
||||||
|
NumOfRows: binlog.GetNumOfRows(),
|
||||||
|
Binlogs: binlog.GetFieldBinlogs(),
|
||||||
|
Statslogs: binlog.GetStatslogs(),
|
||||||
|
Deltalogs: binlog.GetDeltalogs(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, info := range vChannelInfos {
|
||||||
|
channelInfos[info.GetChannelName()] = append(channelInfos[info.GetChannelName()], info)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dmChannels := make(map[string]*DmChannel)
|
||||||
|
for _, infos := range channelInfos {
|
||||||
|
merged := mgr.mergeDmChannelInfo(infos)
|
||||||
|
dmChannels[merged.GetChannelName()] = merged
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewCollectionTarget(segments, dmChannels), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mgr *TargetManager) mergeDmChannelInfo(infos []*datapb.VchannelInfo) *DmChannel {
|
||||||
|
var dmChannel *DmChannel
|
||||||
|
|
||||||
|
for _, info := range infos {
|
||||||
|
if dmChannel == nil {
|
||||||
|
dmChannel = DmChannelFromVChannel(info)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.SeekPosition.GetTimestamp() < dmChannel.SeekPosition.GetTimestamp() {
|
||||||
|
dmChannel.SeekPosition = info.SeekPosition
|
||||||
|
}
|
||||||
|
dmChannel.DroppedSegmentIds = append(dmChannel.DroppedSegmentIds, info.DroppedSegmentIds...)
|
||||||
|
dmChannel.UnflushedSegmentIds = append(dmChannel.UnflushedSegmentIds, info.UnflushedSegmentIds...)
|
||||||
|
dmChannel.FlushedSegmentIds = append(dmChannel.FlushedSegmentIds, info.FlushedSegmentIds...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dmChannel
|
||||||
|
}
|
||||||
|
|
||||||
// RemoveCollection removes all channels and segments in the given collection
|
// RemoveCollection removes all channels and segments in the given collection
|
||||||
func (mgr *TargetManager) RemoveCollection(collectionID int64) {
|
func (mgr *TargetManager) RemoveCollection(collectionID int64) {
|
||||||
mgr.rwmutex.Lock()
|
mgr.rwMutex.Lock()
|
||||||
defer mgr.rwmutex.Unlock()
|
defer mgr.rwMutex.Unlock()
|
||||||
|
log.Info("remove collection from targets",
|
||||||
|
zap.Int64("collectionID", collectionID))
|
||||||
|
|
||||||
log.Info("remove collection from targets")
|
mgr.current.removeCollectionTarget(collectionID)
|
||||||
for _, segment := range mgr.segments {
|
mgr.next.removeCollectionTarget(collectionID)
|
||||||
if segment.CollectionID == collectionID {
|
|
||||||
mgr.removeSegment(segment.GetID())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, dmChannel := range mgr.dmChannels {
|
|
||||||
if dmChannel.CollectionID == collectionID {
|
|
||||||
mgr.removeDmChannel(dmChannel.GetChannelName())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemovePartition removes all segment in the given partition,
|
// RemovePartition removes all segment in the given partition,
|
||||||
// NOTE: this doesn't remove any channel even the given one is the only partition
|
// NOTE: this doesn't remove any channel even the given one is the only partition
|
||||||
func (mgr *TargetManager) RemovePartition(partitionID int64) {
|
func (mgr *TargetManager) RemovePartition(collectionID int64, partitionIDs ...int64) {
|
||||||
mgr.rwmutex.Lock()
|
mgr.rwMutex.Lock()
|
||||||
defer mgr.rwmutex.Unlock()
|
defer mgr.rwMutex.Unlock()
|
||||||
|
|
||||||
log.Info("remove partition from targets",
|
log := log.With(zap.Int64("collectionID", collectionID),
|
||||||
zap.Int64("partitionID", partitionID))
|
zap.Int64s("PartitionIDs", partitionIDs))
|
||||||
for _, segment := range mgr.segments {
|
|
||||||
if segment.GetPartitionID() == partitionID {
|
log.Info("remove partition from targets")
|
||||||
mgr.removeSegment(segment.GetID())
|
|
||||||
|
partitionSet := typeutil.NewUniqueSet(partitionIDs...)
|
||||||
|
|
||||||
|
oldCurrentTarget := mgr.current.getCollectionTarget(collectionID)
|
||||||
|
if oldCurrentTarget != nil {
|
||||||
|
newTarget := mgr.removePartitionFromCollectionTarget(oldCurrentTarget, partitionSet)
|
||||||
|
if newTarget != nil {
|
||||||
|
mgr.current.updateCollectionTarget(collectionID, newTarget)
|
||||||
|
log.Info("finish to remove partition from current target for collection",
|
||||||
|
zap.Int64s("segments", newTarget.GetAllSegmentIDs()),
|
||||||
|
zap.Strings("channels", newTarget.GetAllDmChannelNames()))
|
||||||
|
} else {
|
||||||
|
log.Info("all partitions have been released, release the collection next target now")
|
||||||
|
mgr.current.removeCollectionTarget(collectionID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
oldNextTarget := mgr.next.getCollectionTarget(collectionID)
|
||||||
|
if oldNextTarget != nil {
|
||||||
|
newTarget := mgr.removePartitionFromCollectionTarget(oldNextTarget, partitionSet)
|
||||||
|
if newTarget != nil {
|
||||||
|
mgr.next.updateCollectionTarget(collectionID, newTarget)
|
||||||
|
log.Info("finish to remove partition from next target for collection",
|
||||||
|
zap.Int64s("segments", newTarget.GetAllSegmentIDs()),
|
||||||
|
zap.Strings("channels", newTarget.GetAllDmChannelNames()))
|
||||||
|
} else {
|
||||||
|
log.Info("all partitions have been released, release the collection current target now")
|
||||||
|
mgr.next.removeCollectionTarget(collectionID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *TargetManager) RemoveSegment(segmentID int64) {
|
func (mgr *TargetManager) removePartitionFromCollectionTarget(oldTarget *CollectionTarget, partitionSet typeutil.UniqueSet) *CollectionTarget {
|
||||||
mgr.rwmutex.Lock()
|
segments := make(map[int64]*datapb.SegmentInfo)
|
||||||
defer mgr.rwmutex.Unlock()
|
for _, segment := range oldTarget.GetAllSegments() {
|
||||||
|
if !partitionSet.Contain(segment.GetPartitionID()) {
|
||||||
delete(mgr.segments, segmentID)
|
segments[segment.GetID()] = segment
|
||||||
}
|
|
||||||
|
|
||||||
func (mgr *TargetManager) removeSegment(segmentID int64) {
|
|
||||||
delete(mgr.segments, segmentID)
|
|
||||||
log.Info("segment removed from targets", zap.Int64("segment", segmentID))
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddSegment adds segment into target set,
|
|
||||||
// requires CollectionID, PartitionID, InsertChannel, SegmentID are set
|
|
||||||
func (mgr *TargetManager) AddSegment(segments ...*datapb.SegmentInfo) {
|
|
||||||
mgr.rwmutex.Lock()
|
|
||||||
defer mgr.rwmutex.Unlock()
|
|
||||||
|
|
||||||
mgr.addSegment(segments...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mgr *TargetManager) addSegment(segments ...*datapb.SegmentInfo) {
|
|
||||||
for _, segment := range segments {
|
|
||||||
log.Info("add segment into targets",
|
|
||||||
zap.Int64("segmentID", segment.GetID()),
|
|
||||||
zap.Int64("collectionID", segment.GetCollectionID()),
|
|
||||||
)
|
|
||||||
mgr.segments[segment.GetID()] = segment
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// clear partition streaming segment
|
||||||
|
channels := make(map[string]*DmChannel)
|
||||||
|
for _, channel := range oldTarget.GetAllDmChannels() {
|
||||||
|
channels[channel.GetChannelName()] = channel
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewCollectionTarget(segments, channels)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *TargetManager) ContainSegment(id int64) bool {
|
func (mgr *TargetManager) removePartitionGrowingSegmentFromChannel(partitionIDSet typeutil.UniqueSet,
|
||||||
mgr.rwmutex.RLock()
|
oldChannel *DmChannel) *DmChannel {
|
||||||
defer mgr.rwmutex.RUnlock()
|
newChannel := oldChannel.Clone()
|
||||||
|
|
||||||
return mgr.containSegment(id)
|
notMatchPartition := func(s *datapb.SegmentInfo, _ int) bool {
|
||||||
|
return !partitionIDSet.Contain(s.GetPartitionID())
|
||||||
|
}
|
||||||
|
|
||||||
|
getSegmentID := func(s *datapb.SegmentInfo, _ int) int64 {
|
||||||
|
return s.GetID()
|
||||||
|
}
|
||||||
|
|
||||||
|
newChannel.UnflushedSegments = lo.Filter(newChannel.GetUnflushedSegments(), notMatchPartition)
|
||||||
|
newChannel.UnflushedSegmentIds = lo.Map(newChannel.GetUnflushedSegments(), getSegmentID)
|
||||||
|
newChannel.FlushedSegments = lo.Filter(newChannel.GetFlushedSegments(), notMatchPartition)
|
||||||
|
newChannel.FlushedSegmentIds = lo.Map(newChannel.GetFlushedSegments(), getSegmentID)
|
||||||
|
newChannel.DroppedSegments = lo.Filter(newChannel.GetDroppedSegments(), notMatchPartition)
|
||||||
|
newChannel.DroppedSegmentIds = lo.Map(newChannel.GetDroppedSegments(), getSegmentID)
|
||||||
|
|
||||||
|
return newChannel
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *TargetManager) containSegment(id int64) bool {
|
func (mgr *TargetManager) getTarget(scope TargetScope) *target {
|
||||||
_, ok := mgr.segments[id]
|
if scope == CurrentTarget {
|
||||||
return ok
|
return mgr.current
|
||||||
|
}
|
||||||
|
|
||||||
|
return mgr.next
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *TargetManager) GetSegmentsByCollection(collection int64, partitions ...int64) []*datapb.SegmentInfo {
|
func (mgr *TargetManager) GetStreamingSegmentsByCollection(collectionID int64,
|
||||||
mgr.rwmutex.RLock()
|
scope TargetScope) typeutil.UniqueSet {
|
||||||
defer mgr.rwmutex.RUnlock()
|
mgr.rwMutex.RLock()
|
||||||
|
defer mgr.rwMutex.RUnlock()
|
||||||
|
|
||||||
segments := make([]*datapb.SegmentInfo, 0)
|
targetMap := mgr.getTarget(scope)
|
||||||
for _, segment := range mgr.segments {
|
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||||
if segment.CollectionID == collection &&
|
|
||||||
(len(partitions) == 0 || funcutil.SliceContain(partitions, segment.PartitionID)) {
|
if collectionTarget == nil {
|
||||||
segments = append(segments, segment)
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
segments := typeutil.NewUniqueSet()
|
||||||
|
for _, channel := range collectionTarget.GetAllDmChannels() {
|
||||||
|
segments.Insert(channel.GetUnflushedSegmentIds()...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return segments
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mgr *TargetManager) GetHistoricalSegmentsByCollection(collectionID int64,
|
||||||
|
scope TargetScope) map[int64]*datapb.SegmentInfo {
|
||||||
|
mgr.rwMutex.RLock()
|
||||||
|
defer mgr.rwMutex.RUnlock()
|
||||||
|
|
||||||
|
targetMap := mgr.getTarget(scope)
|
||||||
|
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||||
|
|
||||||
|
if collectionTarget == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return collectionTarget.GetAllSegments()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mgr *TargetManager) GetHistoricalSegmentsByPartition(collectionID int64,
|
||||||
|
partitionID int64, scope TargetScope) map[int64]*datapb.SegmentInfo {
|
||||||
|
mgr.rwMutex.RLock()
|
||||||
|
defer mgr.rwMutex.RUnlock()
|
||||||
|
|
||||||
|
targetMap := mgr.getTarget(scope)
|
||||||
|
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||||
|
|
||||||
|
if collectionTarget == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
segments := make(map[int64]*datapb.SegmentInfo)
|
||||||
|
for _, s := range collectionTarget.GetAllSegments() {
|
||||||
|
if s.GetPartitionID() == partitionID {
|
||||||
|
segments[s.GetID()] = s
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return segments
|
return segments
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *TargetManager) HandoffSegment(dest *datapb.SegmentInfo, sources ...int64) {
|
func (mgr *TargetManager) GetDmChannelsByCollection(collectionID int64, scope TargetScope) map[string]*DmChannel {
|
||||||
mgr.rwmutex.Lock()
|
mgr.rwMutex.RLock()
|
||||||
defer mgr.rwmutex.Unlock()
|
defer mgr.rwMutex.RUnlock()
|
||||||
|
|
||||||
// add dest to target
|
targetMap := mgr.getTarget(scope)
|
||||||
dest.CompactionFrom = sources
|
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||||
mgr.addSegment(dest)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddDmChannel adds a channel into target set,
|
if collectionTarget == nil {
|
||||||
// requires CollectionID, ChannelName are set
|
|
||||||
func (mgr *TargetManager) AddDmChannel(channels ...*DmChannel) {
|
|
||||||
mgr.rwmutex.Lock()
|
|
||||||
defer mgr.rwmutex.Unlock()
|
|
||||||
|
|
||||||
for _, channel := range channels {
|
|
||||||
ts := channel.GetSeekPosition().GetTimestamp()
|
|
||||||
log.Info("add channel into targets",
|
|
||||||
zap.String("channel", channel.GetChannelName()),
|
|
||||||
zap.Uint64("checkpoint", ts),
|
|
||||||
zap.Duration("sinceCheckpoint", time.Since(tsoutil.PhysicalTime(ts))),
|
|
||||||
)
|
|
||||||
mgr.dmChannels[channel.ChannelName] = channel
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mgr *TargetManager) GetDmChannel(channel string) *DmChannel {
|
|
||||||
mgr.rwmutex.RLock()
|
|
||||||
defer mgr.rwmutex.RUnlock()
|
|
||||||
for _, ch := range mgr.dmChannels {
|
|
||||||
if ch.ChannelName == channel {
|
|
||||||
return ch
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
|
}
|
||||||
|
return collectionTarget.GetAllDmChannels()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *TargetManager) ContainDmChannel(channel string) bool {
|
func (mgr *TargetManager) GetDmChannel(collectionID int64, channel string, scope TargetScope) *DmChannel {
|
||||||
mgr.rwmutex.RLock()
|
mgr.rwMutex.RLock()
|
||||||
defer mgr.rwmutex.RUnlock()
|
defer mgr.rwMutex.RUnlock()
|
||||||
|
|
||||||
_, ok := mgr.dmChannels[channel]
|
targetMap := mgr.getTarget(scope)
|
||||||
return ok
|
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||||
}
|
|
||||||
|
|
||||||
func (mgr *TargetManager) removeDmChannel(channel string) {
|
if collectionTarget == nil {
|
||||||
delete(mgr.dmChannels, channel)
|
|
||||||
log.Info("remove channel from targets", zap.String("channel", channel))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mgr *TargetManager) GetDmChannelsByCollection(collectionID int64) []*DmChannel {
|
|
||||||
mgr.rwmutex.RLock()
|
|
||||||
defer mgr.rwmutex.RUnlock()
|
|
||||||
|
|
||||||
channels := make([]*DmChannel, 0)
|
|
||||||
for _, channel := range mgr.dmChannels {
|
|
||||||
if channel.GetCollectionID() == collectionID {
|
|
||||||
channels = append(channels, channel)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return channels
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mgr *TargetManager) GetSegment(id int64) *datapb.SegmentInfo {
|
|
||||||
mgr.rwmutex.RLock()
|
|
||||||
defer mgr.rwmutex.RUnlock()
|
|
||||||
|
|
||||||
for _, s := range mgr.segments {
|
|
||||||
if s.GetID() == id {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
|
}
|
||||||
|
return collectionTarget.GetAllDmChannels()[channel]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mgr *TargetManager) GetHistoricalSegment(collectionID int64, id int64, scope TargetScope) *datapb.SegmentInfo {
|
||||||
|
mgr.rwMutex.RLock()
|
||||||
|
defer mgr.rwMutex.RUnlock()
|
||||||
|
targetMap := mgr.getTarget(scope)
|
||||||
|
collectionTarget := targetMap.getCollectionTarget(collectionID)
|
||||||
|
|
||||||
|
if collectionTarget == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return collectionTarget.GetAllSegments()[id]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mgr *TargetManager) IsNextTargetExist(collectionID int64) bool {
|
||||||
|
newChannels := mgr.GetDmChannelsByCollection(collectionID, NextTarget)
|
||||||
|
|
||||||
|
return len(newChannels) > 0
|
||||||
}
|
}
|
||||||
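Stepping back from the hunk above: the rewritten manager keeps two targets per collection and moves data between them in a fixed direction, as the struct comments state: loads write into the next target, promotion copies next into current, reads only see current, and removals hit both. A hedged sketch of the flow using the methods introduced here, written as if inside the `meta` package with an already-wired `targetMgr` (error handling trimmed):

```go
// 1. Load: pull recovery info from the broker into the next target.
if err := targetMgr.UpdateCollectionNextTargetWithPartitions(collectionID, partitionIDs...); err != nil {
	return err
}

// 2. Promote: make the pulled target the readable (current) one.
//    The job tests call this directly right after loading.
targetMgr.UpdateCollectionCurrentTarget(collectionID, partitionIDs...)

// 3. Read: every getter takes an explicit scope; CurrentTarget is what queries see.
channels := targetMgr.GetDmChannelsByCollection(collectionID, CurrentTarget)
sealed := targetMgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget)
growing := targetMgr.GetStreamingSegmentsByCollection(collectionID, CurrentTarget)

// 4. Release: removal is applied to both current and next targets.
targetMgr.RemovePartition(collectionID, partitionIDs...)
_, _, _ = channels, sealed, growing
```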
|
|||||||
@@ -19,9 +19,16 @@ package meta
 import (
 	"testing"

-	"github.com/milvus-io/milvus/internal/proto/datapb"
-	"github.com/milvus-io/milvus/internal/util/typeutil"
+	"github.com/samber/lo"
+	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/suite"
+
+	etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
+	"github.com/milvus-io/milvus/internal/proto/datapb"
+	"github.com/milvus-io/milvus/internal/proto/querypb"
+	. "github.com/milvus-io/milvus/internal/querycoordv2/params"
+	"github.com/milvus-io/milvus/internal/util/etcd"
+	"github.com/milvus-io/milvus/internal/util/typeutil"
 )

 type TargetManagerSuite struct {
@@ -36,11 +43,15 @@ type TargetManagerSuite struct {
 	allChannels []string
 	allSegments []int64

+	kv     *etcdkv.EtcdKV
+	meta   *Meta
+	broker *MockBroker
 	// Test object
 	mgr *TargetManager
 }

 func (suite *TargetManagerSuite) SetupSuite() {
+	Params.Init()
 	suite.collections = []int64{1000, 1001}
 	suite.partitions = map[int64][]int64{
 		1000: {100, 101},
@@ -74,81 +85,171 @@ func (suite *TargetManagerSuite) SetupSuite() {
 }

 func (suite *TargetManagerSuite) SetupTest() {
-	suite.mgr = NewTargetManager()
-	for collection, channels := range suite.channels {
-		for _, channel := range channels {
-			suite.mgr.AddDmChannel(DmChannelFromVChannel(&datapb.VchannelInfo{
-				CollectionID: collection,
-				ChannelName:  channel,
-			}))
-		}
-	}
-	for collection, partitions := range suite.segments {
-		for partition, segments := range partitions {
-			for _, segment := range segments {
-				suite.mgr.AddSegment(&datapb.SegmentInfo{
-					ID:           segment,
-					CollectionID: collection,
-					PartitionID:  partition,
-				})
-			}
-		}
-	}
-}
-
-func (suite *TargetManagerSuite) TestGet() {
-	mgr := suite.mgr
-
-	for collection, channels := range suite.channels {
-		results := mgr.GetDmChannelsByCollection(collection)
-		suite.assertChannels(channels, results)
-		for _, channel := range channels {
-			suite.True(mgr.ContainDmChannel(channel))
-		}
-	}
-
-	for collection, partitions := range suite.segments {
-		collectionSegments := make([]int64, 0)
-		for partition, segments := range partitions {
-			results := mgr.GetSegmentsByCollection(collection, partition)
-			suite.assertSegments(segments, results)
-			for _, segment := range segments {
-				suite.True(mgr.ContainSegment(segment))
-			}
-			collectionSegments = append(collectionSegments, segments...)
-		}
-		results := mgr.GetSegmentsByCollection(collection)
-		suite.assertSegments(collectionSegments, results)
-	}
-}
-
-func (suite *TargetManagerSuite) TestRemove() {
-	mgr := suite.mgr
-	for collection, partitions := range suite.segments {
-		// Remove first segment of each partition
-		for _, segments := range partitions {
-			mgr.RemoveSegment(segments[0])
-			suite.False(mgr.ContainSegment(segments[0]))
-		}
-
-		// Remove first partition of each collection
-		firstPartition := suite.partitions[collection][0]
-		mgr.RemovePartition(firstPartition)
-		segments := mgr.GetSegmentsByCollection(collection, firstPartition)
-		suite.Empty(segments)
-	}
-
-	// Remove first collection
-	firstCollection := suite.collections[0]
-	mgr.RemoveCollection(firstCollection)
-	channels := mgr.GetDmChannelsByCollection(firstCollection)
-	suite.Empty(channels)
-	segments := mgr.GetSegmentsByCollection(firstCollection)
-	suite.Empty(segments)
-}
+	var err error
+	config := GenerateEtcdConfig()
+	cli, err := etcd.GetEtcdClient(&config)
+	suite.Require().NoError(err)
+	suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath)
+
+	// meta
+	store := NewMetaStore(suite.kv)
+	idAllocator := RandomIncrementIDAllocator()
+	suite.meta = NewMeta(idAllocator, store)
+	suite.broker = NewMockBroker(suite.T())
+	suite.mgr = NewTargetManager(suite.broker, suite.meta)
+
+	for _, collection := range suite.collections {
+		dmChannels := make([]*datapb.VchannelInfo, 0)
+		for _, channel := range suite.channels[collection] {
+			dmChannels = append(dmChannels, &datapb.VchannelInfo{
+				CollectionID: collection,
+				ChannelName:  channel,
+			})
+		}
+
+		for partition, segments := range suite.segments[collection] {
+			allSegments := make([]*datapb.SegmentBinlogs, 0)
+			for _, segment := range segments {
+				allSegments = append(allSegments, &datapb.SegmentBinlogs{
+					SegmentID:     segment,
+					InsertChannel: suite.channels[collection][0],
+				})
+			}
+			suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collection, partition).Return(dmChannels, allSegments, nil)
+		}
+		suite.mgr.UpdateCollectionNextTargetWithPartitions(collection, suite.partitions[collection]...)
+	}
+}
+
+func (suite *TargetManagerSuite) TearDownSuite() {
+	suite.kv.Close()
+}
+
+func (suite *TargetManagerSuite) TestUpdateCurrentTarget() {
+	collectionID := int64(1000)
+	suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]),
+		suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+	suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
+
+	suite.mgr.UpdateCollectionCurrentTarget(collectionID)
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
+	suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]),
+		suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+	suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
+}
+
+func (suite *TargetManagerSuite) TestUpdateNextTarget() {
+	collectionID := int64(1003)
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
+
+	suite.meta.PutCollection(&Collection{
+		CollectionLoadInfo: &querypb.CollectionLoadInfo{
+			CollectionID:  collectionID,
+			ReplicaNumber: 1},
+	})
+	suite.meta.PutPartition(&Partition{
+		PartitionLoadInfo: &querypb.PartitionLoadInfo{
+			CollectionID: collectionID,
+			PartitionID:  1,
+		},
+	})
+
+	nextTargetChannels := []*datapb.VchannelInfo{
+		{
+			CollectionID: collectionID,
+			ChannelName:  "channel-1",
+		},
+		{
+			CollectionID: collectionID,
+			ChannelName:  "channel-2",
+		},
+	}
+
+	nextTargetSegments := []*datapb.SegmentBinlogs{
+		{
+			SegmentID:     11,
+			InsertChannel: "channel-1",
+		},
+		{
+			SegmentID:     12,
+			InsertChannel: "channel-2",
+		},
+	}
+
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collectionID, int64(1)).Return(nextTargetChannels, nextTargetSegments, nil)
+	suite.mgr.UpdateCollectionNextTargetWithPartitions(collectionID, int64(1))
+	suite.assertSegments([]int64{11, 12}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+	suite.assertChannels([]string{"channel-1", "channel-2"}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
+}
+
+func (suite *TargetManagerSuite) TestRemovePartition() {
+	collectionID := int64(1000)
+	suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+	suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
+
+	suite.mgr.RemovePartition(collectionID, 100)
+	suite.assertSegments([]int64{3, 4}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+	suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
+}
+
+func (suite *TargetManagerSuite) TestRemoveCollection() {
+	collectionID := int64(1000)
+	suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+	suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
+
+	suite.mgr.RemoveCollection(collectionID)
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
+
+	collectionID = int64(1001)
+	suite.mgr.UpdateCollectionCurrentTarget(collectionID)
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
+	suite.assertSegments(suite.getAllSegment(collectionID, suite.partitions[collectionID]), suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+	suite.assertChannels(suite.channels[collectionID], suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
+
+	suite.mgr.RemoveCollection(collectionID)
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, NextTarget))
+	suite.assertSegments([]int64{}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, CurrentTarget))
+	suite.assertChannels([]string{}, suite.mgr.GetDmChannelsByCollection(collectionID, CurrentTarget))
+}
+
+func (suite *TargetManagerSuite) getAllSegment(collectionID int64, partitionIDs []int64) []int64 {
+	allSegments := make([]int64, 0)
+	for collection, partitions := range suite.segments {
+		if collectionID == collection {
+			for partition, segments := range partitions {
+				if lo.Contains(partitionIDs, partition) {
+					allSegments = append(allSegments, segments...)
+				}
+			}
+		}
+	}
+
+	return allSegments
+}

-func (suite *TargetManagerSuite) assertChannels(expected []string, actual []*DmChannel) bool {
+func (suite *TargetManagerSuite) assertChannels(expected []string, actual map[string]*DmChannel) bool {
 	if !suite.Equal(len(expected), len(actual)) {
 		return false
 	}
@@ -161,7 +262,7 @@ func (suite *TargetManagerSuite) assertChannels(expected []string, actual []*DmC
 	return suite.Len(set, 0)
 }

-func (suite *TargetManagerSuite) assertSegments(expected []int64, actual []*datapb.SegmentInfo) bool {
+func (suite *TargetManagerSuite) assertSegments(expected []int64, actual map[int64]*datapb.SegmentInfo) bool {
 	if !suite.Equal(len(expected), len(actual)) {
 		return false
 	}
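Editorial note: the rewritten suite above seeds targets through a mocked broker (GetRecoveryInfo expectations) instead of calling AddDmChannel/AddSegment directly. As a rough, self-contained illustration of that stubbing pattern with testify/mock (a hand-written mock around a hypothetical, trimmed-down broker interface, not the generated MockBroker used in the PR):

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// recoveryBroker is a hypothetical, simplified stand-in for the broker interface.
type recoveryBroker interface {
	GetRecoveryInfo(ctx context.Context, collectionID, partitionID int64) ([]string, []int64, error)
}

type mockBroker struct{ mock.Mock }

func (m *mockBroker) GetRecoveryInfo(ctx context.Context, collectionID, partitionID int64) ([]string, []int64, error) {
	args := m.Called(ctx, collectionID, partitionID)
	return args.Get(0).([]string), args.Get(1).([]int64), args.Error(2)
}

// buildNextTarget is a toy "update next target" step that pulls channels and
// segments from the broker, mirroring how the suite drives the target manager.
func buildNextTarget(b recoveryBroker, collectionID, partitionID int64) ([]string, []int64, error) {
	return b.GetRecoveryInfo(context.Background(), collectionID, partitionID)
}

func TestBuildNextTarget(t *testing.T) {
	b := new(mockBroker)
	b.On("GetRecoveryInfo", mock.Anything, int64(1000), int64(100)).
		Return([]string{"channel-1"}, []int64{11, 12}, nil)

	channels, segments, err := buildNextTarget(b, 1000, 100)
	require.NoError(t, err)
	require.Equal(t, []string{"channel-1"}, channels)
	require.Equal(t, []int64{11, 12}, segments)
	b.AssertExpectations(t)
}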
@@ -154,19 +154,20 @@ func (ob *CollectionObserver) observeLoadStatus() {
 func (ob *CollectionObserver) observeCollectionLoadStatus(collection *meta.Collection) {
 	log := log.With(zap.Int64("collectionID", collection.GetCollectionID()))

-	segmentTargets := ob.targetMgr.GetSegmentsByCollection(collection.GetCollectionID())
-	channelTargets := ob.targetMgr.GetDmChannelsByCollection(collection.GetCollectionID())
+	segmentTargets := ob.targetMgr.GetHistoricalSegmentsByCollection(collection.GetCollectionID(), meta.NextTarget)
+	channelTargets := ob.targetMgr.GetDmChannelsByCollection(collection.GetCollectionID(), meta.NextTarget)
 	targetNum := len(segmentTargets) + len(channelTargets)
 	log.Info("collection targets",
 		zap.Int("segment-target-num", len(segmentTargets)),
 		zap.Int("channel-target-num", len(channelTargets)),
 		zap.Int("total-target-num", targetNum))
-	if targetNum == 0 {
-		log.Info("collection released, skip it")
-		return
-	}

+	updated := collection.Clone()
 	loadedCount := 0
+	if targetNum == 0 {
+		log.Info("No segment/channel in target need to be loaded!")
+		updated.LoadPercentage = 100
+	} else {
 	for _, channel := range channelTargets {
 		group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
 			collection.GetCollectionID(),
@@ -191,12 +192,15 @@ func (ob *CollectionObserver) observeCollectionLoadStatus(collection *meta.Colle
 		)
 	}

-	updated := collection.Clone()
 	updated.LoadPercentage = int32(loadedCount * 100 / targetNum)
+	}
+
 	if updated.LoadPercentage <= collection.LoadPercentage {
 		return
 	}
-	if loadedCount >= len(segmentTargets)+len(channelTargets) {
+
+	if loadedCount >= targetNum {
+		ob.targetMgr.UpdateCollectionCurrentTarget(updated.CollectionID)
 		updated.Status = querypb.LoadStatus_Loaded
 		ob.meta.CollectionManager.UpdateCollection(updated)

@@ -216,19 +220,20 @@ func (ob *CollectionObserver) observePartitionLoadStatus(partition *meta.Partiti
 		zap.Int64("partitionID", partition.GetPartitionID()),
 	)

-	segmentTargets := ob.targetMgr.GetSegmentsByCollection(partition.GetCollectionID(), partition.GetPartitionID())
-	channelTargets := ob.targetMgr.GetDmChannelsByCollection(partition.GetCollectionID())
+	segmentTargets := ob.targetMgr.GetHistoricalSegmentsByPartition(partition.GetCollectionID(), partition.GetPartitionID(), meta.NextTarget)
+	channelTargets := ob.targetMgr.GetDmChannelsByCollection(partition.GetCollectionID(), meta.NextTarget)
 	targetNum := len(segmentTargets) + len(channelTargets)
 	log.Info("partition targets",
 		zap.Int("segment-target-num", len(segmentTargets)),
 		zap.Int("channel-target-num", len(channelTargets)),
 		zap.Int("total-target-num", targetNum))
-	if targetNum == 0 {
-		log.Info("partition released, skip it")
-		return
-	}

 	loadedCount := 0
+	updated := partition.Clone()
+	if targetNum == 0 {
+		log.Info("No segment/channel in target need to be loaded!")
+		updated.LoadPercentage = 100
+	} else {
 	for _, channel := range channelTargets {
 		group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
 			partition.GetCollectionID(),
@@ -251,15 +256,18 @@ func (ob *CollectionObserver) observePartitionLoadStatus(partition *meta.Partiti
 			zap.Int("sub-channel-count", subChannelCount),
 			zap.Int("load-segment-count", loadedCount-subChannelCount))
 	}

-	updated := partition.Clone()
 	updated.LoadPercentage = int32(loadedCount * 100 / targetNum)
+	}
+
 	if updated.LoadPercentage <= partition.LoadPercentage {
 		return
 	}
-	if loadedCount >= len(segmentTargets)+len(channelTargets) {
+
+	if loadedCount >= targetNum {
+		ob.targetMgr.UpdateCollectionCurrentTarget(partition.GetCollectionID(), partition.GetPartitionID())
 		updated.Status = querypb.LoadStatus_Loaded
-		ob.meta.CollectionManager.UpdatePartition(updated)
+		ob.meta.CollectionManager.PutPartition(updated)

 	elapsed := time.Since(updated.CreatedAt)
 	metrics.QueryCoordLoadLatency.WithLabelValues().Observe(float64(elapsed.Milliseconds()))
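Editorial note on the observer change above: an empty target is now reported as fully loaded (100%) instead of being skipped, and reaching the full target count is what promotes the next target to the current target. A minimal sketch of just the percentage rule, using hypothetical parameter names for illustration:

package main

import "fmt"

// loadPercentage mirrors the progress rule in the diff: with nothing in the
// target the load is treated as complete; otherwise it is the integer
// percentage of loaded segments plus subscribed channels over the target size.
func loadPercentage(loadedCount, segmentTargets, channelTargets int) int32 {
	targetNum := segmentTargets + channelTargets
	if targetNum == 0 {
		return 100
	}
	return int32(loadedCount * 100 / targetNum)
}

func main() {
	fmt.Println(loadPercentage(0, 0, 0)) // 100: empty target counts as loaded
	fmt.Println(loadPercentage(3, 4, 2)) // 50
	fmt.Println(loadPercentage(6, 4, 2)) // 100: the point at which the current target is updated
}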
@@ -21,6 +21,10 @@ import (
 	"testing"
 	"time"

+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/suite"
+	clientv3 "go.etcd.io/etcd/client/v3"
+
 	"github.com/milvus-io/milvus/internal/kv"
 	etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
 	"github.com/milvus-io/milvus/internal/log"
@@ -29,8 +33,6 @@ import (
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
 	. "github.com/milvus-io/milvus/internal/querycoordv2/params"
 	"github.com/milvus-io/milvus/internal/util/etcd"
-	"github.com/stretchr/testify/suite"
-	clientv3 "go.etcd.io/etcd/client/v3"
 )

 type CollectionObserverSuite struct {
@@ -51,6 +53,7 @@ type CollectionObserverSuite struct {
 	etcd   *clientv3.Client
 	kv     kv.MetaKv
 	store  meta.Store
+	broker *meta.MockBroker

 	// Dependencies
 	dist      *meta.DistributionManager
@@ -152,7 +155,8 @@ func (suite *CollectionObserverSuite) SetupTest() {
 	// Dependencies
 	suite.dist = meta.NewDistributionManager()
 	suite.meta = meta.NewMeta(suite.idAllocator, suite.store)
-	suite.targetMgr = meta.NewTargetManager()
+	suite.broker = meta.NewMockBroker(suite.T())
+	suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta)

 	// Test object
 	suite.ob = NewCollectionObserver(
@@ -192,8 +196,11 @@ func (suite *CollectionObserverSuite) TestObserve() {
 		Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2, Version: 0}},
 	})
 	suite.Eventually(func() bool {
-		return suite.isCollectionLoaded(suite.collections[0]) &&
-			suite.isCollectionTimeout(suite.collections[1])
+		return suite.isCollectionLoaded(suite.collections[0])
+	}, timeout*2, timeout/10)
+
+	suite.Eventually(func() bool {
+		return suite.isCollectionTimeout(suite.collections[1])
 	}, timeout*2, timeout/10)
 }

@@ -202,8 +209,8 @@ func (suite *CollectionObserverSuite) isCollectionLoaded(collection int64) bool
 	percentage := suite.meta.GetLoadPercentage(collection)
 	status := suite.meta.GetStatus(collection)
 	replicas := suite.meta.ReplicaManager.GetByCollection(collection)
-	channels := suite.targetMgr.GetDmChannelsByCollection(collection)
-	segments := suite.targetMgr.GetSegmentsByCollection(collection)
+	channels := suite.targetMgr.GetDmChannelsByCollection(collection, meta.CurrentTarget)
+	segments := suite.targetMgr.GetHistoricalSegmentsByCollection(collection, meta.CurrentTarget)

 	return exist &&
 		percentage == 100 &&
@@ -216,8 +223,8 @@ func (suite *CollectionObserverSuite) isCollectionLoaded(collection int64) bool
 func (suite *CollectionObserverSuite) isCollectionTimeout(collection int64) bool {
 	exist := suite.meta.Exist(collection)
 	replicas := suite.meta.ReplicaManager.GetByCollection(collection)
-	channels := suite.targetMgr.GetDmChannelsByCollection(collection)
-	segments := suite.targetMgr.GetSegmentsByCollection(collection)
+	channels := suite.targetMgr.GetDmChannelsByCollection(collection, meta.CurrentTarget)
+	segments := suite.targetMgr.GetHistoricalSegmentsByCollection(collection, meta.CurrentTarget)

 	return !(exist ||
 		len(replicas) > 0 ||
@@ -229,6 +236,7 @@ func (suite *CollectionObserverSuite) loadAll() {
 	for _, collection := range suite.collections {
 		suite.load(collection)
 	}
+	suite.targetMgr.UpdateCollectionCurrentTarget(suite.collections[0])
 }

 func (suite *CollectionObserverSuite) load(collection int64) {
@@ -266,8 +274,24 @@ func (suite *CollectionObserverSuite) load(collection int64) {
 		}
 	}

-	suite.targetMgr.AddDmChannel(suite.channels[collection]...)
-	suite.targetMgr.AddSegment(suite.segments[collection]...)
+	allSegments := make([]*datapb.SegmentBinlogs, 0)
+	dmChannels := make([]*datapb.VchannelInfo, 0)
+	for _, channel := range suite.channels[collection] {
+		dmChannels = append(dmChannels, &datapb.VchannelInfo{
+			CollectionID: collection,
+			ChannelName:  channel.GetChannelName(),
+		})
+	}
+
+	for _, segment := range suite.segments[collection] {
+		allSegments = append(allSegments, &datapb.SegmentBinlogs{
+			SegmentID:     segment.GetID(),
+			InsertChannel: segment.GetInsertChannel(),
+		})
+	}
+
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collection, int64(1)).Return(dmChannels, allSegments, nil)
+	suite.targetMgr.UpdateCollectionNextTargetWithPartitions(collection, int64(1))
 }

 func TestCollectionObserver(t *testing.T) {
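Editorial note: the test change above splits one combined suite.Eventually assertion into two separate polling windows, one per condition. A small, self-contained sketch of that style with testify's suite package (names such as EventuallySuite and loadedAt are illustrative only):

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/suite"
)

// EventuallySuite shows each condition getting its own Eventually window
// instead of being and-ed into a single check.
type EventuallySuite struct {
	suite.Suite
	loadedAt time.Time
}

func (s *EventuallySuite) SetupTest() {
	// Pretend the "collection" becomes loaded 50ms after the test starts.
	s.loadedAt = time.Now().Add(50 * time.Millisecond)
}

func (s *EventuallySuite) TestSplitEventually() {
	timeout := 500 * time.Millisecond

	s.Eventually(func() bool {
		return time.Now().After(s.loadedAt) // stands in for isCollectionLoaded
	}, timeout*2, timeout/10)

	s.Eventually(func() bool {
		return true // stands in for isCollectionTimeout on the second collection
	}, timeout*2, timeout/10)
}

func TestEventuallySuite(t *testing.T) {
	suite.Run(t, new(EventuallySuite))
}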
@ -1,510 +0,0 @@
|
|||||||
// Licensed to the LF AI & Data foundation under one
|
|
||||||
// or more contributor license agreements. See the NOTICE file
|
|
||||||
// distributed with this work for additional information
|
|
||||||
// regarding copyright ownership. The ASF licenses this file
|
|
||||||
// to you under the Apache License, Version 2.0 (the
|
|
||||||
// "License"); you may not use this file except in compliance
|
|
||||||
// with the License. You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package observers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"github.com/milvus-io/milvus/internal/log"
|
|
||||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
|
||||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
|
||||||
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
|
|
||||||
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
|
|
||||||
"github.com/milvus-io/milvus/internal/querycoordv2/utils"
|
|
||||||
"github.com/milvus-io/milvus/internal/util/retry"
|
|
||||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
|
||||||
"github.com/samber/lo"
|
|
||||||
"go.etcd.io/etcd/api/v3/mvccpb"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type CollectionHandoffStatus int32
|
|
||||||
type HandoffEventStatus int32
|
|
||||||
|
|
||||||
const (
|
|
||||||
// CollectionHandoffStatusRegistered start receive handoff event
|
|
||||||
CollectionHandoffStatusRegistered CollectionHandoffStatus = iota + 1
|
|
||||||
// CollectionHandoffStatusStarted start trigger handoff event
|
|
||||||
CollectionHandoffStatusStarted
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
HandoffEventStatusReceived HandoffEventStatus = iota + 1
|
|
||||||
HandoffEventStatusTriggered
|
|
||||||
)
|
|
||||||
|
|
||||||
type HandoffEvent struct {
|
|
||||||
Segment *querypb.SegmentInfo
|
|
||||||
Status HandoffEventStatus
|
|
||||||
}
|
|
||||||
|
|
||||||
type queue []int64
|
|
||||||
|
|
||||||
type HandoffObserver struct {
|
|
||||||
store meta.Store
|
|
||||||
c chan struct{}
|
|
||||||
wg sync.WaitGroup
|
|
||||||
meta *meta.Meta
|
|
||||||
dist *meta.DistributionManager
|
|
||||||
target *meta.TargetManager
|
|
||||||
broker meta.Broker
|
|
||||||
revision int64
|
|
||||||
|
|
||||||
collectionStatus map[int64]CollectionHandoffStatus
|
|
||||||
handoffEventLock sync.RWMutex
|
|
||||||
handoffEvents map[int64]*HandoffEvent
|
|
||||||
// collection id -> queue
|
|
||||||
handoffSubmitOrders map[int64]queue
|
|
||||||
// collectionId -> loaded partitionId, only for load collection case
|
|
||||||
loadedPartitions map[int64]typeutil.UniqueSet
|
|
||||||
|
|
||||||
stopOnce sync.Once
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewHandoffObserver(store meta.Store, meta *meta.Meta, dist *meta.DistributionManager, target *meta.TargetManager, broker meta.Broker) *HandoffObserver {
|
|
||||||
return &HandoffObserver{
|
|
||||||
store: store,
|
|
||||||
c: make(chan struct{}),
|
|
||||||
meta: meta,
|
|
||||||
dist: dist,
|
|
||||||
target: target,
|
|
||||||
broker: broker,
|
|
||||||
collectionStatus: map[int64]CollectionHandoffStatus{},
|
|
||||||
handoffEvents: map[int64]*HandoffEvent{},
|
|
||||||
handoffSubmitOrders: map[int64]queue{},
|
|
||||||
loadedPartitions: map[int64]typeutil.Set[int64]{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) Register(collectionIDs ...int64) {
|
|
||||||
ob.handoffEventLock.Lock()
|
|
||||||
defer ob.handoffEventLock.Unlock()
|
|
||||||
log.Info("Register handoff for collection",
|
|
||||||
zap.Int64s("collectionIDs", collectionIDs))
|
|
||||||
|
|
||||||
for _, collectionID := range collectionIDs {
|
|
||||||
ob.collectionStatus[collectionID] = CollectionHandoffStatusRegistered
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) Unregister(ctx context.Context, collectionIDs ...int64) {
|
|
||||||
ob.handoffEventLock.Lock()
|
|
||||||
defer ob.handoffEventLock.Unlock()
|
|
||||||
log.Info("Unregister handoff for collection",
|
|
||||||
zap.Int64s("collectionIDs", collectionIDs))
|
|
||||||
|
|
||||||
for _, collectionID := range collectionIDs {
|
|
||||||
delete(ob.collectionStatus, collectionID)
|
|
||||||
delete(ob.handoffSubmitOrders, collectionID)
|
|
||||||
}
|
|
||||||
|
|
||||||
collectionSet := typeutil.NewUniqueSet(collectionIDs...)
|
|
||||||
for segmentID, event := range ob.handoffEvents {
|
|
||||||
if collectionSet.Contain(event.Segment.GetCollectionID()) {
|
|
||||||
delete(ob.handoffEvents, segmentID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) StartHandoff(collectionIDs ...int64) {
|
|
||||||
ob.handoffEventLock.Lock()
|
|
||||||
defer ob.handoffEventLock.Unlock()
|
|
||||||
|
|
||||||
for _, collectionID := range collectionIDs {
|
|
||||||
ob.collectionStatus[collectionID] = CollectionHandoffStatusStarted
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) GetEventNum() int {
|
|
||||||
ob.handoffEventLock.Lock()
|
|
||||||
defer ob.handoffEventLock.Unlock()
|
|
||||||
|
|
||||||
return len(ob.handoffEvents)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) consumeOutdatedHandoffEvent(ctx context.Context) error {
|
|
||||||
_, handoffReqValues, revision, err := ob.store.LoadHandoffWithRevision()
|
|
||||||
if err != nil {
|
|
||||||
log.Error("reloadFromKV: LoadWithRevision from kv failed", zap.Error(err))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// set watch start revision
|
|
||||||
ob.revision = revision
|
|
||||||
|
|
||||||
for _, value := range handoffReqValues {
|
|
||||||
segmentInfo := &querypb.SegmentInfo{}
|
|
||||||
err := proto.Unmarshal([]byte(value), segmentInfo)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("reloadFromKV: unmarshal failed", zap.Error(err))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ob.cleanEvent(ctx, segmentInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) Start(ctx context.Context) error {
|
|
||||||
log.Info("Start reload handoff event from etcd")
|
|
||||||
if err := ob.consumeOutdatedHandoffEvent(ctx); err != nil {
|
|
||||||
log.Error("handoff observer reload from kv failed", zap.Error(err))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Info("Finish reload handoff event from etcd")
|
|
||||||
|
|
||||||
ob.wg.Add(1)
|
|
||||||
go ob.schedule(ctx)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) Stop() {
|
|
||||||
ob.stopOnce.Do(func() {
|
|
||||||
close(ob.c)
|
|
||||||
ob.wg.Wait()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) schedule(ctx context.Context) {
|
|
||||||
defer ob.wg.Done()
|
|
||||||
log.Info("start watch Segment handoff loop")
|
|
||||||
ticker := time.NewTicker(Params.QueryCoordCfg.CheckHandoffInterval)
|
|
||||||
log.Info("handoff interval", zap.String("interval", Params.QueryCoordCfg.CheckHandoffInterval.String()))
|
|
||||||
watchChan := ob.store.WatchHandoffEvent(ob.revision + 1)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
log.Info("close handoff handler due to context done!")
|
|
||||||
return
|
|
||||||
case <-ob.c:
|
|
||||||
log.Info("close handoff handler")
|
|
||||||
return
|
|
||||||
|
|
||||||
case resp, ok := <-watchChan:
|
|
||||||
if !ok {
|
|
||||||
log.Error("watch Segment handoff loop failed because watch channel is closed!")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := resp.Err(); err != nil {
|
|
||||||
log.Warn("receive error handoff event from etcd",
|
|
||||||
zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, event := range resp.Events {
|
|
||||||
segmentInfo := &querypb.SegmentInfo{}
|
|
||||||
err := proto.Unmarshal(event.Kv.Value, segmentInfo)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("failed to deserialize handoff event", zap.Error(err))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
switch event.Type {
|
|
||||||
case mvccpb.PUT:
|
|
||||||
ob.tryHandoff(ctx, segmentInfo)
|
|
||||||
default:
|
|
||||||
log.Warn("HandoffObserver: receive event",
|
|
||||||
zap.String("type", event.Type.String()),
|
|
||||||
zap.String("key", string(event.Kv.Key)),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case <-ticker.C:
|
|
||||||
for _, event := range ob.handoffEvents {
|
|
||||||
switch event.Status {
|
|
||||||
case HandoffEventStatusReceived:
|
|
||||||
ob.tryHandoff(ctx, event.Segment)
|
|
||||||
case HandoffEventStatusTriggered:
|
|
||||||
ob.tryRelease(ctx, event)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ob.tryClean(ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) tryHandoff(ctx context.Context, segment *querypb.SegmentInfo) {
|
|
||||||
ob.handoffEventLock.Lock()
|
|
||||||
defer ob.handoffEventLock.Unlock()
|
|
||||||
|
|
||||||
indexIDs := lo.Map(segment.GetIndexInfos(), func(indexInfo *querypb.FieldIndexInfo, _ int) int64 { return indexInfo.GetIndexID() })
|
|
||||||
log := log.With(zap.Int64("collectionID", segment.GetCollectionID()),
|
|
||||||
zap.Int64("partitionID", segment.GetPartitionID()),
|
|
||||||
zap.Int64("segmentID", segment.GetSegmentID()),
|
|
||||||
zap.Bool("fake", segment.GetIsFake()),
|
|
||||||
zap.Int64s("indexIDs", indexIDs),
|
|
||||||
)
|
|
||||||
|
|
||||||
log.Info("try handoff segment...")
|
|
||||||
status, collectionRegistered := ob.collectionStatus[segment.GetCollectionID()]
|
|
||||||
if Params.QueryCoordCfg.AutoHandoff &&
|
|
||||||
collectionRegistered &&
|
|
||||||
ob.checkLoadStatus(segment) &&
|
|
||||||
(segment.GetIsFake() || ob.meta.CollectionManager.ContainAnyIndex(segment.GetCollectionID(), indexIDs...)) {
|
|
||||||
event := ob.handoffEvents[segment.SegmentID]
|
|
||||||
if event == nil {
|
|
||||||
// record submit order
|
|
||||||
_, ok := ob.handoffSubmitOrders[segment.GetCollectionID()]
|
|
||||||
if !ok {
|
|
||||||
ob.handoffSubmitOrders[segment.GetCollectionID()] = make([]int64, 0)
|
|
||||||
}
|
|
||||||
ob.handoffSubmitOrders[segment.GetCollectionID()] = append(ob.handoffSubmitOrders[segment.GetCollectionID()], segment.GetSegmentID())
|
|
||||||
}
|
|
||||||
|
|
||||||
if status == CollectionHandoffStatusRegistered {
|
|
||||||
if event == nil {
|
|
||||||
// keep all handoff event, waiting collection ready and to trigger handoff
|
|
||||||
ob.handoffEvents[segment.GetSegmentID()] = &HandoffEvent{
|
|
||||||
Segment: segment,
|
|
||||||
Status: HandoffEventStatusReceived,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ob.handoffEvents[segment.GetSegmentID()] = &HandoffEvent{
|
|
||||||
Segment: segment,
|
|
||||||
Status: HandoffEventStatusTriggered,
|
|
||||||
}
|
|
||||||
|
|
||||||
if !segment.GetIsFake() {
|
|
||||||
log.Info("start to do handoff...")
|
|
||||||
ob.handoff(segment)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// ignore handoff task
|
|
||||||
log.Info("handoff event trigger failed due to collection/partition is not loaded!")
|
|
||||||
ob.cleanEvent(ctx, segment)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) checkLoadStatus(segment *querypb.SegmentInfo) bool {
|
|
||||||
if ob.meta.GetCollection(segment.GetCollectionID()) != nil {
|
|
||||||
// if collection is loaded, should check whether the partition has been droped!
|
|
||||||
if ob.loadedPartitions[segment.GetCollectionID()] == nil {
|
|
||||||
ob.loadedPartitions[segment.GetCollectionID()] = typeutil.NewUniqueSet()
|
|
||||||
}
|
|
||||||
|
|
||||||
// should updated loaded partitions when meet new partitionID
|
|
||||||
if !ob.loadedPartitions[segment.GetCollectionID()].Contain(segment.GetPartitionID()) {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
err := retry.Do(ctx, func() error {
|
|
||||||
partitionIDs, err := ob.broker.GetPartitions(ctx, segment.GetCollectionID())
|
|
||||||
if err == nil {
|
|
||||||
ob.loadedPartitions[segment.GetCollectionID()].Insert(partitionIDs...)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}, retry.Attempts(5))
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
// collection has been dropped or released
|
|
||||||
if strings.Contains(err.Error(), "CollectionNotExists") ||
|
|
||||||
ob.meta.GetCollection(segment.GetCollectionID()) == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
// collection not released , but can get partition list to check handoff
|
|
||||||
log.Warn("handoff check load status failed due to get partitions failed",
|
|
||||||
zap.Int64("collectionID", segment.GetCollectionID()),
|
|
||||||
zap.Int64("partitionID", segment.GetPartitionID()),
|
|
||||||
zap.String("channel", segment.GetDmChannel()),
|
|
||||||
zap.Int64("segmentID", segment.GetSegmentID()))
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ob.loadedPartitions[segment.GetCollectionID()].Contain(segment.GetPartitionID())
|
|
||||||
}
|
|
||||||
|
|
||||||
return ob.meta.GetPartition(segment.GetPartitionID()) != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) handoff(segment *querypb.SegmentInfo) {
|
|
||||||
targets := ob.target.GetSegmentsByCollection(segment.GetCollectionID(), segment.GetPartitionID())
|
|
||||||
// when handoff event load a Segment, it sobuld remove all recursive handoff compact from
|
|
||||||
uniqueSet := typeutil.NewUniqueSet()
|
|
||||||
recursiveCompactFrom := ob.getOverrideSegmentInfo(targets, segment.CompactionFrom...)
|
|
||||||
uniqueSet.Insert(recursiveCompactFrom...)
|
|
||||||
uniqueSet.Insert(segment.GetCompactionFrom()...)
|
|
||||||
|
|
||||||
segmentInfo := &datapb.SegmentInfo{
|
|
||||||
ID: segment.GetSegmentID(),
|
|
||||||
CollectionID: segment.GetCollectionID(),
|
|
||||||
PartitionID: segment.GetPartitionID(),
|
|
||||||
NumOfRows: segment.NumRows,
|
|
||||||
InsertChannel: segment.GetDmChannel(),
|
|
||||||
State: segment.GetSegmentState(),
|
|
||||||
CreatedByCompaction: segment.GetCreatedByCompaction(),
|
|
||||||
CompactionFrom: uniqueSet.Collect(),
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info("HandoffObserver: handoff Segment, register to target")
|
|
||||||
ob.target.HandoffSegment(segmentInfo, segmentInfo.CompactionFrom...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) isSegmentReleased(id int64) bool {
|
|
||||||
return len(ob.dist.LeaderViewManager.GetSegmentDist(id)) == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) isGrowingSegmentReleased(id int64) bool {
|
|
||||||
return len(ob.dist.LeaderViewManager.GetGrowingSegmentDist(id)) == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) isSealedSegmentLoaded(segment *querypb.SegmentInfo) bool {
|
|
||||||
// must be sealed Segment loaded in all replica, in case of handoff between growing and sealed
|
|
||||||
nodes := ob.dist.LeaderViewManager.GetSealedSegmentDist(segment.GetSegmentID())
|
|
||||||
replicas := utils.GroupNodesByReplica(ob.meta.ReplicaManager, segment.GetCollectionID(), nodes)
|
|
||||||
return len(replicas) == len(ob.meta.ReplicaManager.GetByCollection(segment.GetCollectionID()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) getOverrideSegmentInfo(handOffSegments []*datapb.SegmentInfo, segmentIDs ...int64) []int64 {
|
|
||||||
overrideSegments := make([]int64, 0)
|
|
||||||
for _, segmentID := range segmentIDs {
|
|
||||||
for _, segmentInHandoff := range handOffSegments {
|
|
||||||
if segmentID == segmentInHandoff.ID {
|
|
||||||
toReleaseSegments := ob.getOverrideSegmentInfo(handOffSegments, segmentInHandoff.CompactionFrom...)
|
|
||||||
if len(toReleaseSegments) > 0 {
|
|
||||||
overrideSegments = append(overrideSegments, toReleaseSegments...)
|
|
||||||
}
|
|
||||||
|
|
||||||
overrideSegments = append(overrideSegments, segmentID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return overrideSegments
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) isAllCompactFromHandoffCompleted(segmentInfo *querypb.SegmentInfo) bool {
|
|
||||||
for _, segID := range segmentInfo.CompactionFrom {
|
|
||||||
_, ok := ob.handoffEvents[segID]
|
|
||||||
if ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) tryRelease(ctx context.Context, event *HandoffEvent) {
|
|
||||||
segment := event.Segment
|
|
||||||
|
|
||||||
if ob.isSealedSegmentLoaded(segment) || !ob.isSegmentExistOnTarget(segment) {
|
|
||||||
// Note: the fake segment will not add into target segments, in order to guarantee
|
|
||||||
// the all parent segments are released we check handoff events list instead of to
|
|
||||||
// check segment from the leader view, or might miss some segments to release.
|
|
||||||
if segment.GetIsFake() && !ob.isAllCompactFromHandoffCompleted(segment) {
|
|
||||||
log.Debug("try to release fake segments fails, due to the dependencies haven't complete handoff.",
|
|
||||||
zap.Int64("segmentID", segment.GetSegmentID()),
|
|
||||||
zap.Bool("faked", segment.GetIsFake()),
|
|
||||||
zap.Int64s("sourceSegments", segment.CompactionFrom),
|
|
||||||
)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
compactSource := segment.CompactionFrom
|
|
||||||
if len(compactSource) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Info("remove compactFrom segments",
|
|
||||||
zap.Int64("collectionID", segment.GetCollectionID()),
|
|
||||||
zap.Int64("partitionID", segment.GetPartitionID()),
|
|
||||||
zap.Int64("segmentID", segment.GetSegmentID()),
|
|
||||||
zap.Bool("faked", segment.GetIsFake()),
|
|
||||||
zap.Int64s("sourceSegments", compactSource),
|
|
||||||
)
|
|
||||||
for _, toRelease := range compactSource {
|
|
||||||
// when handoff happens between growing and sealed, both with same Segment id, so can't remove from target here
|
|
||||||
if segment.CreatedByCompaction {
|
|
||||||
ob.target.RemoveSegment(toRelease)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) tryClean(ctx context.Context) {
|
|
||||||
ob.handoffEventLock.Lock()
|
|
||||||
defer ob.handoffEventLock.Unlock()
|
|
||||||
|
|
||||||
for collectionID, partitionSubmitOrder := range ob.handoffSubmitOrders {
|
|
||||||
pos := 0
|
|
||||||
for _, segmentID := range partitionSubmitOrder {
|
|
||||||
event, ok := ob.handoffEvents[segmentID]
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
segment := event.Segment
|
|
||||||
if ob.isAllCompactFromReleased(segment) {
|
|
||||||
log.Info("HandoffObserver: clean handoff event after handoff finished!",
|
|
||||||
zap.Int64("collectionID", segment.GetCollectionID()),
|
|
||||||
zap.Int64("partitionID", segment.GetPartitionID()),
|
|
||||||
zap.Int64("segmentID", segment.GetSegmentID()),
|
|
||||||
zap.Bool("faked", segment.GetIsFake()),
|
|
||||||
)
|
|
||||||
err := ob.cleanEvent(ctx, segment)
|
|
||||||
if err == nil {
|
|
||||||
delete(ob.handoffEvents, segment.GetSegmentID())
|
|
||||||
}
|
|
||||||
pos++
|
|
||||||
} else {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ob.handoffSubmitOrders[collectionID] = partitionSubmitOrder[pos:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) cleanEvent(ctx context.Context, segmentInfo *querypb.SegmentInfo) error {
|
|
||||||
log := log.With(
|
|
||||||
zap.Int64("collectionID", segmentInfo.CollectionID),
|
|
||||||
zap.Int64("partitionID", segmentInfo.PartitionID),
|
|
||||||
zap.Int64("segmentID", segmentInfo.SegmentID),
|
|
||||||
)
|
|
||||||
|
|
||||||
// add retry logic
|
|
||||||
err := retry.Do(ctx, func() error {
|
|
||||||
return ob.store.RemoveHandoffEvent(segmentInfo)
|
|
||||||
}, retry.Attempts(5))
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
log.Warn("failed to clean handoff event from etcd", zap.Error(err))
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) isSegmentExistOnTarget(segmentInfo *querypb.SegmentInfo) bool {
|
|
||||||
return ob.target.ContainSegment(segmentInfo.SegmentID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ob *HandoffObserver) isAllCompactFromReleased(segmentInfo *querypb.SegmentInfo) bool {
|
|
||||||
if !segmentInfo.CreatedByCompaction {
|
|
||||||
return ob.isGrowingSegmentReleased(segmentInfo.SegmentID)
|
|
||||||
}
|
|
||||||
for _, segment := range segmentInfo.CompactionFrom {
|
|
||||||
if !ob.isSegmentReleased(segment) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
@ -1,645 +0,0 @@
|
|||||||
// Licensed to the LF AI & Data foundation under one
|
|
||||||
// or more contributor license agreements. See the NOTICE file
|
|
||||||
// distributed with this work for additional information
|
|
||||||
// regarding copyright ownership. The ASF licenses this file
|
|
||||||
// to you under the Apache License, Version 2.0 (the
|
|
||||||
// "License"); you may not use this file except in compliance
|
|
||||||
// with the License. You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package observers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"github.com/milvus-io/milvus-proto/go-api/commonpb"
|
|
||||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
|
||||||
"github.com/milvus-io/milvus/internal/log"
|
|
||||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
|
||||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
|
||||||
"github.com/milvus-io/milvus/internal/querycoordv2/meta"
|
|
||||||
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
|
|
||||||
"github.com/milvus-io/milvus/internal/util"
|
|
||||||
"github.com/milvus-io/milvus/internal/util/etcd"
|
|
||||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
|
||||||
"github.com/stretchr/testify/mock"
|
|
||||||
"github.com/stretchr/testify/suite"
|
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultVecFieldID = 1
|
|
||||||
defaultIndexID = 1
|
|
||||||
)
|
|
||||||
|
|
||||||
type HandoffObserverTestSuit struct {
|
|
||||||
suite.Suite
|
|
||||||
// Data
|
|
||||||
collection int64
|
|
||||||
partition int64
|
|
||||||
channel *meta.DmChannel
|
|
||||||
replicaNumber int32
|
|
||||||
nodes []int64
|
|
||||||
growingSegments []*datapb.SegmentInfo
|
|
||||||
sealedSegments []*datapb.SegmentInfo
|
|
||||||
|
|
||||||
//Mocks
|
|
||||||
idAllocator func() (int64, error)
|
|
||||||
etcd *clientv3.Client
|
|
||||||
kv *etcdkv.EtcdKV
|
|
||||||
|
|
||||||
//Dependency
|
|
||||||
store meta.Store
|
|
||||||
meta *meta.Meta
|
|
||||||
dist *meta.DistributionManager
|
|
||||||
target *meta.TargetManager
|
|
||||||
broker *meta.MockBroker
|
|
||||||
|
|
||||||
// Test Object
|
|
||||||
observer *HandoffObserver
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *HandoffObserverTestSuit) SetupSuite() {
|
|
||||||
Params.Init()
|
|
||||||
|
|
||||||
suite.collection = 100
|
|
||||||
suite.partition = 10
|
|
||||||
suite.channel = meta.DmChannelFromVChannel(&datapb.VchannelInfo{
|
|
||||||
CollectionID: 100,
|
|
||||||
ChannelName: "100-dmc0",
|
|
||||||
})
|
|
||||||
|
|
||||||
suite.sealedSegments = []*datapb.SegmentInfo{
|
|
||||||
{
|
|
||||||
ID: 1,
|
|
||||||
CollectionID: 100,
|
|
||||||
PartitionID: 10,
|
|
||||||
InsertChannel: "100-dmc0",
|
|
||||||
State: commonpb.SegmentState_Sealed,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ID: 2,
|
|
||||||
CollectionID: 100,
|
|
||||||
PartitionID: 10,
|
|
||||||
InsertChannel: "100-dmc1",
|
|
||||||
State: commonpb.SegmentState_Sealed,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
suite.replicaNumber = 1
|
|
||||||
suite.nodes = []int64{1, 2, 3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *HandoffObserverTestSuit) SetupTest() {
|
|
||||||
// Mocks
|
|
||||||
var err error
|
|
||||||
suite.idAllocator = RandomIncrementIDAllocator()
|
|
||||||
log.Debug("create embedded etcd KV...")
|
|
||||||
config := GenerateEtcdConfig()
|
|
||||||
client, err := etcd.GetEtcdClient(&config)
|
|
||||||
suite.Require().NoError(err)
|
|
||||||
suite.kv = etcdkv.NewEtcdKV(client, Params.EtcdCfg.MetaRootPath+"-"+RandomMetaRootPath())
|
|
||||||
suite.Require().NoError(err)
|
|
||||||
log.Debug("create meta store...")
|
|
||||||
suite.store = meta.NewMetaStore(suite.kv)
|
|
||||||
|
|
||||||
// Dependency
|
|
||||||
suite.meta = meta.NewMeta(suite.idAllocator, suite.store)
|
|
||||||
suite.dist = meta.NewDistributionManager()
|
|
||||||
suite.target = meta.NewTargetManager()
|
|
||||||
suite.broker = meta.NewMockBroker(suite.T())
|
|
||||||
|
|
||||||
// Test Object
|
|
||||||
suite.observer = NewHandoffObserver(suite.store, suite.meta, suite.dist, suite.target, suite.broker)
|
|
||||||
suite.observer.Register(suite.collection)
|
|
||||||
suite.observer.StartHandoff(suite.collection)
|
|
||||||
suite.load()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *HandoffObserverTestSuit) TearDownTest() {
|
|
||||||
suite.observer.Stop()
|
|
||||||
suite.kv.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *HandoffObserverTestSuit) TestFlushingHandoff() {
|
|
||||||
// init leader view
|
|
||||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
|
||||||
ID: 1,
|
|
||||||
CollectionID: suite.collection,
|
|
||||||
Channel: suite.channel.ChannelName,
|
|
||||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
|
|
||||||
GrowingSegments: typeutil.NewUniqueSet(3),
|
|
||||||
})
|
|
||||||
|
|
||||||
Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
|
|
||||||
err := suite.observer.Start(context.Background())
|
|
||||||
suite.NoError(err)
|
|
||||||
suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partition}, nil)
|
|
||||||
|
|
||||||
flushingSegment := &querypb.SegmentInfo{
|
|
||||||
SegmentID: 3,
|
|
||||||
CollectionID: suite.collection,
|
|
||||||
PartitionID: suite.partition,
|
|
||||||
SegmentState: commonpb.SegmentState_Sealed,
|
|
||||||
IndexInfos: []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
|
|
||||||
}
|
|
||||||
suite.produceHandOffEvent(flushingSegment)
|
|
||||||
|
|
||||||
suite.Eventually(func() bool {
|
|
||||||
return suite.target.ContainSegment(3)
|
|
||||||
}, 3*time.Second, 1*time.Second)
|
|
||||||
|
|
||||||
// fake load CompactTo Segment
|
|
||||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
|
||||||
ID: 1,
|
|
||||||
CollectionID: suite.collection,
|
|
||||||
Channel: suite.channel.ChannelName,
|
|
||||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}, 3: {NodeID: 3, Version: 0}},
|
|
||||||
GrowingSegments: typeutil.NewUniqueSet(3),
|
|
||||||
})
|
|
||||||
|
|
||||||
// fake release CompactFrom Segment
|
|
||||||
suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
|
|
||||||
ID: 1,
|
|
||||||
CollectionID: suite.collection,
|
|
||||||
Channel: suite.channel.ChannelName,
|
|
||||||
Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}, 3: {NodeID: 3, Version: 0}},
|
|
||||||
})
|
|
||||||
|
|
||||||
suite.Eventually(func() bool {
|
|
||||||
return len(suite.dist.LeaderViewManager.GetGrowingSegmentDist(3)) == 0
|
|
||||||
}, 3*time.Second, 1*time.Second)
|
|
||||||
|
|
||||||
suite.Eventually(func() bool {
|
|
||||||
key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, suite.collection, suite.partition, 3)
|
|
||||||
value, err := suite.kv.Load(key)
|
|
||||||
return len(value) == 0 && err != nil
|
|
||||||
}, 3*time.Second, 1*time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *HandoffObserverTestSuit) TestCompactHandoff() {
    // init leader view
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID:           1,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })

    Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
    err := suite.observer.Start(context.Background())
    suite.NoError(err)
    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partition}, nil)
    compactSegment := &querypb.SegmentInfo{
        SegmentID:           3,
        CollectionID:        suite.collection,
        PartitionID:         suite.partition,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{1},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }
    suite.produceHandOffEvent(compactSegment)

    suite.Eventually(func() bool {
        return suite.target.ContainSegment(3)
    }, 3*time.Second, 1*time.Second)

    // fake load CompactTo Segment
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID:           1,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}, 3: {NodeID: 3, Version: 0}},
    })

    suite.Eventually(func() bool {
        return !suite.target.ContainSegment(1)
    }, 3*time.Second, 1*time.Second)

    // fake release CompactFrom Segment
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID:           1,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{2: {NodeID: 2, Version: 0}, 3: {NodeID: 3, Version: 0}},
    })

    suite.Eventually(func() bool {
        key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, suite.collection, suite.partition, 3)
        value, err := suite.kv.Load(key)
        return len(value) == 0 && err != nil
    }, 3*time.Second, 1*time.Second)
}

func (suite *HandoffObserverTestSuit) TestRecursiveHandoff() {
    // init leader view
    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID:              1,
        CollectionID:    suite.collection,
        Channel:         suite.channel.ChannelName,
        Segments:        map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
        GrowingSegments: typeutil.NewUniqueSet(3),
    })
    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partition}, nil)

    Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
    err := suite.observer.Start(context.Background())
    suite.NoError(err)

    flushingSegment := &querypb.SegmentInfo{
        SegmentID:    3,
        CollectionID: suite.collection,
        PartitionID:  suite.partition,
        SegmentState: commonpb.SegmentState_Sealed,
        IndexInfos:   []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }

    compactSegment1 := &querypb.SegmentInfo{
        SegmentID:           4,
        CollectionID:        suite.collection,
        PartitionID:         suite.partition,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{3},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }

    compactSegment2 := &querypb.SegmentInfo{
        SegmentID:           5,
        CollectionID:        suite.collection,
        PartitionID:         suite.partition,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{4},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }

    suite.produceHandOffEvent(flushingSegment)
    suite.produceHandOffEvent(compactSegment1)
    suite.produceHandOffEvent(compactSegment2)

    // fake load CompactTo Segment
    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID:              1,
        CollectionID:    suite.collection,
        Channel:         suite.channel.ChannelName,
        Segments:        map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}, 5: {NodeID: 3, Version: 0}},
        GrowingSegments: typeutil.NewUniqueSet(3),
    })

    suite.Eventually(func() bool {
        return suite.target.ContainSegment(1) && suite.target.ContainSegment(2) && suite.target.ContainSegment(5)
    }, 3*time.Second, 1*time.Second)

    suite.Eventually(func() bool {
        return !suite.target.ContainSegment(3) && !suite.target.ContainSegment(4)
    }, 3*time.Second, 1*time.Second)

    // fake release CompactFrom Segment
    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID:           1,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}, 5: {NodeID: 3, Version: 0}},
    })

    suite.Eventually(func() bool {
        return suite.target.ContainSegment(1) && suite.target.ContainSegment(2) && suite.target.ContainSegment(5)
    }, 3*time.Second, 1*time.Second)

    suite.Eventually(func() bool {
        return !suite.target.ContainSegment(3) && !suite.target.ContainSegment(4)
    }, 3*time.Second, 1*time.Second)

    suite.Eventually(func() bool {
        return len(suite.dist.LeaderViewManager.GetGrowingSegmentDist(3)) == 0
    }, 3*time.Second, 1*time.Second)

    suite.Eventually(func() bool {
        key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, suite.collection, suite.partition, 3)
        value, err := suite.kv.Load(key)
        return len(value) == 0 && err != nil
    }, 3*time.Second, 1*time.Second)
}

func (suite *HandoffObserverTestSuit) TestReloadHandoffEventOrder() {
    // init leader view
    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID:              1,
        CollectionID:    suite.collection,
        Channel:         suite.channel.ChannelName,
        Segments:        map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
        GrowingSegments: typeutil.NewUniqueSet(3),
    })

    // fake handoff event from start
    flushingSegment := &querypb.SegmentInfo{
        SegmentID:    3,
        CollectionID: suite.collection,
        PartitionID:  suite.partition,
        SegmentState: commonpb.SegmentState_Sealed,
        IndexInfos:   []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }
    compactSegment1 := &querypb.SegmentInfo{
        SegmentID:           9,
        CollectionID:        suite.collection,
        PartitionID:         suite.partition,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{3},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }
    compactSegment2 := &querypb.SegmentInfo{
        SegmentID:           10,
        CollectionID:        suite.collection,
        PartitionID:         suite.partition,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{4},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }

    suite.produceHandOffEvent(flushingSegment)
    suite.produceHandOffEvent(compactSegment1)
    suite.produceHandOffEvent(compactSegment2)

    keys, _, _, err := suite.kv.LoadWithRevision(util.HandoffSegmentPrefix)
    suite.NoError(err)
    suite.Equal(true, strings.HasSuffix(keys[0], "3"))
    suite.Equal(true, strings.HasSuffix(keys[1], "9"))
    suite.Equal(true, strings.HasSuffix(keys[2], "10"))
}

func (suite *HandoffObserverTestSuit) TestLoadHandoffEventFromStore() {
    // init leader view
    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID:              1,
        CollectionID:    suite.collection,
        Channel:         suite.channel.ChannelName,
        Segments:        map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
        GrowingSegments: typeutil.NewUniqueSet(3),
    })

    // fake handoff event from start
    compactSegment1 := &querypb.SegmentInfo{
        SegmentID:           4,
        CollectionID:        suite.collection,
        PartitionID:         suite.partition,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{1},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }
    compactSegment2 := &querypb.SegmentInfo{
        SegmentID:           5,
        CollectionID:        suite.collection,
        PartitionID:         suite.partition,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{3},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }

    suite.produceHandOffEvent(compactSegment1)
    suite.produceHandOffEvent(compactSegment2)

    Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
    err := suite.observer.Start(context.Background())
    suite.NoError(err)

    suite.Eventually(func() bool {
        return suite.target.ContainSegment(1) && suite.target.ContainSegment(2)
    }, 3*time.Second, 1*time.Second)

    suite.Eventually(func() bool {
        return !suite.target.ContainSegment(4) && !suite.target.ContainSegment(5)
    }, 3*time.Second, 1*time.Second)
}

func (suite *HandoffObserverTestSuit) produceHandOffEvent(segmentInfo *querypb.SegmentInfo) {
    key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
    value, err := proto.Marshal(segmentInfo)
    suite.NoError(err)
    err = suite.kv.Save(key, string(value))
    suite.NoError(err)
}

func (suite *HandoffObserverTestSuit) existHandOffEvent(segmentInfo *querypb.SegmentInfo) bool {
    key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
    _, err := suite.kv.Load(key)
    return err == nil
}

func (suite *HandoffObserverTestSuit) load() {
    // Mock meta data
    replicas, err := suite.meta.ReplicaManager.Spawn(suite.collection, suite.replicaNumber)
    suite.NoError(err)
    for _, replica := range replicas {
        replica.AddNode(suite.nodes...)
    }
    err = suite.meta.ReplicaManager.Put(replicas...)
    suite.NoError(err)

    err = suite.meta.PutCollection(&meta.Collection{
        CollectionLoadInfo: &querypb.CollectionLoadInfo{
            CollectionID:  suite.collection,
            ReplicaNumber: suite.replicaNumber,
            Status:        querypb.LoadStatus_Loaded,
            FieldIndexID:  map[int64]int64{defaultVecFieldID: defaultIndexID},
        },
        LoadPercentage: 0,
        CreatedAt:      time.Now(),
    })
    suite.NoError(err)

    suite.target.AddDmChannel(suite.channel)
    suite.target.AddSegment(suite.sealedSegments...)
}

func (suite *HandoffObserverTestSuit) TestHandoffOnUnloadedPartition() {
    Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
    err := suite.observer.Start(context.Background())
    suite.NoError(err)

    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID:           1,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })

    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID:           2,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })
    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{2222}, nil)

    suite.observer.Register(suite.collection)
    suite.observer.StartHandoff(suite.collection)
    defer suite.observer.Unregister(context.TODO(), suite.collection)

    compactSegment1 := &querypb.SegmentInfo{
        SegmentID:           111,
        CollectionID:        suite.collection,
        PartitionID:         1111,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{1},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }

    compactSegment2 := &querypb.SegmentInfo{
        SegmentID:           222,
        CollectionID:        suite.collection,
        PartitionID:         2222,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{1},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }
    suite.produceHandOffEvent(compactSegment1)
    suite.produceHandOffEvent(compactSegment2)

    suite.Eventually(func() bool {
        return !suite.target.ContainSegment(111) && suite.target.ContainSegment(222)
    }, 3*time.Second, 1*time.Second)
}

func (suite *HandoffObserverTestSuit) TestUnRegisterHandoff() {
    Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
    err := suite.observer.Start(context.Background())
    suite.NoError(err)

    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID:           1,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })

    suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{
        ID:           2,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })

    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{1111, 2222}, nil)
    suite.observer.Register(suite.collection)
    compactSegment1 := &querypb.SegmentInfo{
        SegmentID:           111,
        CollectionID:        suite.collection,
        PartitionID:         1111,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{1},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }
    suite.produceHandOffEvent(compactSegment1)
    suite.Eventually(func() bool {
        return suite.observer.GetEventNum() == 1
    }, 3*time.Second, 1*time.Second)
    suite.observer.Unregister(context.TODO(), suite.collection)

    suite.observer.Register(suite.collection)
    defer suite.observer.Unregister(context.TODO(), suite.collection)
    compactSegment2 := &querypb.SegmentInfo{
        SegmentID:           222,
        CollectionID:        suite.collection,
        PartitionID:         2222,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{2},
        CreatedByCompaction: true,
        IndexInfos:          []*querypb.FieldIndexInfo{{IndexID: defaultIndexID}},
    }
    suite.produceHandOffEvent(compactSegment2)
    suite.Eventually(func() bool {
        return suite.observer.GetEventNum() == 1
    }, 3*time.Second, 1*time.Second)
}

func (suite *HandoffObserverTestSuit) TestFilterOutEventByIndexID() {
    // init leader view
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID:           1,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })
    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partition}, nil)

    Params.QueryCoordCfg.CheckHandoffInterval = 1 * time.Second
    err := suite.observer.Start(context.Background())
    suite.NoError(err)

    compactSegment := &querypb.SegmentInfo{
        SegmentID:           3,
        CollectionID:        suite.collection,
        PartitionID:         suite.partition,
        SegmentState:        commonpb.SegmentState_Sealed,
        CompactionFrom:      []int64{1},
        CreatedByCompaction: true,
    }
    suite.produceHandOffEvent(compactSegment)

    suite.Eventually(func() bool {
        suite.observer.handoffEventLock.RLock()
        defer suite.observer.handoffEventLock.RUnlock()
        _, ok := suite.observer.handoffEvents[compactSegment.GetSegmentID()]
        return !ok && !suite.target.ContainSegment(3) && !suite.existHandOffEvent(compactSegment)
    }, 3*time.Second, 1*time.Second)
}

func (suite *HandoffObserverTestSuit) TestFakedSegmentHandoff() {
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID:           1,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}},
    })
    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partition}, nil)

    Params.QueryCoordCfg.CheckHandoffInterval = 200 * time.Millisecond
    err := suite.observer.Start(context.Background())
    suite.NoError(err)

    handoffSegment := &querypb.SegmentInfo{
        SegmentID:           3,
        CollectionID:        suite.collection,
        PartitionID:         suite.partition,
        CompactionFrom:      []int64{1, 2},
        CreatedByCompaction: true,
        IsFake:              true,
    }
    suite.produceHandOffEvent(handoffSegment)

    time.Sleep(1 * time.Second)
    suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{
        ID:           1,
        CollectionID: suite.collection,
        Channel:      suite.channel.ChannelName,
        Segments:     map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}, 2: {NodeID: 2, Version: 0}},
    })

    suite.Eventually(func() bool {
        return !suite.target.ContainSegment(1) && !suite.target.ContainSegment(2)
    }, 3*time.Second, 1*time.Second)
}

func TestHandoffObserverSuit(t *testing.T) {
    suite.Run(t, new(HandoffObserverTestSuit))
}
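As an aside (not part of the commit): the tests above all address handoff events in etcd through the same key layout, <HandoffSegmentPrefix>/<collectionID>/<partitionID>/<segmentID>, as seen in produceHandOffEvent. A minimal standalone sketch of that convention is shown below; the helper name and the prefix constant here are hypothetical, only the "%s/%d/%d/%d" format comes from the test code above.

package main

import "fmt"

// handoffSegmentPrefix is a stand-in for util.HandoffSegmentPrefix.
const handoffSegmentPrefix = "querycoord-handoff"

// handoffEventKey builds the etcd key under which a handoff event for one
// segment is stored, following the layout used by produceHandOffEvent.
func handoffEventKey(collectionID, partitionID, segmentID int64) string {
    return fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, collectionID, partitionID, segmentID)
}

func main() {
    // For collection 100, partition 10, segment 3 the tests expect a key ending in "/3".
    fmt.Println(handoffEventKey(100, 10, 3))
}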
@ -106,7 +106,7 @@ func (o *LeaderObserver) findNeedLoadedSegments(leaderView *meta.LeaderView, dis
     for _, s := range dists {
         version, ok := leaderView.Segments[s.GetID()]
         if ok && version.GetVersion() >= s.Version ||
-            !o.target.ContainSegment(s.GetID()) {
+            o.target.GetHistoricalSegment(s.CollectionID, s.GetID(), meta.CurrentTarget) == nil {
             continue
         }
         ret = append(ret, &querypb.SyncAction{
@ -128,7 +128,7 @@ func (o *LeaderObserver) findNeedRemovedSegments(leaderView *meta.LeaderView, di
     }
     for sid := range leaderView.Segments {
         _, ok := distMap[sid]
-        if ok || o.target.ContainSegment(sid) {
+        if ok || o.target.GetHistoricalSegment(leaderView.CollectionID, sid, meta.CurrentTarget) != nil {
             continue
         }
         ret = append(ret, &querypb.SyncAction{

@ -27,6 +27,7 @@ import (

     "github.com/milvus-io/milvus-proto/go-api/commonpb"
     etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
+    "github.com/milvus-io/milvus/internal/proto/datapb"
     "github.com/milvus-io/milvus/internal/proto/querypb"
     "github.com/milvus-io/milvus/internal/querycoordv2/meta"
     . "github.com/milvus-io/milvus/internal/querycoordv2/params"
@ -40,6 +41,9 @@ type LeaderObserverTestSuite struct {
     observer    *LeaderObserver
     kv          *etcdkv.EtcdKV
     mockCluster *session.MockCluster
+
+    meta   *meta.Meta
+    broker *meta.MockBroker
 }

 func (suite *LeaderObserverTestSuite) SetupSuite() {
@ -56,12 +60,13 @@ func (suite *LeaderObserverTestSuite) SetupTest() {
     // meta
     store := meta.NewMetaStore(suite.kv)
     idAllocator := RandomIncrementIDAllocator()
-    testMeta := meta.NewMeta(idAllocator, store)
+    suite.meta = meta.NewMeta(idAllocator, store)
+    suite.broker = meta.NewMockBroker(suite.T())

     suite.mockCluster = session.NewMockCluster(suite.T())
     distManager := meta.NewDistributionManager()
-    targetManager := meta.NewTargetManager()
-    suite.observer = NewLeaderObserver(distManager, testMeta, targetManager, suite.mockCluster)
+    targetManager := meta.NewTargetManager(suite.broker, suite.meta)
+    suite.observer = NewLeaderObserver(distManager, suite.meta, targetManager, suite.mockCluster)
 }

 func (suite *LeaderObserverTestSuite) TearDownTest() {
@ -73,10 +78,25 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegments() {
     observer := suite.observer
     observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
     observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
-    observer.target.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
-    observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 1, 1, "test-insert-channel"))
+    segments := []*datapb.SegmentBinlogs{
+        {
+            SegmentID:     1,
+            InsertChannel: "test-insert-channel",
+        },
+    }
+    channels := []*datapb.VchannelInfo{
+        {
+            CollectionID: 1,
+            ChannelName:  "test-insert-channel",
+        },
+    }
+    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
+        channels, segments, nil)
+    observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
+    observer.target.UpdateCollectionCurrentTarget(1)
+    observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 2, 1, "test-insert-channel"))
     observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
-    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, []int64{}))
+    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, map[int64]*meta.Segment{}))
     expectReq := &querypb.SyncDistributionRequest{
         Base: &commonpb.MsgBase{
             MsgType: commonpb.MsgType_SyncDistribution,
@ -113,13 +133,28 @@ func (suite *LeaderObserverTestSuite) TestIgnoreBalancedSegment() {
     observer := suite.observer
     observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1))
     observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
-    observer.target.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
+    segments := []*datapb.SegmentBinlogs{
+        {
+            SegmentID:     1,
+            InsertChannel: "test-insert-channel",
+        },
+    }
+    channels := []*datapb.VchannelInfo{
+        {
+            CollectionID: 1,
+            ChannelName:  "test-insert-channel",
+        },
+    }
+    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
+        channels, segments, nil)
+    observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
+    observer.target.UpdateCollectionCurrentTarget(1)
     observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 1, 1, "test-insert-channel"))
     observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))

     // The leader view saw the segment on new node,
     // but another nodes not yet
-    leaderView := utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, []int64{})
+    leaderView := utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, map[int64]*meta.Segment{})
     leaderView.Segments[1] = &querypb.SegmentDist{
         NodeID:  2,
         Version: 2,
@ -136,12 +171,27 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegmentsWithReplicas() {
     observer.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 2))
     observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))
     observer.meta.ReplicaManager.Put(utils.CreateTestReplica(2, 1, []int64{3, 4}))
-    observer.target.AddSegment(utils.CreateTestSegmentInfo(1, 1, 1, "test-insert-channel"))
+    segments := []*datapb.SegmentBinlogs{
+        {
+            SegmentID:     1,
+            InsertChannel: "test-insert-channel",
+        },
+    }
+    channels := []*datapb.VchannelInfo{
+        {
+            CollectionID: 1,
+            ChannelName:  "test-insert-channel",
+        },
+    }
+    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return(
+        channels, segments, nil)
+    observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1))
+    observer.target.UpdateCollectionCurrentTarget(1)
     observer.dist.SegmentDistManager.Update(1, utils.CreateTestSegment(1, 1, 1, 1, 1, "test-insert-channel"))
     observer.dist.SegmentDistManager.Update(4, utils.CreateTestSegment(1, 1, 1, 4, 2, "test-insert-channel"))
     observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
-    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, []int64{}))
-    observer.dist.LeaderViewManager.Update(4, utils.CreateTestLeaderView(4, 1, "test-insert-channel", map[int64]int64{1: 4}, []int64{}))
+    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{}, map[int64]*meta.Segment{}))
+    observer.dist.LeaderViewManager.Update(4, utils.CreateTestLeaderView(4, 1, "test-insert-channel", map[int64]int64{1: 4}, map[int64]*meta.Segment{}))
     expectReq := &querypb.SyncDistributionRequest{
         Base: &commonpb.MsgBase{
             MsgType: commonpb.MsgType_SyncDistribution,
@ -180,7 +230,7 @@ func (suite *LeaderObserverTestSuite) TestSyncRemovedSegments() {
     observer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2}))

     observer.dist.ChannelDistManager.Update(2, utils.CreateTestChannel(1, 2, 1, "test-insert-channel"))
-    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{3: 2}, []int64{}))
+    observer.dist.LeaderViewManager.Update(2, utils.CreateTestLeaderView(2, 1, "test-insert-channel", map[int64]int64{3: 2}, map[int64]*meta.Segment{}))

     expectReq := &querypb.SyncDistributionRequest{
         Base: &commonpb.MsgBase{

internal/querycoordv2/observers/target_observer.go (new file, 172 lines)
@ -0,0 +1,172 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package observers

import (
    "context"
    "sync"
    "time"

    "go.uber.org/zap"

    "github.com/milvus-io/milvus/internal/log"
    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    "github.com/milvus-io/milvus/internal/querycoordv2/params"
    "github.com/milvus-io/milvus/internal/querycoordv2/utils"
    "github.com/milvus-io/milvus/internal/util/typeutil"
)

type TargetObserver struct {
    c         chan struct{}
    wg        sync.WaitGroup
    meta      *meta.Meta
    targetMgr *meta.TargetManager
    distMgr   *meta.DistributionManager
    broker    meta.Broker

    nextTargetLastUpdate map[int64]time.Time
    stopOnce             sync.Once
}

func NewTargetObserver(meta *meta.Meta, targetMgr *meta.TargetManager, distMgr *meta.DistributionManager, broker meta.Broker) *TargetObserver {
    return &TargetObserver{
        c:                    make(chan struct{}),
        meta:                 meta,
        targetMgr:            targetMgr,
        distMgr:              distMgr,
        broker:               broker,
        nextTargetLastUpdate: make(map[int64]time.Time),
    }
}

func (ob *TargetObserver) Start(ctx context.Context) {
    ob.wg.Add(1)
    go ob.schedule(ctx)
}

func (ob *TargetObserver) Stop() {
    ob.stopOnce.Do(func() {
        close(ob.c)
        ob.wg.Wait()
    })
}

func (ob *TargetObserver) schedule(ctx context.Context) {
    defer ob.wg.Done()
    log.Info("Start update next target loop")

    ticker := time.NewTicker(params.Params.QueryCoordCfg.UpdateNextTargetInterval)
    for {
        select {
        case <-ctx.Done():
            log.Info("Close target observer due to context canceled")
            return
        case <-ob.c:
            log.Info("Close target observer")
            return

        case <-ticker.C:
            ob.tryUpdateTarget()
        }
    }
}

func (ob *TargetObserver) tryUpdateTarget() {
    collections := ob.meta.GetAll()
    for _, collectionID := range collections {
        if ob.shouldUpdateCurrentTarget(collectionID) {
            ob.updateCurrentTarget(collectionID)
        }

        if ob.shouldUpdateNextTarget(collectionID) {
            // update next target in collection level
            ob.UpdateNextTarget(collectionID)
        }
    }

    collectionSet := typeutil.NewUniqueSet(collections...)
    // for collection which has been removed from target, try to clear nextTargetLastUpdate
    for collection := range ob.nextTargetLastUpdate {
        if !collectionSet.Contain(collection) {
            delete(ob.nextTargetLastUpdate, collection)
        }
    }
}

func (ob *TargetObserver) shouldUpdateNextTarget(collectionID int64) bool {
    return !ob.targetMgr.IsNextTargetExist(collectionID) || ob.isNextTargetExpired(collectionID)
}

func (ob *TargetObserver) isNextTargetExpired(collectionID int64) bool {
    return time.Since(ob.nextTargetLastUpdate[collectionID]) > params.Params.QueryCoordCfg.NextTargetSurviveTime
}

func (ob *TargetObserver) UpdateNextTarget(collectionID int64) {
    log := log.With(zap.Int64("collectionID", collectionID))

    log.Warn("observer trigger update next target")
    err := ob.targetMgr.UpdateCollectionNextTarget(collectionID)
    if err != nil {
        log.Error("failed to update next target for collection",
            zap.Error(err))
        return
    }
    ob.updateNextTargetTimestamp(collectionID)
}

func (ob *TargetObserver) updateNextTargetTimestamp(collectionID int64) {
    ob.nextTargetLastUpdate[collectionID] = time.Now()
}

func (ob *TargetObserver) shouldUpdateCurrentTarget(collectionID int64) bool {
    replicaNum := len(ob.meta.ReplicaManager.GetByCollection(collectionID))

    // check channel first
    channelNames := ob.targetMgr.GetDmChannelsByCollection(collectionID, meta.NextTarget)
    if len(channelNames) == 0 {
        // next target is empty, no need to update
        return false
    }

    for _, channel := range channelNames {
        group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
            collectionID,
            ob.distMgr.LeaderViewManager.GetChannelDist(channel.GetChannelName()))
        if len(group) < replicaNum {
            return false
        }
    }

    // and last check historical segment
    historicalSegments := ob.targetMgr.GetHistoricalSegmentsByCollection(collectionID, meta.NextTarget)
    for _, segment := range historicalSegments {
        group := utils.GroupNodesByReplica(ob.meta.ReplicaManager,
            collectionID,
            ob.distMgr.LeaderViewManager.GetSealedSegmentDist(segment.GetID()))
        if len(group) < replicaNum {
            return false
        }
    }

    return true
}

func (ob *TargetObserver) updateCurrentTarget(collectionID int64) {
    log.Warn("observer trigger update current target",
        zap.Int64("collectionID", collectionID))
    ob.targetMgr.UpdateCollectionCurrentTarget(collectionID)
}

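For orientation (this paragraph and sketch are not part of the commit): the observer above is constructed from a meta store, a target manager, a distribution manager and a broker, and then started with a context, exactly as the server wiring and the test file below do. A minimal sketch of that wiring, using only the constructors visible in this diff, is shown here; the wrapper function name is hypothetical.

package example

import (
    "context"

    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    "github.com/milvus-io/milvus/internal/querycoordv2/observers"
)

// wireTargetObserver mirrors the construction order used in this commit:
// the TargetManager takes the broker and meta, and the observer takes
// meta, target manager, distribution manager and broker.
func wireTargetObserver(ctx context.Context, m *meta.Meta, broker meta.Broker) *observers.TargetObserver {
    targetMgr := meta.NewTargetManager(broker, m)
    distMgr := meta.NewDistributionManager()

    ob := observers.NewTargetObserver(m, targetMgr, distMgr, broker)
    ob.Start(ctx) // starts the periodic next/current-target update loop; call Stop() on shutdown
    return ob
}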
internal/querycoordv2/observers/target_observer_test.go (new file, 141 lines)
@ -0,0 +1,141 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package observers

import (
    "context"
    "testing"
    "time"

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/suite"

    etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
    "github.com/milvus-io/milvus/internal/proto/datapb"
    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    . "github.com/milvus-io/milvus/internal/querycoordv2/params"
    "github.com/milvus-io/milvus/internal/querycoordv2/utils"
    "github.com/milvus-io/milvus/internal/util/etcd"
)

type TargetObserverSuite struct {
    suite.Suite

    kv *etcdkv.EtcdKV
    //dependency
    meta      *meta.Meta
    targetMgr *meta.TargetManager
    distMgr   *meta.DistributionManager
    broker    *meta.MockBroker

    observer *TargetObserver

    collectionID       int64
    partitionID        int64
    nextTargetSegments []*datapb.SegmentBinlogs
    nextTargetChannels []*datapb.VchannelInfo
}

func (suite *TargetObserverSuite) SetupSuite() {
    Params.Init()
    Params.QueryCoordCfg.UpdateNextTargetInterval = 3 * time.Second
}

func (suite *TargetObserverSuite) SetupTest() {
    var err error
    config := GenerateEtcdConfig()
    cli, err := etcd.GetEtcdClient(&config)
    suite.Require().NoError(err)
    suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath)

    // meta
    store := meta.NewMetaStore(suite.kv)
    idAllocator := RandomIncrementIDAllocator()
    suite.meta = meta.NewMeta(idAllocator, store)

    suite.broker = meta.NewMockBroker(suite.T())
    suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta)
    suite.distMgr = meta.NewDistributionManager()
    suite.observer = NewTargetObserver(suite.meta, suite.targetMgr, suite.distMgr, suite.broker)

    suite.observer.Start(context.TODO())

    suite.collectionID = int64(1000)
    suite.partitionID = int64(100)

    err = suite.meta.CollectionManager.PutCollection(utils.CreateTestCollection(suite.collectionID, 1))
    suite.NoError(err)
    err = suite.meta.CollectionManager.PutPartition(utils.CreateTestPartition(suite.collectionID, suite.partitionID))
    suite.NoError(err)

    suite.nextTargetChannels = []*datapb.VchannelInfo{
        {
            CollectionID: suite.collectionID,
            ChannelName:  "channel-1",
        },
        {
            CollectionID: suite.collectionID,
            ChannelName:  "channel-2",
        },
    }

    suite.nextTargetSegments = []*datapb.SegmentBinlogs{
        {
            SegmentID:     11,
            InsertChannel: "channel-1",
        },
        {
            SegmentID:     12,
            InsertChannel: "channel-2",
        },
    }

    suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, mock.Anything, mock.Anything).Return(suite.nextTargetChannels, suite.nextTargetSegments, nil)
    suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partitionID}, nil)
}

func (suite *TargetObserverSuite) TestTriggerUpdateTarget() {
    suite.Eventually(func() bool {
        return len(suite.targetMgr.GetHistoricalSegmentsByCollection(suite.collectionID, meta.NextTarget)) == 2
    }, 5*time.Second, 1*time.Second)

    suite.Eventually(func() bool {
        return len(suite.targetMgr.GetDmChannelsByCollection(suite.collectionID, meta.NextTarget)) == 2
    }, 5*time.Second, 1*time.Second)

    suite.distMgr.SegmentDistManager.Update(2, utils.CreateTestSegment(suite.collectionID, suite.partitionID, 11, 2, 0, "channel-1"))
    suite.distMgr.SegmentDistManager.Update(2, utils.CreateTestSegment(suite.collectionID, suite.partitionID, 12, 2, 1, "channel-2"))
    suite.distMgr.ChannelDistManager.Update(2, utils.CreateTestChannel(suite.collectionID, 2, 0, "channel-1"))
    suite.distMgr.ChannelDistManager.Update(2, utils.CreateTestChannel(suite.collectionID, 2, 1, "channel-2"))

    suite.Eventually(func() bool {
        return len(suite.targetMgr.GetHistoricalSegmentsByCollection(suite.collectionID, meta.CurrentTarget)) == 2
    }, 5*time.Second, 1*time.Second)

    suite.Eventually(func() bool {
        return len(suite.targetMgr.GetDmChannelsByCollection(suite.collectionID, meta.CurrentTarget)) == 2
    }, 5*time.Second, 1*time.Second)
}

func (suite *TargetObserverSuite) TearDownSuite() {
    suite.kv.Close()
    suite.observer.Stop()
}

func TestTargetManager(t *testing.T) {
    suite.Run(t, new(TargetObserverSuite))
}

@ -27,11 +27,6 @@ import (
     "syscall"
     "time"

-    "github.com/samber/lo"
-    clientv3 "go.etcd.io/etcd/client/v3"
-    "go.uber.org/zap"
-    "golang.org/x/sync/errgroup"
-
     "github.com/milvus-io/milvus-proto/go-api/commonpb"
     "github.com/milvus-io/milvus-proto/go-api/milvuspb"
     "github.com/milvus-io/milvus/internal/allocator"
@ -39,7 +34,6 @@ import (
     "github.com/milvus-io/milvus/internal/kv"
     etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
     "github.com/milvus-io/milvus/internal/log"
-    "github.com/milvus-io/milvus/internal/proto/querypb"
     "github.com/milvus-io/milvus/internal/querycoordv2/balance"
     "github.com/milvus-io/milvus/internal/querycoordv2/checkers"
     "github.com/milvus-io/milvus/internal/querycoordv2/dist"
@ -57,6 +51,9 @@ import (
     "github.com/milvus-io/milvus/internal/util/sessionutil"
     "github.com/milvus-io/milvus/internal/util/tsoutil"
     "github.com/milvus-io/milvus/internal/util/typeutil"
+    clientv3 "go.etcd.io/etcd/client/v3"
+    "go.uber.org/zap"
+    "golang.org/x/sync/errgroup"
 )

 var (
@ -106,7 +103,7 @@ type Server struct {
     // Observers
     collectionObserver *observers.CollectionObserver
     leaderObserver     *observers.LeaderObserver
-    handoffObserver    *observers.HandoffObserver
+    targetObserver     *observers.TargetObserver

     balancer balance.Balance

@ -264,12 +261,13 @@ func (s *Server) initMeta() error {
         ChannelDistManager: meta.NewChannelDistManager(),
         LeaderViewManager:  meta.NewLeaderViewManager(),
     }
-    s.targetMgr = meta.NewTargetManager()
     s.broker = meta.NewCoordinatorBroker(
         s.dataCoord,
         s.rootCoord,
         s.indexCoord,
     )
+    s.targetMgr = meta.NewTargetManager(s.broker, s.meta)

     return nil
 }

@ -286,11 +284,10 @@ func (s *Server) initObserver() {
         s.targetMgr,
         s.cluster,
     )
-    s.handoffObserver = observers.NewHandoffObserver(
-        s.store,
+    s.targetObserver = observers.NewTargetObserver(
         s.meta,
-        s.dist,
         s.targetMgr,
+        s.dist,
         s.broker,
     )
 }
@ -311,12 +308,6 @@ func (s *Server) Start() error {
     s.wg.Add(1)
     go s.watchNodes(revision)

-    // handoff master start before recover collection, to clean all outdated handoff event.
-    if err := s.handoffObserver.Start(s.ctx); err != nil {
-        log.Error("start handoff observer failed, exit...", zap.Error(err))
-        panic(err.Error())
-    }
-
     log.Info("start recovering dist and target")
     err = s.recover()
     if err != nil {
@ -338,6 +329,7 @@ func (s *Server) Start() error {
     log.Info("start observers...")
     s.collectionObserver.Start(s.ctx)
     s.leaderObserver.Start(s.ctx)
+    s.targetObserver.Start(s.ctx)

     if s.enableActiveStandBy {
         s.activateFunc = func() {
@ -393,8 +385,8 @@ func (s *Server) Stop() error {
     if s.leaderObserver != nil {
         s.leaderObserver.Stop()
     }
-    if s.handoffObserver != nil {
-        s.handoffObserver.Stop()
+    if s.targetObserver != nil {
+        s.targetObserver.Stop()
     }

     s.wg.Wait()
@ -506,35 +498,12 @@ func (s *Server) recover() error {
 }

 func (s *Server) recoverCollectionTargets(ctx context.Context, collection int64) error {
-    var (
-        partitions []int64
-        err        error
-    )
-    if s.meta.GetLoadType(collection) == querypb.LoadType_LoadCollection {
-        partitions, err = s.broker.GetPartitions(ctx, collection)
+    err := s.targetMgr.UpdateCollectionNextTarget(collection)
     if err != nil {
-        msg := "failed to get partitions from RootCoord"
+        msg := "failed to update next target for collection"
         log.Error(msg, zap.Error(err))
         return utils.WrapError(msg, err)
     }
-    } else {
-        partitions = lo.Map(s.meta.GetPartitionsByCollection(collection), func(partition *meta.Partition, _ int) int64 {
-            return partition.GetPartitionID()
-        })
-    }
-
-    s.handoffObserver.Register(collection)
-    err = utils.RegisterTargets(
-        ctx,
-        s.targetMgr,
-        s.broker,
-        collection,
-        partitions,
-    )
-    if err != nil {
-        return err
-    }
-    s.handoffObserver.StartHandoff(collection)
     return nil
 }

@ -620,23 +589,12 @@ func (s *Server) handleNodeDown(node int64) {
     // are missed, it will recover for a while.
     channels := s.dist.ChannelDistManager.GetByNode(node)
     for _, channel := range channels {
-        partitions, err := utils.GetPartitions(s.meta.CollectionManager,
-            s.broker,
-            channel.GetCollectionID())
+        err := s.targetMgr.UpdateCollectionNextTarget(channel.GetCollectionID())
         if err != nil {
-            log.Warn("failed to refresh targets of collection",
-                zap.Int64("collectionID", channel.GetCollectionID()),
-                zap.Error(err))
-        }
-        err = utils.RegisterTargets(s.ctx,
-            s.targetMgr,
-            s.broker,
-            channel.GetCollectionID(),
-            partitions)
-        if err != nil {
-            log.Warn("failed to refresh targets of collection",
-                zap.Int64("collectionID", channel.GetCollectionID()),
+            msg := "failed to update next targets for collection"
+            log.Error(msg,
                 zap.Error(err))
+            continue
         }
     }

@ -34,7 +34,6 @@ import (
     "github.com/milvus-io/milvus/internal/querycoordv2/dist"
     "github.com/milvus-io/milvus/internal/querycoordv2/meta"
     "github.com/milvus-io/milvus/internal/querycoordv2/mocks"
-    "github.com/milvus-io/milvus/internal/querycoordv2/observers"
     "github.com/milvus-io/milvus/internal/querycoordv2/params"
     "github.com/milvus-io/milvus/internal/querycoordv2/task"
     "github.com/milvus-io/milvus/internal/util/dependency"
@ -272,11 +271,11 @@ func (suite *ServerSuite) loadAll() {
 func (suite *ServerSuite) assertLoaded(collection int64) {
     suite.True(suite.server.meta.Exist(collection))
     for _, channel := range suite.channels[collection] {
-        suite.NotNil(suite.server.targetMgr.GetDmChannel(channel))
+        suite.NotNil(suite.server.targetMgr.GetDmChannel(collection, channel, meta.NextTarget))
     }
     for _, partitions := range suite.segments[collection] {
         for _, segment := range partitions {
-            suite.NotNil(suite.server.targetMgr.GetSegment(segment))
+            suite.NotNil(suite.server.targetMgr.GetHistoricalSegment(collection, segment, meta.NextTarget))
         }
     }
 }
@ -346,6 +345,7 @@ func (suite *ServerSuite) updateCollectionStatus(collectionID int64, status quer
 func (suite *ServerSuite) hackServer() {
     suite.broker = meta.NewMockBroker(suite.T())
     suite.server.broker = suite.broker
+    suite.server.targetMgr = meta.NewTargetManager(suite.broker, suite.server.meta)
     suite.server.taskScheduler = task.NewScheduler(
         suite.server.ctx,
         suite.server.meta,
@ -355,13 +355,6 @@ func (suite *ServerSuite) hackServer() {
         suite.server.cluster,
         suite.server.nodeMgr,
     )
-    suite.server.handoffObserver = observers.NewHandoffObserver(
-        suite.server.store,
-        suite.server.meta,
-        suite.server.dist,
-        suite.server.targetMgr,
-        suite.server.broker,
-    )
     suite.server.distController = dist.NewDistController(
         suite.server.cluster,
         suite.server.nodeMgr,

@@ -22,11 +22,6 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/milvus-io/milvus/internal/util/errorutil"
-	"github.com/milvus-io/milvus/internal/util/paramtable"
-
-	"golang.org/x/sync/errgroup"
-
 	"github.com/milvus-io/milvus-proto/go-api/commonpb"
 	"github.com/milvus-io/milvus-proto/go-api/milvuspb"
 	"github.com/milvus-io/milvus/internal/log"
@@ -36,11 +31,14 @@ import (
 	"github.com/milvus-io/milvus/internal/querycoordv2/job"
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
 	"github.com/milvus-io/milvus/internal/querycoordv2/utils"
+	"github.com/milvus-io/milvus/internal/util/errorutil"
 	"github.com/milvus-io/milvus/internal/util/metricsinfo"
+	"github.com/milvus-io/milvus/internal/util/paramtable"
 	"github.com/milvus-io/milvus/internal/util/timerecord"
 	"github.com/milvus-io/milvus/internal/util/typeutil"
 	"github.com/samber/lo"
 	"go.uber.org/zap"
+	"golang.org/x/sync/errgroup"
 )
 
 var (
@@ -208,7 +206,6 @@ func (s *Server) LoadCollection(ctx context.Context, req *querypb.LoadCollection
 		s.targetMgr,
 		s.broker,
 		s.nodeMgr,
-		s.handoffObserver,
 	)
 	s.jobScheduler.Add(loadJob)
 	err := loadJob.Wait()
@@ -245,7 +242,6 @@ func (s *Server) ReleaseCollection(ctx context.Context, req *querypb.ReleaseColl
 		s.dist,
 		s.meta,
 		s.targetMgr,
-		s.handoffObserver,
 	)
 	s.jobScheduler.Add(releaseJob)
 	err := releaseJob.Wait()
@@ -288,7 +284,6 @@ func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitions
 		s.targetMgr,
 		s.broker,
 		s.nodeMgr,
-		s.handoffObserver,
 	)
 	s.jobScheduler.Add(loadJob)
 	err := loadJob.Wait()
@@ -332,7 +327,6 @@ func (s *Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
 		s.dist,
 		s.meta,
 		s.targetMgr,
-		s.handoffObserver,
 	)
 	s.jobScheduler.Add(releaseJob)
 	err := releaseJob.Wait()
@@ -661,7 +655,7 @@ func (s *Server) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeade
 		return resp, nil
 	}
 
-	channels := s.targetMgr.GetDmChannelsByCollection(req.GetCollectionID())
+	channels := s.targetMgr.GetDmChannelsByCollection(req.GetCollectionID(), meta.CurrentTarget)
 	if len(channels) == 0 {
 		msg := "failed to get channels"
 		log.Warn(msg, zap.Error(meta.ErrCollectionNotFound))
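The hunks above replace the flat target lookups with scope-qualified ones. A minimal sketch of how a caller might use the scope visible in this diff; the helper name is illustrative and not part of the patch:

// Sketch only (not part of the patch): GetShardLeaders above now reads from the
// current target, so a caller that wants to know whether a collection is servable
// can check that the current target already has channels for it.
func hasServableChannels(targetMgr *meta.TargetManager, collectionID int64) bool {
	channels := targetMgr.GetDmChannelsByCollection(collectionID, meta.CurrentTarget)
	return len(channels) > 0
}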
@@ -31,7 +31,6 @@ import (
 	"github.com/milvus-io/milvus/internal/querycoordv2/balance"
 	"github.com/milvus-io/milvus/internal/querycoordv2/job"
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
-	"github.com/milvus-io/milvus/internal/querycoordv2/observers"
 	"github.com/milvus-io/milvus/internal/querycoordv2/params"
 	"github.com/milvus-io/milvus/internal/querycoordv2/session"
 	"github.com/milvus-io/milvus/internal/querycoordv2/task"
@@ -67,7 +66,6 @@ type ServiceSuite struct {
 	nodeMgr         *session.NodeManager
 	jobScheduler    *job.Scheduler
 	taskScheduler   *task.MockScheduler
-	handoffObserver *observers.HandoffObserver
 	balancer        balance.Balance
 
 	// Test object
@@ -117,8 +115,8 @@ func (suite *ServiceSuite) SetupTest() {
 	suite.store = meta.NewMetaStore(suite.kv)
 	suite.dist = meta.NewDistributionManager()
 	suite.meta = meta.NewMeta(params.RandomIncrementIDAllocator(), suite.store)
-	suite.targetMgr = meta.NewTargetManager()
 	suite.broker = meta.NewMockBroker(suite.T())
+	suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta)
 	suite.nodeMgr = session.NewNodeManager()
 	for _, node := range suite.nodes {
 		suite.nodeMgr.Add(session.NewNodeInfo(node, "localhost"))
@@ -127,13 +125,6 @@ func (suite *ServiceSuite) SetupTest() {
 	suite.jobScheduler = job.NewScheduler()
 	suite.taskScheduler = task.NewMockScheduler(suite.T())
 	suite.jobScheduler.Start(context.Background())
-	suite.handoffObserver = observers.NewHandoffObserver(
-		suite.store,
-		suite.meta,
-		suite.dist,
-		suite.targetMgr,
-		suite.broker,
-	)
 	suite.balancer = balance.NewRowCountBasedBalancer(
 		suite.taskScheduler,
 		suite.nodeMgr,
@@ -155,7 +146,6 @@ func (suite *ServiceSuite) SetupTest() {
 		jobScheduler:    suite.jobScheduler,
 		taskScheduler:   suite.taskScheduler,
 		balancer:        suite.balancer,
-		handoffObserver: suite.handoffObserver,
 	}
 	suite.server.UpdateStateCode(commonpb.StateCode_Healthy)
 }
@@ -871,7 +861,6 @@ func (suite *ServiceSuite) loadAll() {
 				suite.targetMgr,
 				suite.broker,
 				suite.nodeMgr,
-				suite.handoffObserver,
 			)
 			suite.jobScheduler.Add(job)
 			err := job.Wait()
@@ -879,6 +868,7 @@ func (suite *ServiceSuite) loadAll() {
 			suite.EqualValues(suite.replicaNumber[collection], suite.meta.GetReplicaNumber(collection))
 			suite.True(suite.meta.Exist(collection))
 			suite.NotNil(suite.meta.GetCollection(collection))
+			suite.targetMgr.UpdateCollectionCurrentTarget(collection)
 		} else {
 			req := &querypb.LoadPartitionsRequest{
 				CollectionID: collection,
@@ -893,7 +883,6 @@ func (suite *ServiceSuite) loadAll() {
 				suite.targetMgr,
 				suite.broker,
 				suite.nodeMgr,
-				suite.handoffObserver,
 			)
 			suite.jobScheduler.Add(job)
 			err := job.Wait()
@@ -901,6 +890,7 @@ func (suite *ServiceSuite) loadAll() {
 			suite.EqualValues(suite.replicaNumber[collection], suite.meta.GetReplicaNumber(collection))
 			suite.True(suite.meta.Exist(collection))
 			suite.NotNil(suite.meta.GetPartitionsByCollection(collection))
+			suite.targetMgr.UpdateCollectionCurrentTarget(collection)
 		}
 	}
 }
@@ -908,11 +898,11 @@ func (suite *ServiceSuite) loadAll() {
 func (suite *ServiceSuite) assertLoaded(collection int64) {
 	suite.True(suite.meta.Exist(collection))
 	for _, channel := range suite.channels[collection] {
-		suite.NotNil(suite.targetMgr.GetDmChannel(channel))
+		suite.NotNil(suite.targetMgr.GetDmChannel(collection, channel, meta.NextTarget))
 	}
 	for _, partitions := range suite.segments[collection] {
 		for _, segment := range partitions {
-			suite.NotNil(suite.targetMgr.GetSegment(segment))
+			suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.NextTarget))
 		}
 	}
 }
@@ -920,7 +910,7 @@ func (suite *ServiceSuite) assertLoaded(collection int64) {
 func (suite *ServiceSuite) assertPartitionLoaded(collection int64, partitions ...int64) {
 	suite.True(suite.meta.Exist(collection))
 	for _, channel := range suite.channels[collection] {
-		suite.NotNil(suite.targetMgr.GetDmChannel(channel))
+		suite.NotNil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget))
 	}
 	partitionSet := typeutil.NewUniqueSet(partitions...)
 	for partition, segments := range suite.segments[collection] {
@@ -928,7 +918,7 @@ func (suite *ServiceSuite) assertPartitionLoaded(collection int64, partitions ..
 			continue
 		}
 		for _, segment := range segments {
-			suite.NotNil(suite.targetMgr.GetSegment(segment))
+			suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
 		}
 	}
 }
@@ -936,11 +926,12 @@ func (suite *ServiceSuite) assertPartitionLoaded(collection int64, partitions ..
 func (suite *ServiceSuite) assertReleased(collection int64) {
 	suite.False(suite.meta.Exist(collection))
 	for _, channel := range suite.channels[collection] {
-		suite.Nil(suite.targetMgr.GetDmChannel(channel))
+		suite.Nil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget))
 	}
 	for _, partitions := range suite.segments[collection] {
 		for _, segment := range partitions {
-			suite.Nil(suite.targetMgr.GetSegment(segment))
+			suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget))
+			suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.NextTarget))
 		}
 	}
 }
@@ -18,6 +18,7 @@ package task
 
 import (
 	"context"
+	"errors"
 	"sync"
 	"time"
 
@@ -387,7 +388,12 @@ func (ex *Executor) subDmChannel(task *ChannelTask, step int) error {
 		partitions...,
 	)
 
-	dmChannel := ex.targetMgr.GetDmChannel(action.ChannelName())
+	dmChannel := ex.targetMgr.GetDmChannel(task.CollectionID(), action.ChannelName(), meta.NextTarget)
+	if dmChannel == nil {
+		msg := "channel does not exist in next target, skip it"
+		log.Warn(msg, zap.String("channelName", action.ChannelName()))
+		return errors.New(msg)
+	}
 	req := packSubDmChannelRequest(task, action, schema, loadMeta, dmChannel)
 	err = fillSubDmChannelRequest(ctx, req, ex.broker)
 	if err != nil {
@@ -22,13 +22,14 @@ import (
 	"fmt"
 	"sync"
 
+	"go.uber.org/zap"
+
 	"github.com/milvus-io/milvus/internal/log"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
 	"github.com/milvus-io/milvus/internal/querycoordv2/session"
 	"github.com/milvus-io/milvus/internal/querycoordv2/utils"
 	. "github.com/milvus-io/milvus/internal/util/typeutil"
-	"go.uber.org/zap"
 )
 
 const (
@@ -404,7 +405,7 @@ func (scheduler *taskScheduler) GetNodeSegmentCntDelta(nodeID int64) int {
 			continue
 		}
 		segmentAction := action.(*SegmentAction)
-		segment := scheduler.targetMgr.GetSegment(segmentAction.SegmentID())
+		segment := scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), segmentAction.SegmentID(), meta.NextTarget)
 		if action.Type() == ActionTypeGrow {
 			delta += int(segment.GetNumOfRows())
 		} else {
@@ -474,7 +475,7 @@ func (scheduler *taskScheduler) isRelated(task Task, node int64) bool {
 			return true
 		}
 		if task, ok := task.(*SegmentTask); ok {
-			segment := scheduler.targetMgr.GetSegment(task.SegmentID())
+			segment := scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), task.SegmentID(), meta.NextTarget)
 			if segment == nil {
 				continue
 			}
@@ -637,7 +638,7 @@ func (scheduler *taskScheduler) checkSegmentTaskStale(task *SegmentTask) bool {
 	for _, action := range task.Actions() {
 		switch action.Type() {
 		case ActionTypeGrow:
-			segment := scheduler.targetMgr.GetSegment(task.SegmentID())
+			segment := scheduler.targetMgr.GetHistoricalSegment(task.CollectionID(), task.SegmentID(), meta.NextTarget)
 			if segment == nil {
 				log.Warn("task stale due tu the segment to load not exists in targets",
 					zap.Int64("segment", task.segmentID))
@@ -671,7 +672,7 @@ func (scheduler *taskScheduler) checkChannelTaskStale(task *ChannelTask) bool {
 	for _, action := range task.Actions() {
 		switch action.Type() {
 		case ActionTypeGrow:
-			if !scheduler.targetMgr.ContainDmChannel(task.Channel()) {
+			if scheduler.targetMgr.GetDmChannel(task.collectionID, task.Channel(), meta.NextTarget) == nil {
 				log.Warn("the task is stale, the channel to subscribe not exists in targets",
 					zap.String("channel", task.Channel()))
 				return true
@@ -35,7 +35,7 @@ import (
 	"github.com/milvus-io/milvus/internal/querycoordv2/utils"
 	"github.com/milvus-io/milvus/internal/util/etcd"
 	"github.com/milvus-io/milvus/internal/util/typeutil"
-	"github.com/stretchr/testify/mock"
+	mock "github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/suite"
 )
 
@@ -125,8 +125,8 @@ func (suite *TaskSuite) SetupTest() {
 	suite.store = meta.NewMetaStore(suite.kv)
 	suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), suite.store)
 	suite.dist = meta.NewDistributionManager()
-	suite.target = meta.NewTargetManager()
 	suite.broker = meta.NewMockBroker(suite.T())
+	suite.target = meta.NewTargetManager(suite.broker, suite.meta)
 	suite.nodeMgr = session.NewNodeManager()
 	suite.cluster = session.NewMockCluster(suite.T())
 
@@ -198,12 +198,13 @@ func (suite *TaskSuite) TestSubscribeChannelTask() {
 
 	// Test subscribe channel task
 	tasks := []Task{}
+	dmChannels := make([]*datapb.VchannelInfo, 0)
 	for _, channel := range suite.subChannels {
-		suite.target.AddDmChannel(meta.DmChannelFromVChannel(&datapb.VchannelInfo{
+		dmChannels = append(dmChannels, &datapb.VchannelInfo{
 			CollectionID:        suite.collection,
 			ChannelName:         channel,
 			UnflushedSegmentIds: []int64{suite.growingSegments[channel]},
-		}))
+		})
 		task, err := NewChannelTask(
 			ctx,
 			timeout,
@@ -217,6 +218,8 @@ func (suite *TaskSuite) TestSubscribeChannelTask() {
 		err = suite.scheduler.Add(task)
 		suite.NoError(err)
 	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(dmChannels, nil, nil)
+	suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
 	suite.AssertTaskNum(0, len(suite.subChannels), len(suite.subChannels), 0)
 
 	// Process tasks
@@ -258,11 +261,12 @@ func (suite *TaskSuite) TestUnsubscribeChannelTask() {
 
 	// Test unsubscribe channel task
 	tasks := []Task{}
+	dmChannels := make([]*datapb.VchannelInfo, 0)
 	for _, channel := range suite.unsubChannels {
-		suite.target.AddDmChannel(meta.DmChannelFromVChannel(&datapb.VchannelInfo{
+		dmChannels = append(dmChannels, &datapb.VchannelInfo{
 			CollectionID: suite.collection,
 			ChannelName:  channel,
-		}))
+		})
 		task, err := NewChannelTask(
 			ctx,
 			timeout,
@@ -277,6 +281,9 @@ func (suite *TaskSuite) TestUnsubscribeChannelTask() {
 		err = suite.scheduler.Add(task)
 		suite.NoError(err)
 	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(dmChannels, nil, nil)
+	suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
+
 	// Only first channel exists
 	suite.dist.LeaderViewManager.Update(targetNode, &meta.LeaderView{
 		ID: targetNode,
@@ -340,11 +347,10 @@ func (suite *TaskSuite) TestLoadSegmentTask() {
 		ChannelName:  channel.ChannelName,
 	}))
 	tasks := []Task{}
+	segments := make([]*datapb.SegmentBinlogs, 0)
 	for _, segment := range suite.loadSegments {
-		suite.target.AddSegment(&datapb.SegmentInfo{
-			ID:            segment,
-			CollectionID:  suite.collection,
-			PartitionID:   partition,
+		segments = append(segments, &datapb.SegmentBinlogs{
+			SegmentID:     segment,
 			InsertChannel: channel.ChannelName,
 		})
 		task, err := NewSegmentTask(
@@ -360,6 +366,8 @@ func (suite *TaskSuite) TestLoadSegmentTask() {
 		err = suite.scheduler.Add(task)
 		suite.NoError(err)
 	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segments, nil)
+	suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
 	segmentsNum := len(suite.loadSegments)
 	suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)
 
@@ -424,11 +432,10 @@ func (suite *TaskSuite) TestLoadSegmentTaskFailed() {
 		ChannelName:  channel.ChannelName,
 	}))
 	tasks := []Task{}
+	segmentInfos := make([]*datapb.SegmentBinlogs, 0)
 	for _, segment := range suite.loadSegments {
-		suite.target.AddSegment(&datapb.SegmentInfo{
-			ID:            segment,
-			CollectionID:  suite.collection,
-			PartitionID:   partition,
+		segmentInfos = append(segmentInfos, &datapb.SegmentBinlogs{
+			SegmentID:     segment,
 			InsertChannel: channel.ChannelName,
 		})
 		task, err := NewSegmentTask(
@@ -444,6 +451,8 @@ func (suite *TaskSuite) TestLoadSegmentTaskFailed() {
 		err = suite.scheduler.Add(task)
 		suite.NoError(err)
 	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segmentInfos, nil)
+	suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
 	segmentsNum := len(suite.loadSegments)
 	suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)
 
@@ -559,9 +568,14 @@ func (suite *TaskSuite) TestReleaseGrowingSegmentTask() {
 		err = suite.scheduler.Add(task)
 		suite.NoError(err)
 	}
+
+	growings := map[int64]*meta.Segment{}
+	for _, segment := range suite.releaseSegments[1:] {
+		growings[segment] = utils.CreateTestSegment(suite.collection, 1, segment, targetNode, 1, "")
+	}
 	suite.dist.LeaderViewManager.Update(targetNode, &meta.LeaderView{
 		ID:              targetNode,
-		GrowingSegments: typeutil.NewUniqueSet(suite.releaseSegments[1:]...),
+		GrowingSegments: growings,
 	})
 
 	segmentsNum := len(suite.releaseSegments)
@@ -634,13 +648,12 @@ func (suite *TaskSuite) TestMoveSegmentTask() {
 	}
 	tasks := []Task{}
 	segments := make([]*meta.Segment, 0)
+	segmentInfos := make([]*datapb.SegmentBinlogs, 0)
 	for _, segment := range suite.moveSegments {
 		segments = append(segments,
 			utils.CreateTestSegment(suite.collection, partition, segment, sourceNode, 1, channel.ChannelName))
-		suite.target.AddSegment(&datapb.SegmentInfo{
-			ID:            segment,
-			CollectionID:  suite.collection,
-			PartitionID:   partition,
+		segmentInfos = append(segmentInfos, &datapb.SegmentBinlogs{
+			SegmentID:     segment,
 			InsertChannel: channel.ChannelName,
 		})
 		view.Segments[segment] = &querypb.SegmentDist{NodeID: sourceNode, Version: 0}
@@ -659,6 +672,8 @@ func (suite *TaskSuite) TestMoveSegmentTask() {
 		err = suite.scheduler.Add(task)
 		suite.NoError(err)
 	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segmentInfos, nil)
+	suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
 	suite.dist.SegmentDistManager.Update(sourceNode, segments...)
 	suite.dist.LeaderViewManager.Update(leader, view)
 	segmentsNum := len(suite.moveSegments)
@@ -726,12 +741,11 @@ func (suite *TaskSuite) TestTaskCanceled() {
 		ChannelName:  channel.ChannelName,
 	}))
 	tasks := []Task{}
+	segmentInfos := []*datapb.SegmentBinlogs{}
 	for _, segment := range suite.loadSegments {
-		suite.target.AddSegment(&datapb.SegmentInfo{
-			ID:            segment,
-			CollectionID:  suite.collection,
-			PartitionID:   partition,
-			InsertChannel: channel.ChannelName,
+		segmentInfos = append(segmentInfos, &datapb.SegmentBinlogs{
+			SegmentID:     segment,
+			InsertChannel: channel.GetChannelName(),
 		})
 		task, err := NewSegmentTask(
 			ctx,
@@ -748,6 +762,8 @@ func (suite *TaskSuite) TestTaskCanceled() {
 	}
 	segmentsNum := len(suite.loadSegments)
 	suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, partition).Return(nil, segmentInfos, nil)
+	suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, partition)
 
 	// Process tasks
 	suite.dispatchAndWait(targetNode)
@@ -802,19 +818,17 @@ func (suite *TaskSuite) TestSegmentTaskStale() {
 	suite.cluster.EXPECT().LoadSegments(mock.Anything, targetNode, mock.Anything).Return(utils.WrapStatus(commonpb.ErrorCode_Success, ""), nil)
 
 	// Test load segment task
-	suite.meta.ReplicaManager.Put(
-		createReplica(suite.collection, targetNode))
+	suite.meta.ReplicaManager.Put(createReplica(suite.collection, targetNode))
 	suite.dist.ChannelDistManager.Update(targetNode, meta.DmChannelFromVChannel(&datapb.VchannelInfo{
 		CollectionID: suite.collection,
 		ChannelName:  channel.ChannelName,
 	}))
 	tasks := []Task{}
+	segmentInfos := make([]*datapb.SegmentBinlogs, 0)
 	for _, segment := range suite.loadSegments {
-		suite.target.AddSegment(&datapb.SegmentInfo{
-			ID:            segment,
-			CollectionID:  suite.collection,
-			PartitionID:   partition,
-			InsertChannel: channel.ChannelName,
+		segmentInfos = append(segmentInfos, &datapb.SegmentBinlogs{
+			SegmentID:     segment,
+			InsertChannel: channel.GetChannelName(),
 		})
 		task, err := NewSegmentTask(
 			ctx,
@@ -829,6 +843,8 @@ func (suite *TaskSuite) TestSegmentTaskStale() {
 		err = suite.scheduler.Add(task)
 		suite.NoError(err)
 	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segmentInfos, nil)
+	suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1))
 	segmentsNum := len(suite.loadSegments)
 	suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum)
 
@@ -851,7 +867,15 @@ func (suite *TaskSuite) TestSegmentTaskStale() {
 		view.Segments[segment] = &querypb.SegmentDist{NodeID: targetNode, Version: 0}
 	}
 	suite.dist.LeaderViewManager.Update(targetNode, view)
-	suite.target.RemoveSegment(suite.loadSegments[0])
+	segmentInfos = make([]*datapb.SegmentBinlogs, 0)
+	for _, segment := range suite.loadSegments[1:] {
+		segmentInfos = append(segmentInfos, &datapb.SegmentBinlogs{
+			SegmentID:     segment,
+			InsertChannel: channel.GetChannelName(),
+		})
+	}
+	suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(2)).Return(nil, segmentInfos, nil)
+	suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(2))
 	suite.dispatchAndWait(targetNode)
 	suite.AssertTaskNum(0, 0, 0, 0)
 
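The test hunks above all follow one setup pattern: instead of pushing segments and channels into the target manager directly, they mock the broker's recovery info and let the target manager pull it. A condensed sketch of that pattern, with placeholder IDs and suite fields:

// Condensed from the test changes above; collectionID, partitionID and the suite
// fields are placeholders, the calls themselves appear in the hunks.
broker := meta.NewMockBroker(suite.T())
targetMgr := meta.NewTargetManager(broker, suite.meta)

segmentInfos := []*datapb.SegmentBinlogs{{SegmentID: 1, InsertChannel: "dml-channel"}}
broker.EXPECT().
	GetRecoveryInfo(mock.Anything, collectionID, partitionID).
	Return(nil, segmentInfos, nil)

// Pull the mocked recovery info into the next target, then promote it.
targetMgr.UpdateCollectionNextTargetWithPartitions(collectionID, partitionID)
targetMgr.UpdateCollectionCurrentTarget(collectionID)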
@@ -21,12 +21,10 @@ import (
 	"fmt"
 	"math/rand"
 
-	"github.com/milvus-io/milvus/internal/log"
-	"github.com/milvus-io/milvus/internal/proto/datapb"
+	"github.com/samber/lo"
+
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
 	"github.com/milvus-io/milvus/internal/querycoordv2/session"
-	"github.com/samber/lo"
-	"go.uber.org/zap"
 )
 
 func GetReplicaNodesInfo(replicaMgr *meta.ReplicaManager, nodeMgr *session.NodeManager, replicaID int64) []*session.NodeInfo {
@@ -125,41 +123,3 @@ func SpawnReplicas(replicaMgr *meta.ReplicaManager, nodeMgr *session.NodeManager
 	AssignNodesToReplicas(nodeMgr, replicas...)
 	return replicas, replicaMgr.Put(replicas...)
 }
-
-// RegisterTargets fetch channels and segments of given collection(partitions) from DataCoord,
-// and then registers them on Target Manager
-func RegisterTargets(ctx context.Context,
-	targetMgr *meta.TargetManager,
-	broker meta.Broker,
-	collection int64, partitions []int64) error {
-	dmChannels := make(map[string][]*datapb.VchannelInfo)
-
-	for _, partitionID := range partitions {
-		log.Debug("get recovery info...",
-			zap.Int64("collectionID", collection),
-			zap.Int64("partitionID", partitionID))
-		vChannelInfos, binlogs, err := broker.GetRecoveryInfo(ctx, collection, partitionID)
-		if err != nil {
-			return err
-		}
-
-		// Register segments
-		for _, segmentBinlogs := range binlogs {
-			targetMgr.AddSegment(SegmentBinlogs2SegmentInfo(
-				collection,
-				partitionID,
-				segmentBinlogs))
-		}
-
-		for _, info := range vChannelInfos {
-			channelName := info.GetChannelName()
-			dmChannels[channelName] = append(dmChannels[channelName], info)
-		}
-	}
-	// Merge and register channels
-	for _, channels := range dmChannels {
-		dmChannel := MergeDmChannelInfo(channels)
-		targetMgr.AddDmChannel(dmChannel)
-	}
-	return nil
-}
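With RegisterTargets removed here, callers no longer push recovery info into the target manager from this package. Judging from the other hunks in this commit, the equivalent flow appears to be a pull through the broker inside the target manager; a two-line sketch under that assumption:

// Sketch inferred from the surrounding hunks, not a definitive replacement API:
// the TargetManager is constructed with the broker and fetches recovery info itself.
targetMgr := meta.NewTargetManager(broker, queryMeta)
targetMgr.UpdateCollectionNextTargetWithPartitions(collectionID, partitionID)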
@@ -23,7 +23,7 @@ import (
 	"github.com/milvus-io/milvus/internal/util/typeutil"
 )
 
-func CreateTestLeaderView(id, collection int64, channel string, segments map[int64]int64, growings []int64) *meta.LeaderView {
+func CreateTestLeaderView(id, collection int64, channel string, segments map[int64]int64, growings map[int64]*meta.Segment) *meta.LeaderView {
 	segmentVersions := make(map[int64]*querypb.SegmentDist)
 	for segment, node := range segments {
 		segmentVersions[segment] = &querypb.SegmentDist{
@@ -36,7 +36,7 @@ func CreateTestLeaderView(id, collection int64, channel string, segments map[int
 		CollectionID:    collection,
 		Channel:         channel,
 		Segments:        segmentVersions,
-		GrowingSegments: typeutil.NewUniqueSet(growings...),
+		GrowingSegments: growings,
 	}
 }
 
@@ -71,6 +71,15 @@ func CreateTestCollection(collection int64, replica int32) *meta.Collection {
 	}
 }
 
+func CreateTestPartition(collection int64, partitionID int64) *meta.Partition {
+	return &meta.Partition{
+		PartitionLoadInfo: &querypb.PartitionLoadInfo{
+			CollectionID: collection,
+			PartitionID:  partitionID,
+		},
+	}
+}
+
 func CreateTestSegmentInfo(collection, partition, segment int64, channel string) *datapb.SegmentInfo {
 	return &datapb.SegmentInfo{
 		ID: segment,
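The leader view's growing segments are now keyed segments rather than a plain ID set, so fixtures have to build a map. A small usage sketch of the updated helpers; the IDs and channel name are placeholders:

// Placeholder IDs; CreateTestSegment and CreateTestLeaderView are the helpers
// touched in the hunks above.
growings := map[int64]*meta.Segment{
	100: utils.CreateTestSegment(collectionID, partitionID, 100, nodeID, 1, "dml-channel"),
}
view := utils.CreateTestLeaderView(nodeID, collectionID, "dml-channel", map[int64]int64{}, growings)
_ = view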
@@ -198,7 +198,7 @@ func (s *DataSyncServiceSuite) TestRemoveEmptyFlowgraphByChannel() {
 	channelName := fmt.Sprintf("%s_%d_1", Params.CommonCfg.RootCoordDml, defaultCollectionID)
 	deltaChannelName, err := funcutil.ConvertChannelName(channelName, Params.CommonCfg.RootCoordDml, Params.CommonCfg.RootCoordDelta)
 	s.Require().NoError(err)
-	err = s.dsService.metaReplica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, channelName, defaultSegmentVersion, segmentTypeSealed)
+	err = s.dsService.metaReplica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, channelName, defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeSealed)
 	s.Require().NoError(err)
 
 	_, err = s.dsService.addFlowGraphsForDeltaChannels(defaultCollectionID, []string{deltaChannelName})
@@ -40,6 +40,7 @@ func TestFlowGraphDeleteNode_delete(t *testing.T) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeSealed)
 	assert.NoError(t, err)
 
@@ -62,6 +63,7 @@ func TestFlowGraphDeleteNode_delete(t *testing.T) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeSealed)
 	assert.NoError(t, err)
 
@@ -95,6 +97,7 @@ func TestFlowGraphDeleteNode_delete(t *testing.T) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeGrowing)
 	assert.NoError(t, err)
 
@@ -116,6 +119,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeSealed)
 	assert.NoError(t, err)
 
@@ -195,6 +199,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeSealed)
 	assert.NoError(t, err)
 
@@ -220,6 +225,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeSealed)
 	assert.NoError(t, err)
 
@@ -246,6 +252,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeSealed)
 	assert.NoError(t, err)
 
@@ -271,6 +278,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeSealed)
 	assert.NoError(t, err)
 
@@ -34,6 +34,7 @@ import (
 	"github.com/milvus-io/milvus/internal/common"
 	"github.com/milvus-io/milvus/internal/log"
 	"github.com/milvus-io/milvus/internal/mq/msgstream"
+	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/proto/segcorepb"
 	"github.com/milvus-io/milvus/internal/storage"
 	"github.com/milvus-io/milvus/internal/util/flowgraph"
@@ -134,8 +135,16 @@ func (iNode *insertNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
 			panic(err)
 		}
 		if !has {
-			log.Info("Add growing segment", zap.Int64("collectionID", insertMsg.CollectionID), zap.Int64("segmentID", insertMsg.SegmentID))
-			err = iNode.metaReplica.addSegment(insertMsg.SegmentID, insertMsg.PartitionID, insertMsg.CollectionID, insertMsg.ShardName, 0, segmentTypeGrowing)
+			log.Info("Add growing segment",
+				zap.Int64("collectionID", insertMsg.CollectionID),
+				zap.Int64("segmentID", insertMsg.SegmentID),
+				zap.Uint64("startPosition", insertMsg.BeginTs()),
+			)
+			startPosition := &internalpb.MsgPosition{
+				ChannelName: insertMsg.ShardName,
+				Timestamp:   insertMsg.BeginTs(),
+			}
+			err = iNode.metaReplica.addSegment(insertMsg.SegmentID, insertMsg.PartitionID, insertMsg.CollectionID, insertMsg.ShardName, 0, startPosition, segmentTypeGrowing)
 			if err != nil {
 				// error occurs when collection or partition cannot be found, collection and partition should be created before
 				err = fmt.Errorf("insertNode addSegment failed, err = %s", err)
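The insert node now records where each new growing segment started consuming. A stripped-down sketch of that bookkeeping, using only the fields referenced in the hunk above (insertMsg and replica stand in for the surrounding flowgraph state):

// Sketch: derive a start position for a brand-new growing segment from the first
// insert message seen on its shard, then register the segment with it.
startPosition := &internalpb.MsgPosition{
	ChannelName: insertMsg.ShardName,
	Timestamp:   insertMsg.BeginTs(),
}
err := replica.addSegment(insertMsg.SegmentID, insertMsg.PartitionID, insertMsg.CollectionID,
	insertMsg.ShardName, 0, startPosition, segmentTypeGrowing)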
@@ -45,6 +45,7 @@ func getInsertNode() (*insertNode, error) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeGrowing)
 	if err != nil {
 		return nil, err
@@ -1260,9 +1260,13 @@ func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.Get
 	sealedSegments := node.metaReplica.getSealedSegments()
 	shardClusters := node.ShardClusterService.GetShardClusters()
 
-	channelGrowingsMap := make(map[string][]int64)
+	channelGrowingsMap := make(map[string]map[int64]*internalpb.MsgPosition)
 	for _, s := range growingSegments {
-		channelGrowingsMap[s.vChannelID] = append(channelGrowingsMap[s.vChannelID], s.ID())
+		if _, ok := channelGrowingsMap[s.vChannelID]; !ok {
+			channelGrowingsMap[s.vChannelID] = make(map[int64]*internalpb.MsgPosition)
+		}
+
+		channelGrowingsMap[s.vChannelID][s.ID()] = s.startPosition
 	}
 
 	segmentVersionInfos := make([]*querypb.SegmentVersionInfo, 0, len(sealedSegments))
@@ -1295,7 +1299,7 @@ func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.Get
 			Collection:        sc.collectionID,
 			Channel:           sc.vchannelName,
 			SegmentDist:       mapping,
-			GrowingSegmentIDs: channelGrowingsMap[sc.vchannelName],
+			GrowingSegments:   channelGrowingsMap[sc.vchannelName],
 		}
 		leaderViews = append(leaderViews, view)
 
@@ -29,8 +29,6 @@ import (
 	"strconv"
 	"sync"
 
-	"go.uber.org/zap"
-
 	"github.com/milvus-io/milvus-proto/go-api/schemapb"
 	"github.com/milvus-io/milvus/internal/common"
 	"github.com/milvus-io/milvus/internal/log"
@@ -42,6 +40,7 @@ import (
 	"github.com/milvus-io/milvus/internal/util/paramtable"
 	"github.com/milvus-io/milvus/internal/util/typeutil"
 	"github.com/samber/lo"
+	"go.uber.org/zap"
 )
 
 var (
@@ -105,7 +104,7 @@ type ReplicaInterface interface {
 
 	// segment
 	// addSegment add a new segment to collectionReplica
-	addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID Channel, version UniqueID, segType segmentType) error
+	addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID Channel, version UniqueID, seekPosition *internalpb.MsgPosition, segType segmentType) error
 	// setSegment adds a segment to collectionReplica
 	setSegment(segment *Segment) error
 	// removeSegment removes a segment from collectionReplica
@@ -187,7 +186,7 @@ func (replica *metaReplica) printReplica() {
 	log.Info("excludedSegments in collectionReplica", zap.Any("info", replica.excludedSegments))
 }
 
-//----------------------------------------------------------------------------------------------------- collection
+// ----------------------------------------------------------------------------------------------------- collection
 // getCollectionIDs gets all the collection ids in the collectionReplica
 func (replica *metaReplica) getCollectionIDs() []UniqueID {
 	replica.mu.RLock()
@@ -396,7 +395,7 @@ func (replica *metaReplica) getSegmentInfosByColID(collectionID UniqueID) []*que
 	return segmentInfos
 }
 
-//----------------------------------------------------------------------------------------------------- partition
+// ----------------------------------------------------------------------------------------------------- partition
 // addPartition adds a new partition to collection
 func (replica *metaReplica) addPartition(collectionID UniqueID, partitionID UniqueID) error {
 	replica.mu.Lock()
@@ -565,9 +564,9 @@ func (replica *metaReplica) getSegmentIDsPrivate(partitionID UniqueID, segType s
 	return partition.getSegmentIDs(segType)
 }
 
-//----------------------------------------------------------------------------------------------------- segment
+// ----------------------------------------------------------------------------------------------------- segment
 // addSegment add a new segment to collectionReplica
-func (replica *metaReplica) addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID Channel, version UniqueID, segType segmentType) error {
+func (replica *metaReplica) addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID Channel, version UniqueID, seekPosition *internalpb.MsgPosition, segType segmentType) error {
 	replica.mu.Lock()
 	defer replica.mu.Unlock()
 
@@ -578,7 +577,7 @@ func (replica *metaReplica) addSegment(segmentID UniqueID, partitionID UniqueID,
 	collection.mu.Lock()
 	defer collection.mu.Unlock()
 
-	seg, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segType, version, replica.cgoPool)
+	seg, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segType, version, seekPosition, replica.cgoPool)
 	if err != nil {
 		return err
 	}
@@ -147,7 +147,7 @@ func TestMetaReplica_segment(t *testing.T) {
 
 		const segmentNum = 3
 		for i := 0; i < segmentNum; i++ {
-			err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, segmentTypeGrowing)
+			err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeGrowing)
 			assert.NoError(t, err)
 			targetSeg, err := replica.getSegmentByID(UniqueID(i), segmentTypeGrowing)
 			assert.NoError(t, err)
@@ -162,7 +162,7 @@ func TestMetaReplica_segment(t *testing.T) {
 
 		const segmentNum = 3
 		for i := 0; i < segmentNum; i++ {
-			err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, segmentTypeGrowing)
+			err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeGrowing)
 			assert.NoError(t, err)
 			targetSeg, err := replica.getSegmentByID(UniqueID(i), segmentTypeGrowing)
 			assert.NoError(t, err)
@@ -178,7 +178,7 @@ func TestMetaReplica_segment(t *testing.T) {
 
 		const segmentNum = 3
 		for i := 0; i < segmentNum; i++ {
-			err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, segmentTypeGrowing)
+			err := replica.addSegment(UniqueID(i), defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeGrowing)
 			assert.NoError(t, err)
 			targetSeg, err := replica.getSegmentByID(UniqueID(i), segmentTypeGrowing)
 			assert.NoError(t, err)
@@ -197,10 +197,10 @@ func TestMetaReplica_segment(t *testing.T) {
 		assert.NoError(t, err)
 		defer replica.freeAll()
 
-		err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, segmentTypeGrowing)
+		err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeGrowing)
 		assert.NoError(t, err)
 
-		err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, segmentTypeGrowing)
+		err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeGrowing)
 		assert.Error(t, err)
 	})
 
@@ -210,7 +210,7 @@ func TestMetaReplica_segment(t *testing.T) {
 		defer replica.freeAll()
 
 		invalidType := commonpb.SegmentState_NotExist
-		err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, invalidType)
+		err = replica.addSegment(defaultSegmentID, defaultPartitionID, defaultCollectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, invalidType)
 		assert.Error(t, err)
 		_, err = replica.getSegmentByID(defaultSegmentID, invalidType)
 		assert.Error(t, err)
@@ -250,12 +250,12 @@ func TestMetaReplica_segment(t *testing.T) {
 			},
 		}
 
-		segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+		segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 		assert.NoError(t, err)
 		err = replica.setSegment(segment1)
 		assert.NoError(t, err)
 
-		segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "", segmentTypeSealed, defaultSegmentVersion, pool)
+		segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 		assert.NoError(t, err)
 		segment2.setIndexedFieldInfo(fieldID, indexInfo)
 		err = replica.setSegment(segment2)
@@ -285,22 +285,22 @@ func TestMetaReplica_segment(t *testing.T) {
 		replica.addPartition(defaultCollectionID, defaultPartitionID)
 		replica.addPartition(defaultCollectionID, defaultPartitionID+1)
 
-		segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeGrowing, defaultSegmentVersion, pool)
+		segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 		assert.NoError(t, err)
 		err = replica.setSegment(segment1)
 		assert.NoError(t, err)
 
-		segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, pool)
+		segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 		assert.NoError(t, err)
 		err = replica.setSegment(segment2)
 		assert.NoError(t, err)
 
-		segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, pool)
+		segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 		assert.NoError(t, err)
 		err = replica.setSegment(segment3)
 		assert.NoError(t, err)
 
-		segment4, err := newSegment(collection, UniqueID(4), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion, pool)
+		segment4, err := newSegment(collection, UniqueID(4), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 		assert.NoError(t, err)
 		err = replica.setSegment(segment4)
 		assert.NoError(t, err)
@@ -355,13 +355,13 @@ func TestMetaReplica_BlackList(t *testing.T) {
 	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
 	require.NoError(t, err)
 
-	segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion, pool)
+	segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.NoError(t, err)
 
-	segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeSealed, defaultSegmentVersion, pool)
+	segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.NoError(t, err)
 
-	segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.NoError(t, err)
 
 	replica.addSegmentsLoadingList([]UniqueID{1, 2, 3})
@ -93,6 +93,12 @@ const (
 	defaultChannelName = "default-channel"
 )

+var defaultSegmentStartPosition = &internalpb.MsgPosition{
+	ChannelName: defaultChannelName,
+	MsgID:       []byte{},
+	Timestamp:   0,
+}
+
 const (
 	defaultMsgLength = 100
 	defaultDelLength = 10
@ -1190,6 +1196,7 @@ func genSealedSegment(schema *schemapb.CollectionSchema,
 		vChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		pool)
 	if err != nil {
 		return nil, err

@ -1281,6 +1288,7 @@ func genSimpleReplicaWithGrowingSegment() (ReplicaInterface, error) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeGrowing)
 	if err != nil {
 		return nil, err

@ -64,7 +64,7 @@ func initTestMeta(t *testing.T, node *QueryNode, collectionID UniqueID, segmentI
 	err = node.metaReplica.addPartition(collection.ID(), defaultPartitionID)
 	assert.NoError(t, err)

-	err = node.metaReplica.addSegment(segmentID, defaultPartitionID, collectionID, "", defaultSegmentVersion, segmentTypeSealed)
+	err = node.metaReplica.addSegment(segmentID, defaultPartitionID, collectionID, "", defaultSegmentVersion, defaultSegmentStartPosition, segmentTypeSealed)
 	assert.NoError(t, err)
 }

@ -34,6 +34,7 @@ import (
 	"sync"
 	"unsafe"

+	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/util/concurrency"
 	"github.com/milvus-io/milvus/internal/util/funcutil"
 	"github.com/milvus-io/milvus/internal/util/paramtable"

@ -83,6 +84,7 @@ type Segment struct {
 	partitionID   UniqueID
 	collectionID  UniqueID
 	version       UniqueID
+	startPosition *internalpb.MsgPosition // for growing segment release

 	vChannelID    Channel
 	lastMemSize   int64

@ -173,6 +175,7 @@ func newSegment(collection *Collection,
 	vChannelID Channel,
 	segType segmentType,
 	version UniqueID,
+	startPosition *internalpb.MsgPosition,
 	pool *concurrency.Pool) (*Segment, error) {
 	/*
 		CSegmentInterface

@ -214,6 +217,7 @@ func newSegment(collection *Collection,
 	partitionID:       partitionID,
 	collectionID:      collectionID,
 	version:           version,
+	startPosition:     startPosition,
 	vChannelID:        vChannelID,
 	indexedFieldInfos: typeutil.NewConcurrentMap[int64, *IndexedFieldInfo](),
 	recentlyModified:  atomic.NewBool(false),
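Taken together, the four hunks above thread a message-stream position through the querynode segment: the Segment struct gains a startPosition field, and newSegment accepts one extra *internalpb.MsgPosition argument so a growing segment remembers where consumption of its channel began (the field comment notes it is used when the growing segment is released). A minimal sketch of the new call shape follows; the channel name and IDs are illustrative placeholders, not values taken from the patch.

	// Sketch only: argument order follows the updated signature
	// (collection, segmentID, partitionID, collectionID, vChannel, segType, version, startPosition, pool).
	startPos := &internalpb.MsgPosition{
		ChannelName: "dml-channel-0", // placeholder channel name
		MsgID:       []byte{},
		Timestamp:   0,
	}
	segment, err := newSegment(collection, UniqueID(1), partitionID, collectionID,
		"dml-channel-0", segmentTypeGrowing, version, startPos, pool)
	if err != nil {
		return err // a real caller propagates the constructor failure
	}
	_ = segment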
@ -148,7 +148,7 @@ func (loader *segmentLoader) LoadSegment(ctx context.Context, req *querypb.LoadS
 		return err
 	}

-	segment, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segmentType, req.GetVersion(), loader.cgoPool)
+	segment, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segmentType, req.GetVersion(), info.StartPosition, loader.cgoPool)
 	if err != nil {
 		log.Error("load segment failed when create new segment",
 			zap.Int64("partitionID", partitionID),
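With this change the loader no longer invents a position locally; it forwards info.StartPosition from the SegmentLoadInfo carried in the load request. A rough sketch of how the request side might populate that field is below; StartPosition is the field this patch wires through, while segmentID, partitionID, msgID, ts and the channel name are placeholders.

	// Sketch of a load-request payload; only the StartPosition wiring comes from this patch.
	info := &querypb.SegmentLoadInfo{
		SegmentID:     segmentID,
		PartitionID:   partitionID,
		CollectionID:  collectionID,
		InsertChannel: "dml-channel-0", // placeholder
		// Where the segment's data begins in the stream; the querynode
		// passes this straight into newSegment (see the hunk above).
		StartPosition: &internalpb.MsgPosition{
			ChannelName: "dml-channel-0",
			MsgID:       msgID, // placeholder
			Timestamp:   ts,    // placeholder
		},
	}
	req := &querypb.LoadSegmentsRequest{
		Infos: []*querypb.SegmentLoadInfo{info},
	}
	_ = req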
@ -188,6 +188,7 @@ func TestSegmentLoader_loadSegmentFieldsData(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		pool)
 	assert.Nil(t, err)

@ -346,6 +347,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		pool)
 	assert.Nil(t, err)

@ -389,6 +391,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		pool)
 	assert.Nil(t, err)

@ -429,7 +432,7 @@ func TestSegmentLoader_testLoadGrowing(t *testing.T) {
 	collection, err := node.metaReplica.getCollectionByID(defaultCollectionID)
 	assert.NoError(t, err)

-	segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion, loader.cgoPool)
+	segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, loader.cgoPool)
 	assert.Nil(t, err)

 	insertData, err := genInsertData(defaultMsgLength, collection.schema)

@ -458,7 +461,7 @@ func TestSegmentLoader_testLoadGrowing(t *testing.T) {
 	collection, err := node.metaReplica.getCollectionByID(defaultCollectionID)
 	assert.NoError(t, err)

-	segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion, node.loader.cgoPool)
+	segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, node.loader.cgoPool)
 	assert.Nil(t, err)

 	insertData, err := genInsertData(defaultMsgLength, collection.schema)
@ -39,7 +39,7 @@ import (
 	"github.com/milvus-io/milvus/internal/util/funcutil"
 )

-//-------------------------------------------------------------------------------------- constructor and destructor
+// -------------------------------------------------------------------------------------- constructor and destructor
 func TestSegment_newSegment(t *testing.T) {
 	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
 	require.NoError(t, err)

@ -52,7 +52,7 @@ func TestSegment_newSegment(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.Nil(t, err)
 	assert.Equal(t, segmentID, segment.segmentID)
 	deleteSegment(segment)

@ -62,7 +62,7 @@ func TestSegment_newSegment(t *testing.T) {
 		_, err = newSegment(collection,
 			defaultSegmentID,
 			defaultPartitionID,
-			collectionID, "", 100, defaultSegmentVersion, pool)
+			collectionID, "", 100, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 		assert.Error(t, err)
 	})
 }

@ -79,7 +79,7 @@ func TestSegment_deleteSegment(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@ -94,7 +94,7 @@ func TestSegment_deleteSegment(t *testing.T) {
 	})
 }

-//-------------------------------------------------------------------------------------- stats functions
+// -------------------------------------------------------------------------------------- stats functions
 func TestSegment_getRowCount(t *testing.T) {
 	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
 	require.NoError(t, err)

@ -106,7 +106,7 @@ func TestSegment_getRowCount(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@ -151,7 +151,7 @@ func TestSegment_retrieve(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@ -238,7 +238,7 @@ func TestSegment_getDeletedCount(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@ -290,7 +290,7 @@ func TestSegment_getMemSize(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@ -317,7 +317,7 @@ func TestSegment_getMemSize(t *testing.T) {
 	deleteCollection(collection)
 }

-//-------------------------------------------------------------------------------------- dm & search functions
+// -------------------------------------------------------------------------------------- dm & search functions
 func TestSegment_segmentInsert(t *testing.T) {
 	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
 	require.NoError(t, err)

@ -328,7 +328,7 @@ func TestSegment_segmentInsert(t *testing.T) {
 	collection := newCollection(collectionID, schema)
 	assert.Equal(t, collection.ID(), collectionID)
 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@ -375,7 +375,7 @@ func TestSegment_segmentDelete(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@ -461,7 +461,7 @@ func TestSegment_segmentSearch(t *testing.T) {
 	deleteCollection(collection)
 }

-//-------------------------------------------------------------------------------------- preDm functions
+// -------------------------------------------------------------------------------------- preDm functions
 func TestSegment_segmentPreInsert(t *testing.T) {
 	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
 	require.NoError(t, err)

@ -472,7 +472,7 @@ func TestSegment_segmentPreInsert(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@ -494,7 +494,7 @@ func TestSegment_segmentPreDelete(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@ -543,6 +543,7 @@ func TestSegment_segmentLoadDeletedRecord(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		pool)
 	assert.Nil(t, err)
 	ids := []int64{1, 2, 3}

@ -624,6 +625,7 @@ func TestSegment_BasicMetrics(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		pool)
 	assert.Nil(t, err)

@ -675,6 +677,7 @@ func TestSegment_fillIndexedFieldsData(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		pool)
 	assert.Nil(t, err)

@ -1024,6 +1027,7 @@ func TestUpdateBloomFilter(t *testing.T) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeGrowing)
 	assert.NoError(t, err)
 	seg, err := replica.getSegmentByID(defaultSegmentID, segmentTypeGrowing)

@ -1049,6 +1053,7 @@ func TestUpdateBloomFilter(t *testing.T) {
 		defaultCollectionID,
 		defaultDMLChannel,
 		defaultSegmentVersion,
+		defaultSegmentStartPosition,
 		segmentTypeGrowing)
 	assert.NoError(t, err)
 	seg, err := replica.getSegmentByID(defaultSegmentID, segmentTypeGrowing)
@ -679,6 +679,9 @@ type queryCoordConfig struct {
 	LoadTimeoutSeconds   time.Duration
 	CheckHandoffInterval time.Duration
 	EnableActiveStandby  bool
+
+	NextTargetSurviveTime    time.Duration
+	UpdateNextTargetInterval time.Duration
 }

 func (p *queryCoordConfig) init(base *BaseTable) {

@ -703,6 +706,8 @@ func (p *queryCoordConfig) init(base *BaseTable) {
 	p.initLoadTimeoutSeconds()
 	p.initCheckHandoffInterval()
 	p.initEnableActiveStandby()
+	p.initNextTargetSurviveTime()
+	p.initUpdateNextTargetInterval()
 }

 func (p *queryCoordConfig) initTaskRetryNum() {
@ -822,6 +827,24 @@ func (p *queryCoordConfig) initCheckHandoffInterval() {
 	p.CheckHandoffInterval = time.Duration(checkHandoffInterval) * time.Millisecond
 }

+func (p *queryCoordConfig) initNextTargetSurviveTime() {
+	interval := p.Base.LoadWithDefault("queryCoord.NextTargetSurviveTime", "300")
+	nextTargetSurviveTime, err := strconv.ParseInt(interval, 10, 64)
+	if err != nil {
+		panic(err)
+	}
+	p.NextTargetSurviveTime = time.Duration(nextTargetSurviveTime) * time.Second
+}
+
+func (p *queryCoordConfig) initUpdateNextTargetInterval() {
+	interval := p.Base.LoadWithDefault("queryCoord.UpdateNextTargetInterval", "30")
+	updateNextTargetInterval, err := strconv.ParseInt(interval, 10, 64)
+	if err != nil {
+		panic(err)
+	}
+	p.UpdateNextTargetInterval = time.Duration(updateNextTargetInterval) * time.Second
+}
+
 // /////////////////////////////////////////////////////////////////////////////
 // --- querynode ---
 type queryNodeConfig struct {
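Both new settings are read as plain integer seconds (defaults 300 and 30) and stored as time.Duration values. How they are consumed lies outside this diff; the loop below is a purely hypothetical sketch of the intent suggested by the names, where the update interval paces periodic refreshes of the next target and the survive time bounds how long an unpromoted next target is kept.

	// Hypothetical consumer of the two new intervals; only the queryCoordConfig
	// fields come from this patch, everything else is invented for illustration.
	func runTargetUpdater(stop <-chan struct{}, cfg *queryCoordConfig,
		refresh func(), dropOlderThan func(time.Duration)) {
		ticker := time.NewTicker(cfg.UpdateNextTargetInterval)
		defer ticker.Stop()
		for {
			select {
			case <-stop:
				return
			case <-ticker.C:
				refresh()                                // rebuild the next target periodically
				dropOlderThan(cfg.NextTargetSurviveTime) // discard next targets past the survive time
			}
		}
	}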