Refactor handoff handler (#17806)

Signed-off-by: yah01 <yang.cen@zilliz.com>
Co-authored-by: xiaofan-luan <xiaofan.luan@zilliz.com>

yah01 2022-06-29 23:26:19 +08:00 committed by GitHub
parent 95a3830dc6
commit ef6859a4d6
22 changed files with 1286 additions and 1105 deletions

View File

@@ -455,7 +455,7 @@ func (t *compactionTrigger) getCandidateSegments(channel string, partitionID Uni
segments := t.meta.GetSegmentsByChannel(channel)
var res []*SegmentInfo
for _, s := range segments {
if !isFlush(s) || s.GetInsertChannel() != channel ||
if !isSegmentHealthy(s) || !isFlush(s) || s.GetInsertChannel() != channel ||
s.GetPartitionID() != partitionID || s.isCompacting || t.segRefer.HasSegmentLock(s.ID) {
continue
}

View File

@@ -30,14 +30,14 @@ import (
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util"
"go.uber.org/zap"
)
const (
metaPrefix = "datacoord-meta"
segmentPrefix = metaPrefix + "/s"
channelRemovePrefix = metaPrefix + "/channel-removal"
handoffSegmentPrefix = "querycoord-handoff"
metaPrefix = "datacoord-meta"
segmentPrefix = metaPrefix + "/s"
channelRemovePrefix = metaPrefix + "/channel-removal"
removeFlagTomestone = "removed"
)
@@ -1049,7 +1049,7 @@ func buildSegmentPath(collectionID UniqueID, partitionID UniqueID, segmentID Uni
// buildQuerySegmentPath common logic mapping segment info to corresponding key of queryCoord in kv store
func buildQuerySegmentPath(collectionID UniqueID, partitionID UniqueID, segmentID UniqueID) string {
return fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, collectionID, partitionID, segmentID)
return fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, collectionID, partitionID, segmentID)
}
// buildChannelRemovePat builds vchannel remove flag path

View File

@@ -28,6 +28,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util"
"github.com/stretchr/testify/assert"
)
@@ -325,7 +326,7 @@ func TestSaveHandoffMeta(t *testing.T) {
err = meta.saveSegmentInfo(segmentInfo)
assert.Nil(t, err)
keys, _, err := meta.client.LoadWithPrefix(handoffSegmentPrefix)
keys, _, err := meta.client.LoadWithPrefix(util.HandoffSegmentPrefix)
assert.Nil(t, err)
assert.Equal(t, 1, len(keys))
segmentID, err := strconv.ParseInt(filepath.Base(keys[0]), 10, 64)

View File

@@ -266,6 +266,7 @@ message ReplicaSegmentsInfo {
message HandoffSegmentsRequest {
common.MsgBase base = 1;
repeated SegmentInfo segmentInfos = 2;
repeated int64 released_segments = 3;
}
message LoadBalanceRequest {

View File

@@ -1968,6 +1968,7 @@ func (m *ReplicaSegmentsInfo) GetSegmentIds() []int64 {
type HandoffSegmentsRequest struct {
Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
SegmentInfos []*SegmentInfo `protobuf:"bytes,2,rep,name=segmentInfos,proto3" json:"segmentInfos,omitempty"`
ReleasedSegments []int64 `protobuf:"varint,3,rep,packed,name=released_segments,json=releasedSegments,proto3" json:"released_segments,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -2012,6 +2013,13 @@ func (m *HandoffSegmentsRequest) GetSegmentInfos() []*SegmentInfo {
return nil
}
func (m *HandoffSegmentsRequest) GetReleasedSegments() []int64 {
if m != nil {
return m.ReleasedSegments
}
return nil
}
type LoadBalanceRequest struct {
Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
SourceNodeIDs []int64 `protobuf:"varint,2,rep,packed,name=source_nodeIDs,json=sourceNodeIDs,proto3" json:"source_nodeIDs,omitempty"`
@@ -2796,190 +2804,191 @@ func init() {
func init() { proto.RegisterFile("query_coord.proto", fileDescriptor_aab7cc9a69ed26e8) }
var fileDescriptor_aab7cc9a69ed26e8 = []byte{
// 2921 bytes of a gzipped FileDescriptorProto
// 2929 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3a, 0xc9, 0x8f, 0x1c, 0x57,
0xf9, 0x53, 0xbd, 0x4d, 0xf7, 0xd7, 0x5b, 0xf9, 0x8d, 0x3d, 0xee, 0xf4, 0xcf, 0x4e, 0x26, 0xe5,
0x38, 0x99, 0xdf, 0x84, 0x8c, 0xcd, 0x38, 0x44, 0x09, 0x04, 0x09, 0x7b, 0x26, 0x9e, 0x0c, 0xb6,
0x27, 0x43, 0xb5, 0x1d, 0x90, 0x15, 0xa9, 0xa8, 0xae, 0x7a, 0xd3, 0x53, 0x72, 0x2d, 0xed, 0x7a,
0xd5, 0xe3, 0x4c, 0xce, 0x08, 0x89, 0x4d, 0x08, 0x71, 0x45, 0x11, 0x07, 0x38, 0x20, 0x11, 0x71,
0xe1, 0xc2, 0x05, 0x71, 0xe3, 0x8a, 0xc4, 0x1f, 0xc0, 0x11, 0xc1, 0x1d, 0x71, 0x45, 0x6f, 0xa9,
0xea, 0x5a, 0xdd, 0x3d, 0xd3, 0x71, 0x12, 0x24, 0x6e, 0x55, 0xdf, 0x5b, 0xbe, 0x7d, 0x7b, 0xef,
0xc1, 0xb9, 0xc7, 0x13, 0xec, 0x9f, 0x68, 0x86, 0xe7, 0xf9, 0xe6, 0xe6, 0xd8, 0xf7, 0x02, 0x0f,
0x21, 0xc7, 0xb2, 0x8f, 0x27, 0x84, 0xff, 0x6d, 0xb2, 0xf1, 0x7e, 0xcb, 0xf0, 0x1c, 0xc7, 0x73,
0x39, 0xac, 0xdf, 0x8a, 0xcf, 0xe8, 0x77, 0x2c, 0x37, 0xc0, 0xbe, 0xab, 0xdb, 0xe1, 0x28, 0x31,
0x8e, 0xb0, 0xa3, 0x8b, 0x3f, 0xd9, 0xd4, 0x03, 0x3d, 0xbe, 0xbf, 0xf2, 0x3d, 0x09, 0x56, 0x07,
0x47, 0xde, 0x93, 0x6d, 0xcf, 0xb6, 0xb1, 0x11, 0x58, 0x9e, 0x4b, 0x54, 0xfc, 0x78, 0x82, 0x49,
0x80, 0xae, 0x43, 0x65, 0xa8, 0x13, 0xdc, 0x93, 0xd6, 0xa4, 0xf5, 0xe6, 0xd6, 0xa5, 0xcd, 0x04,
0x25, 0x82, 0x84, 0x7b, 0x64, 0x74, 0x4b, 0x27, 0x58, 0x65, 0x33, 0x11, 0x82, 0x8a, 0x39, 0xdc,
0xdb, 0xe9, 0x95, 0xd6, 0xa4, 0xf5, 0xb2, 0xca, 0xbe, 0xd1, 0x4b, 0xd0, 0x36, 0xa2, 0xbd, 0xf7,
0x76, 0x48, 0xaf, 0xbc, 0x56, 0x5e, 0x2f, 0xab, 0x49, 0xa0, 0xf2, 0x37, 0x09, 0x2e, 0x66, 0xc8,
0x20, 0x63, 0xcf, 0x25, 0x18, 0xdd, 0x80, 0x1a, 0x09, 0xf4, 0x60, 0x42, 0x04, 0x25, 0xff, 0x97,
0x4b, 0xc9, 0x80, 0x4d, 0x51, 0xc5, 0xd4, 0x2c, 0xda, 0x52, 0x0e, 0x5a, 0xf4, 0x65, 0x38, 0x6f,
0xb9, 0xf7, 0xb0, 0xe3, 0xf9, 0x27, 0xda, 0x18, 0xfb, 0x06, 0x76, 0x03, 0x7d, 0x84, 0x43, 0x1a,
0x57, 0xc2, 0xb1, 0x83, 0xe9, 0x10, 0x7a, 0x03, 0x2e, 0x72, 0x2d, 0x11, 0xec, 0x1f, 0x5b, 0x06,
0xd6, 0xf4, 0x63, 0xdd, 0xb2, 0xf5, 0xa1, 0x8d, 0x7b, 0x95, 0xb5, 0xf2, 0x7a, 0x5d, 0xbd, 0xc0,
0x86, 0x07, 0x7c, 0xf4, 0x66, 0x38, 0xa8, 0xfc, 0x5a, 0x82, 0x0b, 0x94, 0xc3, 0x03, 0xdd, 0x0f,
0xac, 0x67, 0x20, 0x67, 0x05, 0x5a, 0x71, 0xde, 0x7a, 0x65, 0x36, 0x96, 0x80, 0xd1, 0x39, 0xe3,
0x10, 0x3d, 0x95, 0x49, 0x85, 0xb1, 0x99, 0x80, 0x29, 0xbf, 0x12, 0x06, 0x11, 0xa7, 0x73, 0x11,
0x45, 0xa4, 0x71, 0x96, 0xb2, 0x38, 0xcf, 0xa0, 0x06, 0xe5, 0xef, 0x12, 0x5c, 0xb8, 0xeb, 0xe9,
0xe6, 0xd4, 0x60, 0x3e, 0x7b, 0x71, 0x7e, 0x1d, 0x6a, 0xdc, 0xbb, 0x7a, 0x15, 0x86, 0xeb, 0x6a,
0x12, 0x97, 0xf0, 0xbc, 0x29, 0x85, 0x03, 0x06, 0x50, 0xc5, 0x22, 0x74, 0x15, 0x3a, 0x3e, 0x1e,
0xdb, 0x96, 0xa1, 0x6b, 0xee, 0xc4, 0x19, 0x62, 0xbf, 0x57, 0x5d, 0x93, 0xd6, 0xab, 0x6a, 0x5b,
0x40, 0xf7, 0x19, 0x50, 0xf9, 0x85, 0x04, 0x3d, 0x15, 0xdb, 0x58, 0x27, 0xf8, 0xf3, 0x64, 0x76,
0x15, 0x6a, 0xae, 0x67, 0xe2, 0xbd, 0x1d, 0xc6, 0x6c, 0x59, 0x15, 0x7f, 0xca, 0x8f, 0x4a, 0x5c,
0x11, 0x5f, 0x70, 0xbb, 0x8e, 0x29, 0xab, 0xfa, 0xe9, 0x28, 0xab, 0x96, 0xa7, 0xac, 0x3f, 0x4d,
0x95, 0xf5, 0x45, 0x17, 0xc8, 0x54, 0xa1, 0xd5, 0x84, 0x42, 0x7f, 0x23, 0xc1, 0x73, 0xbb, 0x38,
0x88, 0xc8, 0xa7, 0xfe, 0x8c, 0xbf, 0xa0, 0xc1, 0xea, 0x13, 0x09, 0xfa, 0x79, 0xb4, 0x2e, 0x12,
0xb0, 0x1e, 0xc2, 0x6a, 0x84, 0x43, 0x33, 0x31, 0x31, 0x7c, 0x6b, 0xcc, 0xd4, 0xc8, 0x42, 0x57,
0x73, 0xeb, 0xca, 0x66, 0x36, 0x25, 0x6f, 0xa6, 0x29, 0xb8, 0x10, 0x6d, 0xb1, 0x13, 0xdb, 0x41,
0xf9, 0x89, 0x04, 0x17, 0x76, 0x71, 0x30, 0xc0, 0x23, 0x07, 0xbb, 0xc1, 0x9e, 0x7b, 0xe8, 0x9d,
0x5d, 0xae, 0xcf, 0x03, 0x10, 0xb1, 0x4f, 0x14, 0x56, 0x63, 0x90, 0x79, 0x64, 0xcc, 0xb2, 0x7f,
0x9a, 0x9e, 0x45, 0x64, 0xf7, 0x15, 0xa8, 0x5a, 0xee, 0xa1, 0x17, 0x8a, 0xea, 0x85, 0x3c, 0x51,
0xc5, 0x91, 0xf1, 0xd9, 0x8a, 0xcb, 0xa9, 0x38, 0xd2, 0x7d, 0xf3, 0x2e, 0xd6, 0x4d, 0xec, 0x2f,
0x60, 0x6e, 0x69, 0xb6, 0x4b, 0x39, 0x6c, 0xff, 0x58, 0x82, 0x8b, 0x19, 0x84, 0x8b, 0xf0, 0xfd,
0x36, 0xd4, 0x08, 0xdd, 0x2c, 0x64, 0xfc, 0xa5, 0x5c, 0xc6, 0x63, 0xe8, 0xee, 0x5a, 0x24, 0x50,
0xc5, 0x1a, 0xc5, 0x03, 0x39, 0x3d, 0x86, 0x5e, 0x84, 0x96, 0x71, 0xa4, 0xbb, 0x2e, 0xb6, 0x35,
0x57, 0x77, 0xb8, 0x00, 0x1a, 0x6a, 0x53, 0xc0, 0xf6, 0x75, 0x07, 0xa3, 0xe7, 0xa0, 0x4e, 0x5d,
0x56, 0xb3, 0xcc, 0x50, 0xfd, 0xcb, 0xcc, 0x85, 0x4d, 0x82, 0x2e, 0x03, 0xb0, 0x21, 0xdd, 0x34,
0x7d, 0x9e, 0x46, 0x1b, 0x6a, 0x83, 0x42, 0x6e, 0x52, 0x80, 0xf2, 0x33, 0x09, 0x5a, 0x34, 0x66,
0xdf, 0xc3, 0x81, 0x4e, 0xf5, 0x80, 0xde, 0x82, 0x86, 0xed, 0xe9, 0xa6, 0x16, 0x9c, 0x8c, 0x39,
0xaa, 0x4e, 0x5a, 0xd6, 0x9c, 0x05, 0xba, 0xe8, 0xfe, 0xc9, 0x18, 0xab, 0x75, 0x5b, 0x7c, 0xcd,
0x23, 0xef, 0x8c, 0x2b, 0x97, 0x73, 0x5c, 0xf9, 0xfb, 0x55, 0x58, 0xfd, 0xb6, 0x1e, 0x18, 0x47,
0x3b, 0xce, 0x36, 0x67, 0x72, 0x01, 0x23, 0x98, 0xc6, 0xb6, 0x52, 0x3c, 0xb6, 0x7d, 0x6a, 0xb1,
0x33, 0xb2, 0xf3, 0x6a, 0x9e, 0x9d, 0xd3, 0x22, 0x7b, 0xf3, 0x7d, 0xa1, 0xaa, 0x98, 0x9d, 0xc7,
0x72, 0x50, 0xed, 0x2c, 0x39, 0x68, 0x1b, 0xda, 0xf8, 0x43, 0xc3, 0x9e, 0x50, 0x9d, 0x33, 0xec,
0xcb, 0x0c, 0xfb, 0xf3, 0x39, 0xd8, 0xe3, 0x4e, 0xd6, 0x12, 0x8b, 0xf6, 0x04, 0x0d, 0x5c, 0xd5,
0x0e, 0x0e, 0xf4, 0x5e, 0x9d, 0x91, 0xb1, 0x56, 0xa4, 0xea, 0xd0, 0x3e, 0xb8, 0xba, 0xe9, 0x1f,
0xba, 0x04, 0x0d, 0x91, 0xf1, 0xf6, 0x76, 0x7a, 0x0d, 0x26, 0xbe, 0x29, 0x00, 0xe9, 0xd0, 0x16,
0x11, 0x48, 0x50, 0x08, 0x8c, 0xc2, 0xb7, 0xf3, 0x10, 0xe4, 0x2b, 0x3b, 0x4e, 0x39, 0x79, 0xc7,
0x0d, 0xfc, 0x13, 0xb5, 0x45, 0x62, 0xa0, 0xbe, 0x06, 0xe7, 0x32, 0x53, 0x90, 0x0c, 0xe5, 0x47,
0xf8, 0x84, 0x19, 0x48, 0x59, 0xa5, 0x9f, 0xe8, 0x75, 0xa8, 0x1e, 0xeb, 0xf6, 0x04, 0x33, 0x03,
0x98, 0x2d, 0x23, 0x3e, 0xf9, 0xab, 0xa5, 0x37, 0x25, 0xe5, 0xe3, 0x12, 0x3c, 0xc7, 0x69, 0xc3,
0x76, 0xa0, 0x7f, 0xbe, 0xb6, 0x18, 0xd9, 0x59, 0xe5, 0x54, 0x76, 0x76, 0x19, 0x20, 0x2c, 0x56,
0x2c, 0x53, 0xa4, 0xf7, 0x48, 0x4b, 0x66, 0xd2, 0x04, 0x1a, 0xa7, 0x35, 0x01, 0xe5, 0x8f, 0x15,
0xe8, 0x0a, 0xd9, 0xd1, 0x19, 0x2c, 0x80, 0x5c, 0x82, 0x46, 0x94, 0x7a, 0x84, 0x1a, 0xa6, 0x00,
0xb4, 0x06, 0xcd, 0x98, 0xfb, 0x08, 0x39, 0xc4, 0x41, 0x73, 0x09, 0x23, 0x2c, 0x24, 0x2a, 0xb1,
0x42, 0xe2, 0x32, 0xc0, 0xa1, 0x3d, 0x21, 0x47, 0x5a, 0x60, 0x39, 0x38, 0xe4, 0x94, 0x41, 0xee,
0x5b, 0x0e, 0x46, 0x37, 0xa1, 0x35, 0xb4, 0x5c, 0xdb, 0x1b, 0x69, 0x63, 0x3d, 0x38, 0x22, 0xbd,
0x5a, 0xa1, 0xc3, 0xdc, 0xb6, 0xb0, 0x6d, 0xde, 0x62, 0x73, 0xd5, 0x26, 0x5f, 0x73, 0x40, 0x97,
0xa0, 0xe7, 0xa1, 0xe9, 0x4e, 0x1c, 0xcd, 0x3b, 0xd4, 0x7c, 0xef, 0x09, 0x75, 0x39, 0x86, 0xc2,
0x9d, 0x38, 0xef, 0x1d, 0xaa, 0xde, 0x13, 0x1a, 0xfa, 0x1b, 0x34, 0x09, 0x10, 0xdb, 0x1b, 0x91,
0x5e, 0x7d, 0xae, 0xfd, 0xa7, 0x0b, 0xe8, 0x6a, 0x93, 0x9a, 0x19, 0x5b, 0xdd, 0x98, 0x6f, 0x75,
0xb4, 0x00, 0xbd, 0x0c, 0x1d, 0xc3, 0x73, 0xc6, 0x3a, 0x93, 0xd0, 0x6d, 0xdf, 0x73, 0x98, 0xbf,
0x95, 0xd5, 0x14, 0x14, 0x6d, 0x43, 0xd3, 0x72, 0x4d, 0xfc, 0xa1, 0x70, 0xca, 0x26, 0xc3, 0xa3,
0xe4, 0xa9, 0x9c, 0x21, 0xda, 0xa3, 0x73, 0x99, 0xd2, 0xc1, 0x0a, 0x3f, 0x09, 0xcd, 0x48, 0xa1,
0x6f, 0x13, 0xeb, 0x23, 0xdc, 0x6b, 0x71, 0x2d, 0x0a, 0xd8, 0xc0, 0xfa, 0x08, 0xd3, 0x22, 0xd9,
0x72, 0x09, 0xf6, 0x03, 0x4d, 0x18, 0x65, 0xaf, 0xcd, 0xd2, 0x56, 0x9b, 0x43, 0x85, 0x2f, 0x29,
0xbf, 0x2b, 0x41, 0x27, 0x89, 0x08, 0xf5, 0x60, 0xf9, 0x90, 0x41, 0x42, 0xeb, 0x09, 0x7f, 0x29,
0x5a, 0xec, 0xd2, 0x0e, 0x5a, 0x63, 0xb4, 0x30, 0xe3, 0xa9, 0xab, 0x4d, 0x0e, 0x63, 0x1b, 0x50,
0x23, 0xe0, 0xec, 0xb1, 0x4c, 0x59, 0x66, 0x28, 0x1b, 0x0c, 0xc2, 0xf2, 0x64, 0x0f, 0x96, 0x39,
0x1b, 0xa1, 0xe9, 0x84, 0xbf, 0x74, 0x64, 0x38, 0xb1, 0x18, 0x56, 0x6e, 0x3a, 0xe1, 0x2f, 0xda,
0x81, 0x16, 0xdf, 0x72, 0xac, 0xfb, 0xba, 0x13, 0x1a, 0xce, 0x8b, 0xb9, 0xee, 0x7e, 0x07, 0x9f,
0xbc, 0x4f, 0xa3, 0xc7, 0x81, 0x6e, 0xf9, 0x2a, 0x17, 0xf4, 0x01, 0x5b, 0x85, 0xd6, 0x41, 0xe6,
0xbb, 0x1c, 0x5a, 0x36, 0x16, 0x26, 0xb8, 0xcc, 0x92, 0x71, 0x87, 0xc1, 0x6f, 0x5b, 0x36, 0xe6,
0x56, 0x16, 0xb1, 0xc0, 0x44, 0x5b, 0xe7, 0x46, 0xc6, 0x20, 0x54, 0xb0, 0xca, 0x5f, 0xcb, 0xb0,
0x42, 0x7d, 0x4d, 0xb8, 0xdd, 0x02, 0xd1, 0xe8, 0x32, 0x80, 0x49, 0x02, 0x2d, 0x11, 0x91, 0x1a,
0x26, 0x09, 0xf6, 0x79, 0x50, 0x7a, 0x2b, 0x0c, 0x38, 0xe5, 0xe2, 0x5a, 0x37, 0xe5, 0xfb, 0xd9,
0xe4, 0x76, 0xa6, 0x6e, 0xf8, 0x0a, 0xb4, 0x89, 0x37, 0xf1, 0x0d, 0xac, 0x25, 0xba, 0x92, 0x16,
0x07, 0xee, 0xe7, 0xc7, 0xcc, 0x5a, 0x6e, 0x57, 0x1e, 0x8b, 0x6e, 0xcb, 0x8b, 0x25, 0xb8, 0x7a,
0x3a, 0xc1, 0xdd, 0x81, 0x2e, 0x73, 0x3f, 0x6d, 0xec, 0x11, 0xde, 0xdc, 0x09, 0xaf, 0x4d, 0x79,
0x53, 0x74, 0x0a, 0x77, 0x8f, 0x8c, 0x0e, 0xc4, 0x54, 0xb5, 0xc3, 0x96, 0x86, 0xbf, 0x44, 0xf9,
0x79, 0x09, 0x56, 0x45, 0xb3, 0xb8, 0xb8, 0x62, 0x8b, 0xd2, 0x4c, 0x18, 0x35, 0xcb, 0x4f, 0x69,
0xbf, 0x2a, 0x73, 0x94, 0x41, 0xd5, 0x9c, 0x32, 0x28, 0xd9, 0x82, 0xd4, 0x32, 0x2d, 0xc8, 0x0d,
0xa8, 0x12, 0xc3, 0x1b, 0x63, 0xa6, 0x86, 0xce, 0xd6, 0xe5, 0x3c, 0x35, 0xec, 0xe8, 0x81, 0x3e,
0xa0, 0x93, 0x54, 0x3e, 0x57, 0xf9, 0x87, 0x04, 0xed, 0x01, 0xd6, 0x7d, 0xe3, 0x28, 0x14, 0xc6,
0x1b, 0x50, 0xf6, 0xf1, 0x63, 0x21, 0x8b, 0x97, 0x0a, 0x04, 0x9d, 0x58, 0xa2, 0xd2, 0x05, 0xe8,
0x05, 0x68, 0x9a, 0x8e, 0x1d, 0xc5, 0xa2, 0x12, 0x0b, 0x0c, 0x60, 0x3a, 0xb6, 0x08, 0x44, 0x29,
0xfa, 0xcb, 0x19, 0xfa, 0x37, 0xe0, 0xdc, 0xa1, 0xef, 0x39, 0x1a, 0xab, 0xd3, 0x35, 0x9b, 0x95,
0xe7, 0x4c, 0x58, 0x75, 0xb5, 0x4b, 0x07, 0x62, 0x55, 0xfb, 0x94, 0xd7, 0xea, 0x29, 0x78, 0xfd,
0xa7, 0x04, 0xad, 0x6f, 0xd1, 0xa1, 0x90, 0xd5, 0x37, 0xe3, 0xac, 0xbe, 0x5c, 0xc0, 0xaa, 0x8a,
0x03, 0xdf, 0xc2, 0xc7, 0xf8, 0xbf, 0x8c, 0xd9, 0x3f, 0x4b, 0xd0, 0x1f, 0x9c, 0xb8, 0x86, 0xca,
0xbd, 0x69, 0x71, 0x93, 0xbf, 0x02, 0xed, 0xe3, 0x44, 0x93, 0xc4, 0x99, 0x6e, 0x1d, 0xc7, 0xbb,
0x24, 0x15, 0xe4, 0xb0, 0x16, 0x12, 0xcc, 0x86, 0xc1, 0xed, 0x95, 0x3c, 0xaa, 0x53, 0xc4, 0xb1,
0xe0, 0xd0, 0xf5, 0x93, 0x40, 0xc5, 0x87, 0x95, 0x9c, 0x79, 0xe8, 0x22, 0x2c, 0x8b, 0x86, 0x4c,
0x24, 0x31, 0xee, 0x83, 0x26, 0xcd, 0x61, 0xd3, 0x23, 0x05, 0xcb, 0xcc, 0x16, 0x40, 0x26, 0x55,
0x5f, 0x54, 0x39, 0x9b, 0x19, 0xf5, 0x98, 0x44, 0xf9, 0xa9, 0x04, 0xab, 0xef, 0xea, 0xae, 0xe9,
0x1d, 0x1e, 0x2e, 0x2e, 0xb9, 0x6d, 0x48, 0x14, 0xd5, 0xf3, 0xb6, 0xeb, 0x89, 0x45, 0xca, 0x6f,
0x4b, 0x80, 0x68, 0x10, 0xbd, 0xa5, 0xdb, 0xba, 0x6b, 0xe0, 0xb3, 0x53, 0x73, 0x15, 0x3a, 0x89,
0xd0, 0x1f, 0x1d, 0xd6, 0xc7, 0x63, 0x3f, 0x41, 0x77, 0xa0, 0x33, 0xe4, 0xa8, 0x34, 0x1f, 0xeb,
0xc4, 0x73, 0x59, 0x4c, 0xeb, 0xe4, 0x37, 0xdb, 0xf7, 0x7d, 0x6b, 0x34, 0xc2, 0xfe, 0xb6, 0xe7,
0x9a, 0x3c, 0xf8, 0xb6, 0x87, 0x21, 0x99, 0x74, 0x29, 0x73, 0x97, 0x28, 0x0f, 0x86, 0x4d, 0x1e,
0x44, 0x89, 0x90, 0xa0, 0x57, 0xe1, 0x1c, 0xc1, 0xba, 0x8d, 0x4d, 0x2d, 0xe6, 0x35, 0x3c, 0x08,
0xca, 0x7c, 0x60, 0x50, 0x7c, 0xd6, 0x92, 0x93, 0x97, 0x94, 0xdf, 0x4b, 0x80, 0xa2, 0x76, 0x87,
0x35, 0x18, 0xcc, 0x68, 0xd2, 0x4b, 0xa5, 0x9c, 0x58, 0x7c, 0x09, 0x1a, 0x66, 0xb8, 0x52, 0x18,
0xf9, 0x14, 0x40, 0xdd, 0x80, 0xb3, 0xa1, 0xd1, 0x24, 0x86, 0xcd, 0xb0, 0x78, 0xe6, 0xc0, 0xbb,
0x0c, 0x96, 0x4c, 0x6b, 0x95, 0x74, 0x5a, 0x8b, 0x1f, 0x25, 0x54, 0x13, 0x47, 0x09, 0xca, 0x27,
0x25, 0x90, 0x59, 0x88, 0xda, 0x9e, 0xf6, 0x19, 0x73, 0x11, 0x7d, 0x05, 0xda, 0xe2, 0x3a, 0x2b,
0x41, 0x78, 0xeb, 0x71, 0x6c, 0x33, 0x74, 0x1d, 0xce, 0xf3, 0x49, 0x3e, 0x26, 0x13, 0x7b, 0x5a,
0x37, 0xf2, 0x22, 0x0e, 0x3d, 0xe6, 0xb1, 0x91, 0x0e, 0x85, 0x2b, 0x1e, 0xc0, 0xea, 0xc8, 0xf6,
0x86, 0xba, 0xad, 0x25, 0xd5, 0x53, 0xd0, 0x23, 0x65, 0x8d, 0xf8, 0x3c, 0x5f, 0x3e, 0x88, 0xeb,
0x90, 0xa0, 0x5d, 0xda, 0xb9, 0xe2, 0x47, 0x51, 0x5e, 0x17, 0xa7, 0xc4, 0xf3, 0xa4, 0xf5, 0x16,
0x5d, 0x18, 0xfe, 0x29, 0x1f, 0x4b, 0xd0, 0x4d, 0x9d, 0x06, 0xa6, 0xfb, 0x1f, 0x29, 0xdb, 0xff,
0xbc, 0x09, 0x55, 0xda, 0x14, 0xf0, 0x10, 0xd6, 0xc9, 0xaf, 0xcd, 0x93, 0xbb, 0xaa, 0x7c, 0x01,
0xba, 0x06, 0x2b, 0x39, 0x77, 0x27, 0xc2, 0x06, 0x50, 0xf6, 0xea, 0x44, 0xf9, 0x43, 0x05, 0x9a,
0x31, 0x79, 0xcc, 0x68, 0xdd, 0xe6, 0x39, 0xde, 0x49, 0xb1, 0x57, 0xce, 0xb2, 0x57, 0x70, 0x79,
0x40, 0xed, 0xce, 0xc1, 0x0e, 0x2f, 0x7a, 0x45, 0x05, 0xee, 0x60, 0x87, 0xf5, 0x12, 0xd4, 0x24,
0x27, 0x0e, 0x6f, 0xba, 0xb8, 0x3b, 0x2d, 0xbb, 0x13, 0x87, 0xb5, 0x5c, 0xc9, 0x7a, 0x7f, 0xf9,
0x29, 0xf5, 0x7e, 0x3d, 0x59, 0xef, 0x27, 0xfc, 0xa8, 0x91, 0xf6, 0xa3, 0x79, 0xbb, 0xa9, 0xeb,
0xb0, 0x62, 0xf8, 0x58, 0x0f, 0xb0, 0x79, 0xeb, 0x64, 0x3b, 0x1a, 0xea, 0x35, 0x59, 0xaa, 0xcc,
0x1b, 0x42, 0xb7, 0xa7, 0xc7, 0x22, 0x5c, 0xcb, 0x2d, 0xa6, 0xe5, 0xfc, 0x76, 0x42, 0xe8, 0x86,
0x2b, 0x39, 0x8c, 0xb8, 0xec, 0x2f, 0xdd, 0xc7, 0xb5, 0xcf, 0xd4, 0xc7, 0xbd, 0x00, 0xcd, 0xe9,
0xe1, 0x00, 0xe9, 0x75, 0x78, 0xe4, 0x8b, 0x4e, 0x07, 0x48, 0x22, 0x18, 0x74, 0x93, 0xc1, 0xe0,
0x2f, 0x65, 0xe8, 0x4c, 0x2b, 0xf8, 0xb9, 0x43, 0xc1, 0x3c, 0x77, 0x80, 0xfb, 0x20, 0x4f, 0x73,
0x24, 0x93, 0xd2, 0x53, 0x9b, 0x90, 0xf4, 0x81, 0x7b, 0x77, 0x9c, 0xf2, 0xb9, 0xc4, 0x91, 0x66,
0xe5, 0x54, 0x47, 0x9a, 0x0b, 0x5e, 0x15, 0xdd, 0x80, 0x0b, 0x3e, 0xaf, 0xea, 0x4d, 0x2d, 0xc1,
0x36, 0x2f, 0x90, 0xcf, 0x87, 0x83, 0x07, 0x71, 0xf6, 0x0b, 0xdc, 0x78, 0xb9, 0xc8, 0x8d, 0xd3,
0x6a, 0xac, 0x67, 0xd4, 0x98, 0xbd, 0xb1, 0x6a, 0xe4, 0xdd, 0x58, 0x3d, 0x80, 0x95, 0x07, 0x2e,
0x99, 0x0c, 0x89, 0xe1, 0x5b, 0x43, 0x1c, 0x1e, 0x77, 0xcd, 0xa5, 0xd6, 0x3e, 0xd4, 0x45, 0xbc,
0xe6, 0x2a, 0x6d, 0xa8, 0xd1, 0xbf, 0xf2, 0x43, 0x09, 0x56, 0xb3, 0xfb, 0x32, 0x8b, 0x99, 0x06,
0x03, 0x29, 0x11, 0x0c, 0xbe, 0x03, 0x2b, 0xd3, 0xed, 0xb5, 0xc4, 0xce, 0x05, 0xc5, 0x5a, 0x0e,
0xe1, 0x2a, 0x9a, 0xee, 0x11, 0xc2, 0x94, 0x7f, 0x49, 0xd1, 0xa1, 0x21, 0x85, 0x8d, 0xd8, 0x51,
0x28, 0x4d, 0x50, 0x9e, 0x6b, 0x5b, 0x6e, 0xd4, 0x71, 0x0a, 0x1e, 0x39, 0x50, 0x74, 0x9c, 0xef,
0x42, 0x57, 0x4c, 0x8a, 0xf2, 0xcc, 0x9c, 0xc5, 0x52, 0x87, 0xaf, 0x8b, 0x32, 0xcc, 0x55, 0xe8,
0x78, 0x87, 0x87, 0x71, 0x7c, 0x3c, 0x50, 0xb6, 0x05, 0x54, 0x20, 0xfc, 0x26, 0xc8, 0xe1, 0xb4,
0xd3, 0x66, 0xb6, 0xae, 0x58, 0x18, 0xd5, 0xa9, 0x3f, 0x90, 0xa0, 0x97, 0xcc, 0x73, 0x31, 0xf6,
0x4f, 0x5f, 0xa7, 0x7d, 0x2d, 0x79, 0xbb, 0x73, 0xf5, 0x29, 0xf4, 0x4c, 0xf1, 0x88, 0xe3, 0x81,
0x8d, 0x6f, 0x40, 0x23, 0xea, 0x08, 0x50, 0x13, 0x96, 0x1f, 0xb8, 0x77, 0x5c, 0xef, 0x89, 0x2b,
0x2f, 0xa1, 0x65, 0x28, 0xdf, 0xb4, 0x6d, 0x59, 0x42, 0x6d, 0x68, 0x0c, 0x02, 0x1f, 0xeb, 0x8e,
0xe5, 0x8e, 0xe4, 0x12, 0xea, 0x00, 0xbc, 0x6b, 0x91, 0xc0, 0xf3, 0x2d, 0x43, 0xb7, 0xe5, 0xf2,
0xc6, 0x47, 0xd0, 0x49, 0x7a, 0x3d, 0x6a, 0x41, 0x7d, 0xdf, 0x0b, 0xde, 0xf9, 0xd0, 0x22, 0x81,
0xbc, 0x44, 0xe7, 0xef, 0x7b, 0xc1, 0x81, 0x8f, 0x09, 0x76, 0x03, 0x59, 0x42, 0x00, 0xb5, 0xf7,
0xdc, 0x1d, 0x8b, 0x3c, 0x92, 0x4b, 0x68, 0x45, 0x24, 0x65, 0xdd, 0xde, 0x13, 0xae, 0x24, 0x97,
0xe9, 0xf2, 0xe8, 0xaf, 0x82, 0x64, 0x68, 0x45, 0x53, 0x76, 0x0f, 0x1e, 0xc8, 0x55, 0xd4, 0x80,
0x2a, 0xff, 0xac, 0x6d, 0x98, 0x20, 0xa7, 0x2b, 0x4a, 0xba, 0x27, 0x67, 0x22, 0x02, 0xc9, 0x4b,
0x94, 0x33, 0x51, 0xa5, 0xcb, 0x12, 0xea, 0x42, 0x33, 0x56, 0x20, 0xcb, 0x25, 0x0a, 0xd8, 0xf5,
0xc7, 0x86, 0x28, 0x95, 0x39, 0x09, 0x54, 0xef, 0x3b, 0x54, 0x12, 0x95, 0x8d, 0x5b, 0x50, 0x0f,
0xc3, 0x11, 0x9d, 0x2a, 0x44, 0x44, 0x7f, 0xe5, 0x25, 0x74, 0x0e, 0xda, 0x89, 0x7b, 0x76, 0x59,
0x42, 0x08, 0x3a, 0xc9, 0x37, 0x10, 0x72, 0x69, 0xeb, 0x97, 0x2d, 0x00, 0x5e, 0xaf, 0x79, 0x9e,
0x6f, 0xa2, 0x31, 0xa0, 0x5d, 0x1c, 0xd0, 0x5c, 0xe4, 0xb9, 0x61, 0x1e, 0x21, 0xe8, 0x7a, 0x41,
0x59, 0x93, 0x9d, 0x2a, 0x48, 0xed, 0x17, 0xf5, 0xa2, 0xa9, 0xe9, 0xca, 0x12, 0x72, 0x18, 0xc6,
0xfb, 0x96, 0x83, 0xef, 0x5b, 0xc6, 0xa3, 0xa8, 0xd0, 0x2b, 0xc6, 0x98, 0x9a, 0x1a, 0x62, 0x4c,
0x85, 0x7d, 0xf1, 0x33, 0x08, 0x7c, 0xcb, 0x1d, 0x85, 0xb7, 0x75, 0xca, 0x12, 0x7a, 0x0c, 0xe7,
0x77, 0x31, 0xc3, 0x6e, 0x91, 0xc0, 0x32, 0x48, 0x88, 0x70, 0xab, 0x18, 0x61, 0x66, 0xf2, 0x29,
0x51, 0xda, 0xd0, 0x4d, 0xbd, 0x55, 0x42, 0x1b, 0xf9, 0x17, 0x7e, 0x79, 0xef, 0xaa, 0xfa, 0xaf,
0xce, 0x35, 0x37, 0xc2, 0x66, 0x41, 0x27, 0xf9, 0x1e, 0x07, 0xfd, 0x7f, 0xd1, 0x06, 0x99, 0x27,
0x07, 0xfd, 0x8d, 0x79, 0xa6, 0x46, 0xa8, 0x1e, 0x72, 0x7b, 0x9a, 0x85, 0x2a, 0xf7, 0xb9, 0x47,
0xff, 0x69, 0x17, 0xa5, 0xca, 0x12, 0xfa, 0x2e, 0x9c, 0xcb, 0x3c, 0x8c, 0x40, 0x5f, 0xca, 0x6f,
0xc1, 0xf3, 0xdf, 0x4f, 0xcc, 0xc2, 0xf0, 0x30, 0xed, 0x0d, 0xc5, 0xd4, 0x67, 0x1e, 0xd2, 0xcc,
0x4f, 0x7d, 0x6c, 0xfb, 0xa7, 0x51, 0x7f, 0x6a, 0x0c, 0x13, 0xe6, 0x36, 0xe9, 0xce, 0xe1, 0xb5,
0x3c, 0x14, 0x85, 0xaf, 0x33, 0xfa, 0x9b, 0xf3, 0x4e, 0x8f, 0x5b, 0x57, 0xf2, 0x01, 0x40, 0xbe,
0xd0, 0x72, 0x1f, 0x2d, 0xe4, 0x5b, 0x57, 0xfe, 0x7b, 0x02, 0x65, 0x09, 0xdd, 0x4f, 0x44, 0x43,
0xf4, 0x72, 0x91, 0x72, 0x92, 0xe7, 0x09, 0xb3, 0xe4, 0xa6, 0x01, 0xec, 0xe2, 0xe0, 0x1e, 0x0e,
0x7c, 0xcb, 0x20, 0xe9, 0x4d, 0xc5, 0xcf, 0x74, 0x42, 0xb8, 0xe9, 0x2b, 0x33, 0xe7, 0x45, 0x64,
0x0f, 0xa1, 0xb9, 0x8b, 0x03, 0x71, 0xde, 0x43, 0x50, 0xe1, 0xca, 0x70, 0x46, 0x88, 0x62, 0x7d,
0xf6, 0xc4, 0x78, 0x44, 0x49, 0xbd, 0x47, 0x40, 0x85, 0xb2, 0xcd, 0xbe, 0x92, 0xc8, 0x8f, 0x28,
0x05, 0x0f, 0x1c, 0x94, 0xa5, 0xad, 0x7f, 0x03, 0x34, 0x58, 0x8a, 0xa0, 0xa9, 0xe7, 0x7f, 0x19,
0xe2, 0x19, 0x64, 0x88, 0x0f, 0xa0, 0x9b, 0xba, 0xde, 0xce, 0xd7, 0x67, 0xfe, 0x1d, 0xf8, 0x2c,
0x93, 0x1f, 0x02, 0xca, 0x5e, 0x50, 0xe7, 0x87, 0x8a, 0xc2, 0x8b, 0xec, 0x59, 0x38, 0xde, 0xe7,
0x2f, 0x44, 0xa2, 0xea, 0xf5, 0x95, 0x22, 0x6f, 0x4d, 0x1d, 0x46, 0x7e, 0xfe, 0x81, 0xf4, 0xd9,
0x27, 0x9a, 0x0f, 0xa0, 0x9b, 0xba, 0xb6, 0xc9, 0xd7, 0x6e, 0xfe, 0xdd, 0xce, 0xac, 0xdd, 0x3f,
0xc3, 0x88, 0x6c, 0xc2, 0x4a, 0xce, 0x81, 0x3c, 0xca, 0xcd, 0x22, 0xc5, 0x27, 0xf7, 0xb3, 0x18,
0x1a, 0x40, 0x8d, 0x5f, 0xce, 0xa0, 0x17, 0xf3, 0x3b, 0x86, 0xd8, 0xc5, 0x4d, 0x7f, 0xd6, 0xf5,
0x0e, 0x99, 0xd8, 0x01, 0xdf, 0xb4, 0xca, 0x42, 0x18, 0xca, 0xbd, 0xdb, 0x8b, 0xdf, 0xa9, 0xf4,
0x67, 0x5f, 0xa3, 0x84, 0x9b, 0x3e, 0xeb, 0x5c, 0x72, 0xeb, 0xf5, 0x87, 0x5b, 0x23, 0x2b, 0x38,
0x9a, 0x0c, 0xa9, 0x90, 0xae, 0xf1, 0x99, 0xaf, 0x59, 0x9e, 0xf8, 0xba, 0x16, 0x92, 0x76, 0x8d,
0xed, 0x74, 0x8d, 0xf1, 0x32, 0x1e, 0x0e, 0x6b, 0xec, 0xf7, 0xc6, 0x7f, 0x02, 0x00, 0x00, 0xff,
0xff, 0x89, 0xfb, 0x65, 0x6d, 0x1f, 0x30, 0x00, 0x00,
0xf9, 0x53, 0xbd, 0x4d, 0xf7, 0xd7, 0x5b, 0xf9, 0x8d, 0x3d, 0xee, 0xf4, 0xcf, 0x4e, 0x9c, 0x72,
0x9c, 0xcc, 0x6f, 0x42, 0xc6, 0x66, 0x1c, 0xa2, 0x04, 0x82, 0x84, 0x3d, 0x13, 0x4f, 0x06, 0xdb,
0x93, 0xa1, 0xda, 0x0e, 0xc8, 0x8a, 0x54, 0x54, 0x57, 0xbd, 0xe9, 0x29, 0xb9, 0x96, 0x76, 0xbd,
0xea, 0x71, 0x26, 0x67, 0x84, 0xc4, 0x76, 0x40, 0x5c, 0x51, 0xc4, 0x01, 0x0e, 0x48, 0x44, 0x5c,
0xb8, 0x20, 0x21, 0xc4, 0x8d, 0x2b, 0x12, 0x7f, 0x00, 0x47, 0x04, 0x77, 0xc4, 0x15, 0xbd, 0xa5,
0xf6, 0x2a, 0x77, 0x8f, 0x27, 0x4e, 0x82, 0xc4, 0xad, 0xea, 0x7b, 0xcb, 0xb7, 0x6f, 0xef, 0x3d,
0x38, 0xf3, 0x68, 0x86, 0xfd, 0x63, 0xcd, 0xf0, 0x3c, 0xdf, 0xdc, 0x98, 0xfa, 0x5e, 0xe0, 0x21,
0xe4, 0x58, 0xf6, 0xd1, 0x8c, 0xf0, 0xbf, 0x0d, 0x36, 0x3e, 0xec, 0x18, 0x9e, 0xe3, 0x78, 0x2e,
0x87, 0x0d, 0x3b, 0xc9, 0x19, 0xc3, 0x9e, 0xe5, 0x06, 0xd8, 0x77, 0x75, 0x3b, 0x1c, 0x25, 0xc6,
0x21, 0x76, 0x74, 0xf1, 0x27, 0x9b, 0x7a, 0xa0, 0x27, 0xf7, 0x57, 0xbe, 0x27, 0xc1, 0xea, 0xe8,
0xd0, 0x7b, 0xbc, 0xe5, 0xd9, 0x36, 0x36, 0x02, 0xcb, 0x73, 0x89, 0x8a, 0x1f, 0xcd, 0x30, 0x09,
0xd0, 0x35, 0xa8, 0x8d, 0x75, 0x82, 0x07, 0xd2, 0x25, 0x69, 0xad, 0xbd, 0x79, 0x61, 0x23, 0x45,
0x89, 0x20, 0xe1, 0x2e, 0x99, 0xdc, 0xd4, 0x09, 0x56, 0xd9, 0x4c, 0x84, 0xa0, 0x66, 0x8e, 0x77,
0xb7, 0x07, 0x95, 0x4b, 0xd2, 0x5a, 0x55, 0x65, 0xdf, 0xe8, 0x25, 0xe8, 0x1a, 0xd1, 0xde, 0xbb,
0xdb, 0x64, 0x50, 0xbd, 0x54, 0x5d, 0xab, 0xaa, 0x69, 0xa0, 0xf2, 0x37, 0x09, 0xce, 0xe7, 0xc8,
0x20, 0x53, 0xcf, 0x25, 0x18, 0x5d, 0x87, 0x06, 0x09, 0xf4, 0x60, 0x46, 0x04, 0x25, 0xff, 0x57,
0x48, 0xc9, 0x88, 0x4d, 0x51, 0xc5, 0xd4, 0x3c, 0xda, 0x4a, 0x01, 0x5a, 0xf4, 0x65, 0x38, 0x6b,
0xb9, 0x77, 0xb1, 0xe3, 0xf9, 0xc7, 0xda, 0x14, 0xfb, 0x06, 0x76, 0x03, 0x7d, 0x82, 0x43, 0x1a,
0x57, 0xc2, 0xb1, 0xfd, 0x78, 0x08, 0xbd, 0x01, 0xe7, 0xb9, 0x96, 0x08, 0xf6, 0x8f, 0x2c, 0x03,
0x6b, 0xfa, 0x91, 0x6e, 0xd9, 0xfa, 0xd8, 0xc6, 0x83, 0xda, 0xa5, 0xea, 0x5a, 0x53, 0x3d, 0xc7,
0x86, 0x47, 0x7c, 0xf4, 0x46, 0x38, 0xa8, 0xfc, 0x4a, 0x82, 0x73, 0x94, 0xc3, 0x7d, 0xdd, 0x0f,
0xac, 0x67, 0x20, 0x67, 0x05, 0x3a, 0x49, 0xde, 0x06, 0x55, 0x36, 0x96, 0x82, 0xd1, 0x39, 0xd3,
0x10, 0x3d, 0x95, 0x49, 0x8d, 0xb1, 0x99, 0x82, 0x29, 0xbf, 0x14, 0x06, 0x91, 0xa4, 0xf3, 0x34,
0x8a, 0xc8, 0xe2, 0xac, 0xe4, 0x71, 0x3e, 0x85, 0x1a, 0x94, 0xbf, 0x4b, 0x70, 0xee, 0x8e, 0xa7,
0x9b, 0xb1, 0xc1, 0x7c, 0xf6, 0xe2, 0xfc, 0x3a, 0x34, 0xb8, 0x77, 0x0d, 0x6a, 0x0c, 0xd7, 0x95,
0x34, 0x2e, 0xe1, 0x79, 0x31, 0x85, 0x23, 0x06, 0x50, 0xc5, 0x22, 0x74, 0x05, 0x7a, 0x3e, 0x9e,
0xda, 0x96, 0xa1, 0x6b, 0xee, 0xcc, 0x19, 0x63, 0x7f, 0x50, 0xbf, 0x24, 0xad, 0xd5, 0xd5, 0xae,
0x80, 0xee, 0x31, 0xa0, 0xf2, 0x73, 0x09, 0x06, 0x2a, 0xb6, 0xb1, 0x4e, 0xf0, 0xe7, 0xc9, 0xec,
0x2a, 0x34, 0x5c, 0xcf, 0xc4, 0xbb, 0xdb, 0x8c, 0xd9, 0xaa, 0x2a, 0xfe, 0x94, 0x1f, 0x55, 0xb8,
0x22, 0xbe, 0xe0, 0x76, 0x9d, 0x50, 0x56, 0xfd, 0xd3, 0x51, 0x56, 0xa3, 0x48, 0x59, 0x7f, 0x8a,
0x95, 0xf5, 0x45, 0x17, 0x48, 0xac, 0xd0, 0x7a, 0x4a, 0xa1, 0xbf, 0x96, 0xe0, 0xb9, 0x1d, 0x1c,
0x44, 0xe4, 0x53, 0x7f, 0xc6, 0x5f, 0xd0, 0x60, 0xf5, 0x89, 0x04, 0xc3, 0x22, 0x5a, 0x4f, 0x13,
0xb0, 0x1e, 0xc0, 0x6a, 0x84, 0x43, 0x33, 0x31, 0x31, 0x7c, 0x6b, 0xca, 0xd4, 0xc8, 0x42, 0x57,
0x7b, 0xf3, 0xf2, 0x46, 0x3e, 0x25, 0x6f, 0x64, 0x29, 0x38, 0x17, 0x6d, 0xb1, 0x9d, 0xd8, 0x41,
0xf9, 0x89, 0x04, 0xe7, 0x76, 0x70, 0x30, 0xc2, 0x13, 0x07, 0xbb, 0xc1, 0xae, 0x7b, 0xe0, 0x3d,
0xbd, 0x5c, 0x9f, 0x07, 0x20, 0x62, 0x9f, 0x28, 0xac, 0x26, 0x20, 0x8b, 0xc8, 0x98, 0x65, 0xff,
0x2c, 0x3d, 0xa7, 0x91, 0xdd, 0x57, 0xa0, 0x6e, 0xb9, 0x07, 0x5e, 0x28, 0xaa, 0x17, 0x8a, 0x44,
0x95, 0x44, 0xc6, 0x67, 0x2b, 0x2e, 0xa7, 0xe2, 0x50, 0xf7, 0xcd, 0x3b, 0x58, 0x37, 0xb1, 0x7f,
0x0a, 0x73, 0xcb, 0xb2, 0x5d, 0x29, 0x60, 0xfb, 0xc7, 0x12, 0x9c, 0xcf, 0x21, 0x3c, 0x0d, 0xdf,
0x6f, 0x43, 0x83, 0xd0, 0xcd, 0x42, 0xc6, 0x5f, 0x2a, 0x64, 0x3c, 0x81, 0xee, 0x8e, 0x45, 0x02,
0x55, 0xac, 0x51, 0x3c, 0x90, 0xb3, 0x63, 0xe8, 0x45, 0xe8, 0x18, 0x87, 0xba, 0xeb, 0x62, 0x5b,
0x73, 0x75, 0x87, 0x0b, 0xa0, 0xa5, 0xb6, 0x05, 0x6c, 0x4f, 0x77, 0x30, 0x7a, 0x0e, 0x9a, 0xd4,
0x65, 0x35, 0xcb, 0x0c, 0xd5, 0xbf, 0xcc, 0x5c, 0xd8, 0x24, 0xe8, 0x22, 0x00, 0x1b, 0xd2, 0x4d,
0xd3, 0xe7, 0x69, 0xb4, 0xa5, 0xb6, 0x28, 0xe4, 0x06, 0x05, 0x28, 0x3f, 0x95, 0xa0, 0x43, 0x63,
0xf6, 0x5d, 0x1c, 0xe8, 0x54, 0x0f, 0xe8, 0x2d, 0x68, 0xd9, 0x9e, 0x6e, 0x6a, 0xc1, 0xf1, 0x94,
0xa3, 0xea, 0x65, 0x65, 0xcd, 0x59, 0xa0, 0x8b, 0xee, 0x1d, 0x4f, 0xb1, 0xda, 0xb4, 0xc5, 0xd7,
0x22, 0xf2, 0xce, 0xb9, 0x72, 0xb5, 0xc0, 0x95, 0xbf, 0x5f, 0x87, 0xd5, 0x6f, 0xeb, 0x81, 0x71,
0xb8, 0xed, 0x6c, 0x71, 0x26, 0x4f, 0x61, 0x04, 0x71, 0x6c, 0xab, 0x24, 0x63, 0xdb, 0xa7, 0x16,
0x3b, 0x23, 0x3b, 0xaf, 0x17, 0xd9, 0x39, 0x2d, 0xb2, 0x37, 0xde, 0x17, 0xaa, 0x4a, 0xd8, 0x79,
0x22, 0x07, 0x35, 0x9e, 0x26, 0x07, 0x6d, 0x41, 0x17, 0x7f, 0x68, 0xd8, 0x33, 0xaa, 0x73, 0x86,
0x7d, 0x99, 0x61, 0x7f, 0xbe, 0x00, 0x7b, 0xd2, 0xc9, 0x3a, 0x62, 0xd1, 0xae, 0xa0, 0x81, 0xab,
0xda, 0xc1, 0x81, 0x3e, 0x68, 0x32, 0x32, 0x2e, 0x95, 0xa9, 0x3a, 0xb4, 0x0f, 0xae, 0x6e, 0xfa,
0x87, 0x2e, 0x40, 0x4b, 0x64, 0xbc, 0xdd, 0xed, 0x41, 0x8b, 0x89, 0x2f, 0x06, 0x20, 0x1d, 0xba,
0x22, 0x02, 0x09, 0x0a, 0x81, 0x51, 0xf8, 0x76, 0x11, 0x82, 0x62, 0x65, 0x27, 0x29, 0x27, 0xef,
0xb8, 0x81, 0x7f, 0xac, 0x76, 0x48, 0x02, 0x34, 0xd4, 0xe0, 0x4c, 0x6e, 0x0a, 0x92, 0xa1, 0xfa,
0x10, 0x1f, 0x33, 0x03, 0xa9, 0xaa, 0xf4, 0x13, 0xbd, 0x0e, 0xf5, 0x23, 0xdd, 0x9e, 0x61, 0x66,
0x00, 0xf3, 0x65, 0xc4, 0x27, 0x7f, 0xb5, 0xf2, 0xa6, 0xa4, 0x7c, 0x5c, 0x81, 0xe7, 0x38, 0x6d,
0xd8, 0x0e, 0xf4, 0xcf, 0xd7, 0x16, 0x23, 0x3b, 0xab, 0x9d, 0xc8, 0xce, 0x2e, 0x02, 0x84, 0xc5,
0x8a, 0x65, 0x8a, 0xf4, 0x1e, 0x69, 0xc9, 0x4c, 0x9b, 0x40, 0xeb, 0xa4, 0x26, 0xa0, 0xfc, 0xb1,
0x06, 0x7d, 0x21, 0x3b, 0x3a, 0x83, 0x05, 0x90, 0x0b, 0xd0, 0x8a, 0x52, 0x8f, 0x50, 0x43, 0x0c,
0x40, 0x97, 0xa0, 0x9d, 0x70, 0x1f, 0x21, 0x87, 0x24, 0x68, 0x21, 0x61, 0x84, 0x85, 0x44, 0x2d,
0x51, 0x48, 0x5c, 0x04, 0x38, 0xb0, 0x67, 0xe4, 0x50, 0x0b, 0x2c, 0x07, 0x87, 0x9c, 0x32, 0xc8,
0x3d, 0xcb, 0xc1, 0xe8, 0x06, 0x74, 0xc6, 0x96, 0x6b, 0x7b, 0x13, 0x6d, 0xaa, 0x07, 0x87, 0x64,
0xd0, 0x28, 0x75, 0x98, 0x5b, 0x16, 0xb6, 0xcd, 0x9b, 0x6c, 0xae, 0xda, 0xe6, 0x6b, 0xf6, 0xe9,
0x12, 0xf4, 0x3c, 0xb4, 0xdd, 0x99, 0xa3, 0x79, 0x07, 0x9a, 0xef, 0x3d, 0xa6, 0x2e, 0xc7, 0x50,
0xb8, 0x33, 0xe7, 0xbd, 0x03, 0xd5, 0x7b, 0x4c, 0x43, 0x7f, 0x8b, 0x26, 0x01, 0x62, 0x7b, 0x13,
0x32, 0x68, 0x2e, 0xb4, 0x7f, 0xbc, 0x80, 0xae, 0x36, 0xa9, 0x99, 0xb1, 0xd5, 0xad, 0xc5, 0x56,
0x47, 0x0b, 0xd0, 0xcb, 0xd0, 0x33, 0x3c, 0x67, 0xaa, 0x33, 0x09, 0xdd, 0xf2, 0x3d, 0x87, 0xf9,
0x5b, 0x55, 0xcd, 0x40, 0xd1, 0x16, 0xb4, 0x2d, 0xd7, 0xc4, 0x1f, 0x0a, 0xa7, 0x6c, 0x33, 0x3c,
0x4a, 0x91, 0xca, 0x19, 0xa2, 0x5d, 0x3a, 0x97, 0x29, 0x1d, 0xac, 0xf0, 0x93, 0xd0, 0x8c, 0x14,
0xfa, 0x36, 0xb1, 0x3e, 0xc2, 0x83, 0x0e, 0xd7, 0xa2, 0x80, 0x8d, 0xac, 0x8f, 0x30, 0x2d, 0x92,
0x2d, 0x97, 0x60, 0x3f, 0xd0, 0x84, 0x51, 0x0e, 0xba, 0x2c, 0x6d, 0x75, 0x39, 0x54, 0xf8, 0x92,
0xf2, 0xdb, 0x0a, 0xf4, 0xd2, 0x88, 0xd0, 0x00, 0x96, 0x0f, 0x18, 0x24, 0xb4, 0x9e, 0xf0, 0x97,
0xa2, 0xc5, 0x2e, 0xed, 0xa0, 0x35, 0x46, 0x0b, 0x33, 0x9e, 0xa6, 0xda, 0xe6, 0x30, 0xb6, 0x01,
0x35, 0x02, 0xce, 0x1e, 0xcb, 0x94, 0x55, 0x86, 0xb2, 0xc5, 0x20, 0x2c, 0x4f, 0x0e, 0x60, 0x99,
0xb3, 0x11, 0x9a, 0x4e, 0xf8, 0x4b, 0x47, 0xc6, 0x33, 0x8b, 0x61, 0xe5, 0xa6, 0x13, 0xfe, 0xa2,
0x6d, 0xe8, 0xf0, 0x2d, 0xa7, 0xba, 0xaf, 0x3b, 0xa1, 0xe1, 0xbc, 0x58, 0xe8, 0xee, 0xb7, 0xf1,
0xf1, 0xfb, 0x34, 0x7a, 0xec, 0xeb, 0x96, 0xaf, 0x72, 0x41, 0xef, 0xb3, 0x55, 0x68, 0x0d, 0x64,
0xbe, 0xcb, 0x81, 0x65, 0x63, 0x61, 0x82, 0xcb, 0x2c, 0x19, 0xf7, 0x18, 0xfc, 0x96, 0x65, 0x63,
0x6e, 0x65, 0x11, 0x0b, 0x4c, 0xb4, 0x4d, 0x6e, 0x64, 0x0c, 0x42, 0x05, 0xab, 0xfc, 0xb5, 0x0a,
0x2b, 0xd4, 0xd7, 0x84, 0xdb, 0x9d, 0x22, 0x1a, 0x5d, 0x04, 0x30, 0x49, 0xa0, 0xa5, 0x22, 0x52,
0xcb, 0x24, 0xc1, 0x1e, 0x0f, 0x4a, 0x6f, 0x85, 0x01, 0xa7, 0x5a, 0x5e, 0xeb, 0x66, 0x7c, 0x3f,
0x9f, 0xdc, 0x9e, 0xaa, 0x1b, 0xbe, 0x0c, 0x5d, 0xe2, 0xcd, 0x7c, 0x03, 0x6b, 0xa9, 0xae, 0xa4,
0xc3, 0x81, 0x7b, 0xc5, 0x31, 0xb3, 0x51, 0xd8, 0x95, 0x27, 0xa2, 0xdb, 0xf2, 0xe9, 0x12, 0x5c,
0x33, 0x9b, 0xe0, 0x6e, 0x43, 0x9f, 0xb9, 0x9f, 0x36, 0xf5, 0x08, 0x6f, 0xee, 0x84, 0xd7, 0x66,
0xbc, 0x29, 0x3a, 0x85, 0xbb, 0x4b, 0x26, 0xfb, 0x62, 0xaa, 0xda, 0x63, 0x4b, 0xc3, 0x5f, 0xa2,
0xfc, 0xac, 0x02, 0xab, 0xa2, 0x59, 0x3c, 0xbd, 0x62, 0xcb, 0xd2, 0x4c, 0x18, 0x35, 0xab, 0x4f,
0x68, 0xbf, 0x6a, 0x0b, 0x94, 0x41, 0xf5, 0x82, 0x32, 0x28, 0xdd, 0x82, 0x34, 0x72, 0x2d, 0xc8,
0x75, 0xa8, 0x13, 0xc3, 0x9b, 0x62, 0xa6, 0x86, 0xde, 0xe6, 0xc5, 0x22, 0x35, 0x6c, 0xeb, 0x81,
0x3e, 0xa2, 0x93, 0x54, 0x3e, 0x57, 0xf9, 0x87, 0x04, 0xdd, 0x11, 0xd6, 0x7d, 0xe3, 0x30, 0x14,
0xc6, 0x1b, 0x50, 0xf5, 0xf1, 0x23, 0x21, 0x8b, 0x97, 0x4a, 0x04, 0x9d, 0x5a, 0xa2, 0xd2, 0x05,
0xe8, 0x05, 0x68, 0x9b, 0x8e, 0x1d, 0xc5, 0xa2, 0x0a, 0x0b, 0x0c, 0x60, 0x3a, 0xb6, 0x08, 0x44,
0x19, 0xfa, 0xab, 0x39, 0xfa, 0xd7, 0xe1, 0xcc, 0x81, 0xef, 0x39, 0x1a, 0xab, 0xd3, 0x35, 0x9b,
0x95, 0xe7, 0x4c, 0x58, 0x4d, 0xb5, 0x4f, 0x07, 0x12, 0x55, 0x7b, 0xcc, 0x6b, 0xfd, 0x04, 0xbc,
0xfe, 0x53, 0x82, 0xce, 0xb7, 0xe8, 0x50, 0xc8, 0xea, 0x9b, 0x49, 0x56, 0x5f, 0x2e, 0x61, 0x55,
0xc5, 0x81, 0x6f, 0xe1, 0x23, 0xfc, 0x5f, 0xc6, 0xec, 0x9f, 0x25, 0x18, 0x8e, 0x8e, 0x5d, 0x43,
0xe5, 0xde, 0x74, 0x7a, 0x93, 0xbf, 0x0c, 0xdd, 0xa3, 0x54, 0x93, 0xc4, 0x99, 0xee, 0x1c, 0x25,
0xbb, 0x24, 0x15, 0xe4, 0xb0, 0x16, 0x12, 0xcc, 0x86, 0xc1, 0xed, 0x95, 0x22, 0xaa, 0x33, 0xc4,
0xb1, 0xe0, 0xd0, 0xf7, 0xd3, 0x40, 0xc5, 0x87, 0x95, 0x82, 0x79, 0xe8, 0x3c, 0x2c, 0x8b, 0x86,
0x4c, 0x24, 0x31, 0xee, 0x83, 0x26, 0xcd, 0x61, 0xf1, 0x91, 0x82, 0x65, 0xe6, 0x0b, 0x20, 0x93,
0xaa, 0x2f, 0xaa, 0x9c, 0xcd, 0x9c, 0x7a, 0x4c, 0xa2, 0xfc, 0x41, 0x82, 0xd5, 0x77, 0x75, 0xd7,
0xf4, 0x0e, 0x0e, 0x4e, 0x2f, 0xb9, 0x2d, 0x48, 0x15, 0xd5, 0x8b, 0xb6, 0xeb, 0xa9, 0x45, 0xe8,
0x55, 0x38, 0xe3, 0xf3, 0xe8, 0x65, 0xa6, 0x45, 0x5b, 0x55, 0xe5, 0x70, 0x20, 0x12, 0xd9, 0x6f,
0x2a, 0x80, 0x68, 0xc4, 0xbd, 0xa9, 0xdb, 0xba, 0x6b, 0xe0, 0xa7, 0x27, 0xfd, 0x0a, 0xf4, 0x52,
0x79, 0x22, 0x3a, 0xd9, 0x4f, 0x26, 0x0a, 0x82, 0x6e, 0x43, 0x6f, 0xcc, 0x51, 0x69, 0x3e, 0xd6,
0x89, 0xe7, 0xb2, 0x00, 0xd8, 0x2b, 0xee, 0xcc, 0xef, 0xf9, 0xd6, 0x64, 0x82, 0xfd, 0x2d, 0xcf,
0x35, 0x79, 0xa4, 0xee, 0x8e, 0x43, 0x32, 0xe9, 0x52, 0xe6, 0x5b, 0x51, 0xd2, 0x0c, 0x3b, 0x42,
0x88, 0xb2, 0x26, 0x13, 0x05, 0xc1, 0xba, 0x1d, 0x0b, 0x22, 0x8e, 0x98, 0x32, 0x1f, 0x18, 0x95,
0x1f, 0xcc, 0x14, 0x24, 0x31, 0xe5, 0x77, 0x12, 0xa0, 0xa8, 0x37, 0x62, 0xdd, 0x08, 0xb3, 0xb0,
0xec, 0x52, 0xa9, 0x20, 0x70, 0x5f, 0x80, 0x96, 0x19, 0xae, 0x14, 0x1e, 0x11, 0x03, 0xa8, 0xcf,
0x70, 0x36, 0x34, 0x9a, 0xf1, 0xb0, 0x19, 0x56, 0xda, 0x1c, 0x78, 0x87, 0xc1, 0xd2, 0x39, 0xb0,
0x96, 0xcd, 0x81, 0xc9, 0x73, 0x87, 0x7a, 0xea, 0xdc, 0x41, 0xf9, 0xa4, 0x02, 0x32, 0x8b, 0x67,
0x5b, 0x71, 0x53, 0xb2, 0x10, 0xd1, 0x97, 0xa1, 0x2b, 0xee, 0xbe, 0x52, 0x84, 0x77, 0x1e, 0x25,
0x36, 0x43, 0xd7, 0xe0, 0x2c, 0x9f, 0xe4, 0x63, 0x32, 0xb3, 0xe3, 0x22, 0x93, 0x57, 0x7c, 0xe8,
0x11, 0x0f, 0xa4, 0x74, 0x28, 0x5c, 0x71, 0x1f, 0x56, 0x27, 0xb6, 0x37, 0xd6, 0x6d, 0x2d, 0xad,
0x9e, 0x92, 0x86, 0x2a, 0x6f, 0xf1, 0x67, 0xf9, 0xf2, 0x51, 0x52, 0x87, 0x04, 0xed, 0xd0, 0x36,
0x17, 0x3f, 0x8c, 0x8a, 0x00, 0x71, 0xa4, 0xbc, 0x48, 0x0d, 0xd0, 0xa1, 0x0b, 0xc3, 0x3f, 0xe5,
0x63, 0x09, 0xfa, 0x99, 0xa3, 0xc3, 0x6c, 0xb3, 0x24, 0xe5, 0x9b, 0xa5, 0x37, 0xa1, 0x4e, 0x3b,
0x08, 0x1e, 0xef, 0x7a, 0xc5, 0x85, 0x7c, 0x7a, 0x57, 0x95, 0x2f, 0x40, 0x57, 0x61, 0xa5, 0xe0,
0xa2, 0x45, 0xd8, 0x00, 0xca, 0xdf, 0xb3, 0x28, 0xbf, 0xaf, 0x41, 0x3b, 0x21, 0x8f, 0x39, 0x7d,
0xde, 0x22, 0x67, 0x41, 0x19, 0xf6, 0xaa, 0x79, 0xf6, 0x4a, 0x6e, 0x1a, 0xa8, 0xdd, 0x39, 0xd8,
0xe1, 0x15, 0xb2, 0x28, 0xd7, 0x1d, 0xec, 0xb0, 0xc6, 0x83, 0x9a, 0xe4, 0xcc, 0xe1, 0x1d, 0x1a,
0x77, 0xa7, 0x65, 0x77, 0xe6, 0xb0, 0xfe, 0x2c, 0xdd, 0x1c, 0x2c, 0x3f, 0xa1, 0x39, 0x68, 0xa6,
0x9b, 0x83, 0x94, 0x1f, 0xb5, 0xb2, 0x7e, 0xb4, 0x68, 0xeb, 0x75, 0x0d, 0x56, 0x0c, 0x1f, 0xeb,
0x01, 0x36, 0x6f, 0x1e, 0x6f, 0x45, 0x43, 0x83, 0x36, 0xcb, 0xab, 0x45, 0x43, 0xe8, 0x56, 0x7c,
0x86, 0xc2, 0xb5, 0xdc, 0x61, 0x5a, 0x2e, 0xee, 0x3d, 0x84, 0x6e, 0xb8, 0x92, 0xc3, 0xf0, 0xcc,
0xfe, 0xb2, 0x4d, 0x5f, 0xf7, 0xa9, 0x9a, 0xbe, 0x17, 0xa0, 0x1d, 0x9f, 0x24, 0x90, 0x41, 0x8f,
0x47, 0xbe, 0xe8, 0x28, 0x81, 0xa4, 0x82, 0x41, 0x3f, 0x1d, 0x0c, 0xfe, 0x52, 0x85, 0x5e, 0x5c,
0xee, 0x2f, 0x1c, 0x0a, 0x16, 0xb9, 0x30, 0xdc, 0x03, 0x39, 0x4e, 0xa8, 0x4c, 0x4a, 0x4f, 0xec,
0x58, 0xb2, 0xa7, 0xf3, 0xfd, 0x69, 0xc6, 0xe7, 0x52, 0xe7, 0x9f, 0xb5, 0x13, 0x9d, 0x7f, 0x9e,
0xf2, 0x5e, 0xe9, 0x3a, 0x9c, 0x8b, 0x92, 0x68, 0x8a, 0x6d, 0x5e, 0x4d, 0x9f, 0x0d, 0x07, 0xf7,
0x93, 0xec, 0x97, 0xb8, 0xf1, 0x72, 0x99, 0x1b, 0x67, 0xd5, 0xd8, 0xcc, 0xa9, 0x31, 0x7f, 0xbd,
0xd5, 0x2a, 0xba, 0xde, 0xba, 0x0f, 0x2b, 0xf7, 0x5d, 0x32, 0x1b, 0x13, 0xc3, 0xb7, 0xc6, 0x38,
0x3c, 0x1b, 0x5b, 0x48, 0xad, 0x43, 0x68, 0x8a, 0x78, 0xcd, 0x55, 0xda, 0x52, 0xa3, 0x7f, 0xe5,
0x87, 0x12, 0xac, 0xe6, 0xf7, 0x65, 0x16, 0x13, 0x07, 0x03, 0x29, 0x15, 0x0c, 0xbe, 0x03, 0x2b,
0xf1, 0xf6, 0x5a, 0x6a, 0xe7, 0x92, 0xca, 0xae, 0x80, 0x70, 0x15, 0xc5, 0x7b, 0x84, 0x30, 0xe5,
0x5f, 0x52, 0x74, 0xc2, 0x48, 0x61, 0x13, 0x76, 0x6e, 0x4a, 0x13, 0x94, 0xe7, 0xda, 0x96, 0x1b,
0xb5, 0xa7, 0x82, 0x47, 0x0e, 0x14, 0xed, 0xe9, 0xbb, 0xd0, 0x17, 0x93, 0xa2, 0x3c, 0xb3, 0x60,
0x65, 0xd5, 0xe3, 0xeb, 0xa2, 0x0c, 0x73, 0x05, 0x7a, 0xde, 0xc1, 0x41, 0x12, 0x1f, 0x0f, 0x94,
0x5d, 0x01, 0x15, 0x08, 0xbf, 0x09, 0x72, 0x38, 0xed, 0xa4, 0x99, 0xad, 0x2f, 0x16, 0x46, 0x15,
0xda, 0x0f, 0x24, 0x18, 0xa4, 0xf3, 0x5c, 0x82, 0xfd, 0x93, 0xd7, 0x69, 0x5f, 0x4b, 0x5f, 0x05,
0x5d, 0x79, 0x02, 0x3d, 0x31, 0x1e, 0x71, 0x96, 0xb0, 0xfe, 0x0d, 0x68, 0x45, 0xed, 0x03, 0x6a,
0xc3, 0xf2, 0x7d, 0xf7, 0xb6, 0xeb, 0x3d, 0x76, 0xe5, 0x25, 0xb4, 0x0c, 0xd5, 0x1b, 0xb6, 0x2d,
0x4b, 0xa8, 0x0b, 0xad, 0x51, 0xe0, 0x63, 0xdd, 0xb1, 0xdc, 0x89, 0x5c, 0x41, 0x3d, 0x80, 0x77,
0x2d, 0x12, 0x78, 0xbe, 0x65, 0xe8, 0xb6, 0x5c, 0x5d, 0xff, 0x08, 0x7a, 0x69, 0xaf, 0x47, 0x1d,
0x68, 0xee, 0x79, 0xc1, 0x3b, 0x1f, 0x5a, 0x24, 0x90, 0x97, 0xe8, 0xfc, 0x3d, 0x2f, 0xd8, 0xf7,
0x31, 0xc1, 0x6e, 0x20, 0x4b, 0x08, 0xa0, 0xf1, 0x9e, 0xbb, 0x6d, 0x91, 0x87, 0x72, 0x05, 0xad,
0x88, 0xa4, 0xac, 0xdb, 0xbb, 0xc2, 0x95, 0xe4, 0x2a, 0x5d, 0x1e, 0xfd, 0xd5, 0x90, 0x0c, 0x9d,
0x68, 0xca, 0xce, 0xfe, 0x7d, 0xb9, 0x8e, 0x5a, 0x50, 0xe7, 0x9f, 0x8d, 0x75, 0x13, 0xe4, 0x6c,
0x45, 0x49, 0xf7, 0xe4, 0x4c, 0x44, 0x20, 0x79, 0x89, 0x72, 0x26, 0x4a, 0x7a, 0x59, 0x42, 0x7d,
0x68, 0x27, 0x0a, 0x64, 0xb9, 0x42, 0x01, 0x3b, 0xfe, 0xd4, 0x10, 0xa5, 0x32, 0x27, 0x81, 0xea,
0x7d, 0x9b, 0x4a, 0xa2, 0xb6, 0x7e, 0x13, 0x9a, 0x61, 0x38, 0xa2, 0x53, 0x85, 0x88, 0xe8, 0xaf,
0xbc, 0x84, 0xce, 0x40, 0x37, 0x75, 0x29, 0x2f, 0x4b, 0x08, 0x41, 0x2f, 0xfd, 0x60, 0x42, 0xae,
0x6c, 0xfe, 0xa2, 0x03, 0xc0, 0xeb, 0x35, 0xcf, 0xf3, 0x4d, 0x34, 0x05, 0xb4, 0x83, 0x03, 0x9a,
0x8b, 0x3c, 0x37, 0xcc, 0x23, 0x04, 0x5d, 0x2b, 0x29, 0x6b, 0xf2, 0x53, 0x05, 0xa9, 0xc3, 0xb2,
0xc6, 0x35, 0x33, 0x5d, 0x59, 0x42, 0x0e, 0xc3, 0x78, 0xcf, 0x72, 0xf0, 0x3d, 0xcb, 0x78, 0x18,
0x15, 0x7a, 0xe5, 0x18, 0x33, 0x53, 0x43, 0x8c, 0x99, 0xb0, 0x2f, 0x7e, 0x46, 0x81, 0x6f, 0xb9,
0x93, 0xf0, 0x6a, 0x4f, 0x59, 0x42, 0x8f, 0xe0, 0xec, 0x0e, 0x66, 0xd8, 0x2d, 0x12, 0x58, 0x06,
0x09, 0x11, 0x6e, 0x96, 0x23, 0xcc, 0x4d, 0x3e, 0x21, 0x4a, 0x1b, 0xfa, 0x99, 0x87, 0x4d, 0x68,
0xbd, 0xf8, 0x76, 0xb0, 0xe8, 0x11, 0xd6, 0xf0, 0xd5, 0x85, 0xe6, 0x46, 0xd8, 0x2c, 0xe8, 0xa5,
0x1f, 0xef, 0xa0, 0xff, 0x2f, 0xdb, 0x20, 0xf7, 0x3e, 0x61, 0xb8, 0xbe, 0xc8, 0xd4, 0x08, 0xd5,
0x03, 0x6e, 0x4f, 0xf3, 0x50, 0x15, 0xbe, 0x0d, 0x19, 0x3e, 0xe9, 0x56, 0x55, 0x59, 0x42, 0xdf,
0x85, 0x33, 0xb9, 0x57, 0x14, 0xe8, 0x4b, 0xc5, 0xfd, 0x7a, 0xf1, 0x63, 0x8b, 0x79, 0x18, 0x1e,
0x64, 0xbd, 0xa1, 0x9c, 0xfa, 0xdc, 0xab, 0x9b, 0xc5, 0xa9, 0x4f, 0x6c, 0xff, 0x24, 0xea, 0x4f,
0x8c, 0x61, 0xc6, 0xdc, 0x26, 0xdb, 0x39, 0xbc, 0x56, 0x84, 0xa2, 0xf4, 0x29, 0xc7, 0x70, 0x63,
0xd1, 0xe9, 0x49, 0xeb, 0x4a, 0xbf, 0x16, 0x28, 0x16, 0x5a, 0xe1, 0x0b, 0x87, 0x62, 0xeb, 0x2a,
0x7e, 0x7c, 0xa0, 0x2c, 0xa1, 0x7b, 0xa9, 0x68, 0x88, 0x5e, 0x2e, 0x53, 0x4e, 0xfa, 0x3c, 0x61,
0x9e, 0xdc, 0x34, 0x80, 0x1d, 0x1c, 0xdc, 0xc5, 0x81, 0x6f, 0x19, 0x24, 0xbb, 0xa9, 0xf8, 0x89,
0x27, 0x84, 0x9b, 0xbe, 0x32, 0x77, 0x5e, 0x44, 0xf6, 0x18, 0xda, 0x3b, 0x38, 0x10, 0x87, 0x43,
0x04, 0x95, 0xae, 0x0c, 0x67, 0x84, 0x28, 0xd6, 0xe6, 0x4f, 0x4c, 0x46, 0x94, 0xcc, 0xe3, 0x05,
0x54, 0x2a, 0xdb, 0xfc, 0x93, 0x8a, 0xe2, 0x88, 0x52, 0xf2, 0x1a, 0x42, 0x59, 0xda, 0xfc, 0x37,
0x40, 0x8b, 0xa5, 0x08, 0x9a, 0x7a, 0xfe, 0x97, 0x21, 0x9e, 0x41, 0x86, 0xf8, 0x00, 0xfa, 0x99,
0xbb, 0xf0, 0x62, 0x7d, 0x16, 0x5f, 0x98, 0xcf, 0x33, 0xf9, 0x31, 0xa0, 0xfc, 0x6d, 0x76, 0x71,
0xa8, 0x28, 0xbd, 0xf5, 0x9e, 0x87, 0xe3, 0x7d, 0xfe, 0x9c, 0x24, 0xaa, 0x5e, 0x5f, 0x29, 0xf3,
0xd6, 0xcc, 0xc9, 0xe5, 0xe7, 0x1f, 0x48, 0x9f, 0x7d, 0xa2, 0xf9, 0x00, 0xfa, 0x99, 0x3b, 0x9e,
0x62, 0xed, 0x16, 0x5f, 0x04, 0xcd, 0xdb, 0xfd, 0x33, 0x8c, 0xc8, 0x26, 0xac, 0x14, 0x9c, 0xde,
0xa3, 0xc2, 0x2c, 0x52, 0x7e, 0xcc, 0x3f, 0x8f, 0xa1, 0x11, 0x34, 0xf8, 0x4d, 0x0e, 0x7a, 0xb1,
0xb8, 0x63, 0x48, 0xdc, 0xf2, 0x0c, 0xe7, 0xdd, 0x05, 0x91, 0x99, 0x1d, 0xf0, 0x4d, 0xeb, 0x2c,
0x84, 0xa1, 0xc2, 0x8b, 0xc0, 0xe4, 0x05, 0xcc, 0x70, 0xfe, 0x9d, 0x4b, 0xb8, 0xe9, 0xb3, 0xce,
0x25, 0x37, 0x5f, 0x7f, 0xb0, 0x39, 0xb1, 0x82, 0xc3, 0xd9, 0x98, 0x0a, 0xe9, 0x2a, 0x9f, 0xf9,
0x9a, 0xe5, 0x89, 0xaf, 0xab, 0x21, 0x69, 0x57, 0xd9, 0x4e, 0x57, 0x19, 0x2f, 0xd3, 0xf1, 0xb8,
0xc1, 0x7e, 0xaf, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0x98, 0x70, 0x46, 0x0e, 0x4c, 0x30, 0x00,
0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

View File

@@ -507,8 +507,9 @@ func (broker *globalMetaBroker) releaseSegmentReferLock(ctx context.Context, tas
defer cancel()
releaseSegReferLockReq := &datapb.ReleaseSegmentLockRequest{
TaskID: taskID,
NodeID: Params.QueryCoordCfg.GetNodeID(),
TaskID: taskID,
NodeID: Params.QueryCoordCfg.GetNodeID(),
SegmentIDs: segmentIDs,
}
if err := retry.Do(ctx, func() error {

View File

@@ -0,0 +1,386 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package querycoord
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/golang/protobuf/proto"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/kv"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util"
)
type extraIndexInfo struct {
indexID UniqueID
indexName string
indexParams []*commonpb.KeyValuePair
indexSize uint64
indexFilePaths []string
}
type handoffTaskState int32
const (
// the handoff task has just been watched from etcd
handoffTaskInit handoffTaskState = iota
// we have been notified that the segment's index is ready
handoffTaskReady
// we've sent the handoff task to the scheduler and are waiting for the response
handoffTaskTriggered
// task done, waiting to be cleaned
handoffTaskDone
// handoff canceled because the collection/partition has been released, or for other reasons
handoffTaskCancel
)
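// A task normally advances handoffTaskInit -> handoffTaskReady -> handoffTaskTriggered -> handoffTaskDone
// and is then cleaned from etcd; if request verification fails at any point, the task moves to handoffTaskCancel instead.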
var (
// Errors
ErrHandoffRequestInvalid = errors.New("invalid handoff request")
)
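// HandOffTask tracks a single handoff request: the segment to hand off, its current state,
// and whether the segment reference lock is currently held.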
type HandOffTask struct {
segmentInfo *querypb.SegmentInfo
state handoffTaskState
locked bool
}
type HandoffHandler struct {
ctx context.Context
cancel context.CancelFunc
client kv.MetaKv
revision int64
taskMutex sync.Mutex
tasks map[int64]*HandOffTask
notify chan bool
meta Meta
scheduler *TaskScheduler
cluster Cluster
broker *globalMetaBroker
wg sync.WaitGroup
}
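// newHandoffHandler creates a HandoffHandler and reloads any pending handoff requests from etcd.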
func newHandoffHandler(ctx context.Context, client kv.MetaKv, meta Meta, cluster Cluster, scheduler *TaskScheduler, broker *globalMetaBroker) (*HandoffHandler, error) {
childCtx, cancel := context.WithCancel(ctx)
checker := &HandoffHandler{
ctx: childCtx,
cancel: cancel,
client: client,
tasks: make(map[int64]*HandOffTask, 1024),
notify: make(chan bool, 1024),
meta: meta,
scheduler: scheduler,
cluster: cluster,
broker: broker,
}
err := checker.reloadFromKV()
if err != nil {
log.Error("index checker reload from kv failed", zap.Error(err))
return nil, err
}
return checker, nil
}
func (handler *HandoffHandler) Start() {
handler.wg.Add(1)
go handler.schedule()
}
func (handler *HandoffHandler) Stop() {
handler.cancel()
close(handler.notify)
handler.wg.Wait()
}
// reloadFromKV reloads pending handoff requests from etcd; requests whose collection/partition is not loaded are marked as canceled.
func (handler *HandoffHandler) reloadFromKV() error {
_, handoffReqValues, version, err := handler.client.LoadWithRevision(util.HandoffSegmentPrefix)
if err != nil {
log.Error("reloadFromKV: LoadWithRevision from kv failed", zap.Error(err))
return err
}
handler.revision = version
handler.taskMutex.Lock()
defer handler.taskMutex.Unlock()
for _, value := range handoffReqValues {
segmentInfo := &querypb.SegmentInfo{}
err := proto.Unmarshal([]byte(value), segmentInfo)
if err != nil {
log.Error("reloadFromKV: unmarshal failed", zap.Any("error", err.Error()))
return err
}
isValid, _ := handler.verifyRequest(segmentInfo)
if isValid && Params.QueryCoordCfg.AutoHandoff {
// keep the request as a pending task; it will be handed off once its index is built
handler.tasks[segmentInfo.SegmentID] = &HandOffTask{
segmentInfo, handoffTaskInit, false,
}
log.Info("reloadFromKV: process handoff request done", zap.Int64("segmentId", segmentInfo.SegmentID))
} else {
handler.tasks[segmentInfo.SegmentID] = &HandOffTask{
segmentInfo, handoffTaskCancel, false,
}
log.Info("reloadFromKV: collection/partition has not been loaded, task canceled", zap.Int64("segmentId", segmentInfo.SegmentID))
}
}
return nil
}
func (handler *HandoffHandler) verifyRequest(req *querypb.SegmentInfo) (bool, *querypb.CollectionInfo) {
// if collection has not been loaded, then skip the segment
collectionInfo, err := handler.meta.getCollectionInfoByID(req.CollectionID)
if err != nil {
return false, nil
}
// if the partition has not been loaded, or has been released, skip handing off the segment
if collectionInfo.LoadType == querypb.LoadType_LoadPartition {
for _, id := range collectionInfo.PartitionIDs {
if id == req.PartitionID {
return true, collectionInfo
}
}
} else {
// Should not happen?
partitionReleased := false
for _, id := range collectionInfo.ReleasedPartitionIDs {
if id == req.PartitionID {
partitionReleased = true
}
}
if !partitionReleased {
return true, collectionInfo
}
}
return false, nil
}
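// enqueue registers a handoff request as a pending task and wakes up the scheduling loop.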
func (handler *HandoffHandler) enqueue(req *querypb.SegmentInfo) {
handler.taskMutex.Lock()
defer handler.taskMutex.Unlock()
handler.tasks[req.SegmentID] = &HandOffTask{
req, handoffTaskInit, false,
}
handler.notify <- false
}
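// schedule runs in its own goroutine, processing every pending task whenever the handler is notified or the timer fires.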
func (handler *HandoffHandler) schedule() {
defer handler.wg.Done()
timer := time.NewTimer(time.Second * 5)
for {
select {
case <-handler.ctx.Done():
return
case _, ok := <-handler.notify:
if ok {
handler.taskMutex.Lock()
log.Info("handoff task scheduled: ", zap.Int("task number", len(handler.tasks)))
for segmentID := range handler.tasks {
handler.process(segmentID)
}
handler.taskMutex.Unlock()
}
case <-timer.C:
handler.taskMutex.Lock()
log.Info("handoff task scheduled: ", zap.Int("task number", len(handler.tasks)))
for segmentID := range handler.tasks {
handler.process(segmentID)
}
handler.taskMutex.Unlock()
}
}
}
// process advances a task through its state machine; the caller must hold taskMutex
func (handler *HandoffHandler) process(segmentID int64) error {
task := handler.tasks[segmentID]
// if the task is canceled or done, clean it up together with the segments it overrides
switch task.state {
case handoffTaskCancel, handoffTaskDone:
overrideSegments := handler.getOverrideSegments(task)
for _, daughterSegmentID := range overrideSegments {
handler.clean(daughterSegmentID)
}
handler.clean(segmentID)
case handoffTaskInit:
isValid, collectionInfo := handler.verifyRequest(task.segmentInfo)
if !isValid || !Params.QueryCoordCfg.AutoHandoff {
task.state = handoffTaskCancel
log.Info("HandoffHandler: collection/partition has not been loaded, task canceled", zap.Int64("segmentID", task.segmentInfo.SegmentID))
return ErrHandoffRequestInvalid
}
// TODO, add segment lock here, if segment lock add failed,
if !task.locked {
if err := handler.broker.acquireSegmentsReferLock(handler.ctx, task.segmentInfo.SegmentID, []UniqueID{task.segmentInfo.SegmentID}); err != nil {
// if the lock cannot be acquired, there are three possible situations:
// 1. a temporary failure of the data coord -> retry
// 2. the collection has been dropped -> the request verification before locking should find the collection released and cancel the task
// 3. a compaction happened -> we should soon receive another handoff task that supersedes the current one
log.Warn("HandoffHandler: acquire segment reference lock failed", zap.Int64("segmentID", task.segmentInfo.SegmentID), zap.Error(err))
return fmt.Errorf("failed to acquire segment refer lock")
}
}
task.locked = true
// TODO: we should not poll the index info directly; waiting for a notification would be a better approach
indexInfo, err := handler.broker.getIndexInfo(handler.ctx, task.segmentInfo.CollectionID, task.segmentInfo.SegmentID, collectionInfo.Schema)
if err == nil {
// if the index exists, or indexing is not enabled, the segment is ready to load
task.segmentInfo.IndexInfos = indexInfo
// NOTE: if compaction happened recursively, all of the compacted segments should already be in our task list.
task := &HandOffTask{
task.segmentInfo, handoffTaskReady, true,
}
handler.tasks[task.segmentInfo.SegmentID] = task
handler.notify <- false
log.Info("HandoffHandler: enqueue indexed segments", zap.Int64("segmentID", task.segmentInfo.SegmentID))
}
case handoffTaskReady:
validHandoffReq, _ := handler.verifyRequest(task.segmentInfo)
if !validHandoffReq || !Params.QueryCoordCfg.AutoHandoff {
task.state = handoffTaskCancel
log.Info("HandoffHandler: collection/partition has not been loaded, task canceled", zap.Int64("segmentID", task.segmentInfo.SegmentID))
return ErrHandoffRequestInvalid
}
handler.triggerHandoff(task)
}
// the handoffTaskTriggered state doesn't need to be handled in this loop; it is handled by the goroutine started in triggerHandoff
return nil
}
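// clean releases the segment reference lock if it is held, removes the handoff key from etcd,
// and drops the task from the pending list.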
func (handler *HandoffHandler) clean(segmentID int64) {
task := handler.tasks[segmentID]
// go through all the tasks, check whether any of them is shadowed by the current task, and handle both
if task.locked {
if err := handler.broker.releaseSegmentReferLock(handler.ctx, task.segmentInfo.SegmentID, []UniqueID{task.segmentInfo.SegmentID}); err != nil {
log.Warn("HandoffHandler: release segment reference lock failed", zap.Int64("segmentID", task.segmentInfo.SegmentID), zap.Error(err))
return
}
task.locked = false
}
buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, task.segmentInfo.CollectionID, task.segmentInfo.PartitionID, task.segmentInfo.SegmentID)
err := handler.client.Remove(buildQuerySegmentPath)
if err != nil {
log.Warn("HandoffHandler: remove handoff segment from etcd failed", zap.Int64("segmentID", task.segmentInfo.SegmentID), zap.Error(err))
// just wait for next loop
return
}
log.Info("HandoffHandler: clean task", zap.Int32("state", int32(task.state)), zap.Int64("segmentID", task.segmentInfo.SegmentID))
delete(handler.tasks, segmentID)
}
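// getOverrideSegments recursively collects the pending tasks whose segments parentTask was compacted from;
// those segments are released together when parentTask is handed off or cleaned.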
func (handler *HandoffHandler) getOverrideSegments(parentTask *HandOffTask) []int64 {
var toRelease []int64
for segmentID, task := range handler.tasks {
for _, compactFrom := range parentTask.segmentInfo.CompactionFrom {
if segmentID == compactFrom {
releasedSegmentIDs := handler.getOverrideSegments(task)
toRelease = append(toRelease, releasedSegmentIDs...)
toRelease = append(toRelease, segmentID)
}
}
}
if len(toRelease) > 0 {
log.Info("HandoffHandler: find recursive compaction ",
zap.Int64("TargetSegment", parentTask.segmentInfo.SegmentID),
zap.Int64s("CompactedSegments", toRelease))
}
return toRelease
}
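// triggerHandoff builds a HandoffSegmentsRequest for the task (releasing the segments it was compacted from,
// recursively), enqueues it on the scheduler, and tracks completion in a background goroutine.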
func (handler *HandoffHandler) triggerHandoff(task *HandOffTask) {
log.Info("HandoffHandler: trigger handoff", zap.Any("segmentInfo", task.segmentInfo))
baseTask := newBaseTask(handler.ctx, querypb.TriggerCondition_Handoff)
// if recursive compaction happened, the previous segments also need to be released
toRelease := handler.getOverrideSegments(task)
toRelease = append(toRelease, task.segmentInfo.GetCompactionFrom()...)
handoffReq := &querypb.HandoffSegmentsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_HandoffSegments,
},
SegmentInfos: []*querypb.SegmentInfo{task.segmentInfo},
ReleasedSegments: toRelease,
}
handoffTask := &handoffTask{
baseTask: baseTask,
HandoffSegmentsRequest: handoffReq,
broker: handler.broker,
cluster: handler.cluster,
meta: handler.meta,
}
err := handler.scheduler.Enqueue(handoffTask)
if err != nil {
// just wait for the next cycle to reschedule
log.Error("HandoffHandler: handoffTask enqueue failed",
zap.Int64("segmentID", task.segmentInfo.SegmentID),
zap.Error(err))
return
}
log.Info("HandoffHandler: handoff task triggered successfully", zap.Int64("segmentID", task.segmentInfo.SegmentID))
handler.tasks[task.segmentInfo.SegmentID] = &HandOffTask{
task.segmentInfo, handoffTaskTriggered, task.locked,
}
go func() {
err := handoffTask.waitToFinish()
handler.taskMutex.Lock()
defer handler.taskMutex.Unlock()
if err != nil {
log.Warn("HandoffHandler: handoff task failed to execute",
zap.Int64("segmentID", task.segmentInfo.SegmentID),
zap.Error(err))
// wait for reschedule
handler.tasks[task.segmentInfo.SegmentID] = &HandOffTask{
task.segmentInfo, handoffTaskReady, task.locked,
}
return
}
// wait for cleanup
log.Info("HandoffHandler: handoffTask completed", zap.Int64("segmentID", task.segmentInfo.SegmentID))
handler.tasks[task.segmentInfo.SegmentID] = &HandOffTask{
task.segmentInfo, handoffTaskDone, task.locked,
}
}()
}

View File

@@ -0,0 +1,367 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package querycoord
import (
"context"
"fmt"
"math/rand"
"sync/atomic"
"testing"
"github.com/golang/protobuf/proto"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
)
var handoffHandlerTestDir = "/tmp/milvus_test/handoff_handler"
func TestHandoffHandlerReloadFromKV(t *testing.T) {
refreshParams()
baseCtx, cancel := context.WithCancel(context.Background())
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
defer etcdCli.Close()
assert.Nil(t, err)
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(baseCtx, kv, nil, idAllocator)
assert.Nil(t, err)
segmentInfo := &querypb.SegmentInfo{
SegmentID: defaultSegmentID,
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentState: commonpb.SegmentState_Sealed,
}
key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
value, err := proto.Marshal(segmentInfo)
assert.Nil(t, err)
err = kv.Save(key, string(value))
assert.Nil(t, err)
t.Run("Test_CollectionNotExist", func(t *testing.T) {
handoffHandler, err := newHandoffHandler(baseCtx, kv, meta, nil, nil, nil)
assert.Nil(t, err)
assert.True(t, len(handoffHandler.tasks) > 0)
for _, task := range handoffHandler.tasks {
assert.Equal(t, handoffTaskCancel, task.state)
}
})
err = kv.Save(key, string(value))
assert.Nil(t, err)
meta.addCollection(defaultCollectionID, querypb.LoadType_LoadPartition, genDefaultCollectionSchema(false))
t.Run("Test_PartitionNotExist", func(t *testing.T) {
handoffHandler, err := newHandoffHandler(baseCtx, kv, meta, nil, nil, nil)
assert.Nil(t, err)
assert.True(t, len(handoffHandler.tasks) > 0)
for _, task := range handoffHandler.tasks {
assert.Equal(t, handoffTaskCancel, task.state)
}
})
err = kv.Save(key, string(value))
assert.Nil(t, err)
meta.setLoadType(defaultCollectionID, querypb.LoadType_LoadCollection)
t.Run("Test_CollectionExist", func(t *testing.T) {
handoffHandler, err := newHandoffHandler(baseCtx, kv, meta, nil, nil, nil)
assert.Nil(t, err)
assert.True(t, len(handoffHandler.tasks) > 0)
for _, task := range handoffHandler.tasks {
assert.Equal(t, handoffTaskInit, task.state)
}
})
cancel()
}
func TestHandoffNotificationLoop(t *testing.T) {
refreshParams()
ctx := context.Background()
coord, err := startQueryCoord(ctx)
assert.NoError(t, err)
defer coord.Stop()
// Notify
segmentInfo := &querypb.SegmentInfo{
SegmentID: defaultSegmentID,
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentState: commonpb.SegmentState_Sealed,
}
key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
value, err := proto.Marshal(segmentInfo)
assert.NoError(t, err)
err = coord.kvClient.Save(key, string(value))
assert.NoError(t, err)
// Wait for the handoff task to be canceled and its key removed from etcd
for {
_, err := coord.kvClient.Load(key)
if err != nil {
break
}
}
}
func TestHandoff(t *testing.T) {
refreshParams()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
defer etcdCli.Close()
assert.Nil(t, err)
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(ctx, kv, nil, idAllocator)
assert.Nil(t, err)
rootCoord := newRootCoordMock(ctx)
indexCoord, err := newIndexCoordMock(handoffHandlerTestDir)
assert.Nil(t, err)
dataCoord := newDataCoordMock(ctx)
rootCoord.enableIndex = true
cm := storage.NewLocalChunkManager(storage.RootPath(handoffHandlerTestDir))
defer cm.RemoveWithPrefix("")
broker, err := newGlobalMetaBroker(ctx, rootCoord, dataCoord, indexCoord, cm)
assert.Nil(t, err)
taskScheduler := &TaskScheduler{
ctx: ctx,
cancel: cancel,
client: kv,
triggerTaskQueue: newTaskQueue(),
taskIDAllocator: idAllocator,
}
segmentInfo := &querypb.SegmentInfo{
SegmentID: defaultSegmentID,
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentState: commonpb.SegmentState_Sealed,
}
key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
value, err := proto.Marshal(segmentInfo)
assert.Nil(t, err)
t.Run("Test_ReqInvalid", func(t *testing.T) {
handoffHandler, err := newHandoffHandler(ctx, kv, meta, nil, taskScheduler, broker)
assert.Nil(t, err)
err = kv.Save(key, string(value))
assert.Nil(t, err)
handoffHandler.enqueue(segmentInfo)
err = handoffHandler.process(segmentInfo.SegmentID)
assert.ErrorIs(t, err, ErrHandoffRequestInvalid)
// Process this task until it is cleaned up
for {
_, ok := handoffHandler.tasks[segmentInfo.SegmentID]
if !ok {
break
}
handoffHandler.process(segmentInfo.SegmentID)
}
// Check whether the handoff event is removed
for {
_, err := kv.Load(key)
if err != nil {
break
}
}
})
t.Run("Test_CollectionReleased", func(t *testing.T) {
meta.addCollection(defaultCollectionID, querypb.LoadType_LoadCollection, genDefaultCollectionSchema(false))
handoffHandler, err := newHandoffHandler(ctx, kv, meta, nil, taskScheduler, broker)
assert.Nil(t, err)
err = kv.Save(key, string(value))
assert.Nil(t, err)
handoffHandler.enqueue(segmentInfo)
// Init -> Ready
err = handoffHandler.process(segmentInfo.SegmentID)
assert.NoError(t, err)
meta.releaseCollection(defaultCollectionID)
// Ready -> Cancel, because the collection has been released
err = handoffHandler.process(segmentInfo.SegmentID)
assert.ErrorIs(t, err, ErrHandoffRequestInvalid)
task := handoffHandler.tasks[segmentInfo.SegmentID]
assert.Equal(t, handoffTaskCancel, task.state)
assert.True(t, task.locked)
// Process this task until it is cleaned up
for {
task, ok := handoffHandler.tasks[segmentInfo.SegmentID]
if !ok {
break
}
log.Debug("process task",
zap.Int32("state", int32(task.state)),
zap.Bool("locked", task.locked))
handoffHandler.process(segmentInfo.SegmentID)
}
// Check whether the handoff event is removed
for {
_, err := kv.Load(key)
if err != nil {
break
}
}
assert.Equal(t, 0, dataCoord.segmentRefCount[segmentInfo.SegmentID])
})
t.Run("Test_SegmentCompacted", func(t *testing.T) {
meta.addCollection(defaultCollectionID, querypb.LoadType_LoadCollection, genDefaultCollectionSchema(false))
defer meta.releaseCollection(defaultCollectionID)
handoffHandler, err := newHandoffHandler(ctx, kv, meta, nil, taskScheduler, broker)
assert.Nil(t, err)
err = kv.Save(key, string(value))
assert.Nil(t, err)
handoffHandler.enqueue(segmentInfo)
newSegment := &querypb.SegmentInfo{
SegmentID: defaultSegmentID + 1,
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentState: commonpb.SegmentState_Sealed,
CompactionFrom: []UniqueID{defaultSegmentID},
}
newKey := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, defaultCollectionID, defaultPartitionID, newSegment.SegmentID)
value, err := proto.Marshal(newSegment)
assert.NoError(t, err)
err = kv.Save(newKey, string(value))
assert.NoError(t, err)
handoffHandler.enqueue(newSegment)
// Init -> Ready
err = handoffHandler.process(segmentInfo.SegmentID)
assert.NoError(t, err)
// Ready -> Triggered
err = handoffHandler.process(segmentInfo.SegmentID)
assert.NoError(t, err)
// Process the new segment task until it is cleaned up,
// expecting no error at any step
for {
task, ok := handoffHandler.tasks[newSegment.SegmentID]
if !ok {
break
}
// Mock that the task has succeeded
if task.state == handoffTaskTriggered {
task.state = handoffTaskDone
continue
}
err := handoffHandler.process(newSegment.SegmentID)
assert.NoError(t, err)
}
// The compacted segment should be removed
for {
_, err := kv.Load(key)
if err != nil {
break
}
}
// The new segment should also be removed
for {
_, err := kv.Load(newKey)
if err != nil {
break
}
}
assert.Equal(t, 0, dataCoord.segmentRefCount[segmentInfo.SegmentID])
assert.Equal(t, 0, dataCoord.segmentRefCount[newSegment.SegmentID])
})
t.Run("Test_Handoff", func(t *testing.T) {
meta.addCollection(defaultCollectionID, querypb.LoadType_LoadCollection, genDefaultCollectionSchema(false))
handoffHandler, err := newHandoffHandler(ctx, kv, meta, nil, taskScheduler, broker)
assert.Nil(t, err)
err = kv.Save(key, string(value))
assert.Nil(t, err)
handoffHandler.enqueue(segmentInfo)
// Process this task until it is cleaned up,
// expecting no error at any step
for {
task, ok := handoffHandler.tasks[segmentInfo.SegmentID]
if !ok {
break
}
// Mock that the task has succeeded
if task.state == handoffTaskTriggered {
task.state = handoffTaskDone
continue
}
err := handoffHandler.process(segmentInfo.SegmentID)
assert.NoError(t, err)
}
for {
_, err := kv.Load(key)
if err != nil {
break
}
}
})
}
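// A minimal helper sketch (assumed name, not defined elsewhere in this package)
// capturing the handoff notification key layout used throughout these tests:
// <util.HandoffSegmentPrefix>/<collectionID>/<partitionID>/<segmentID>.
func buildHandoffTestKey(collectionID, partitionID, segmentID UniqueID) string {
	return fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, collectionID, partitionID, segmentID)
}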

View File

@ -69,6 +69,7 @@ func waitLoadPartitionDone(ctx context.Context, queryCoord *QueryCoord, collecti
func waitLoadCollectionDone(ctx context.Context, queryCoord *QueryCoord, collectionID UniqueID) error {
for {
log.Debug("waiting for loading collection done...")
showCollectionReq := &querypb.ShowCollectionsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ShowPartitions,
@ -1062,9 +1063,8 @@ func TestLoadCollectionSyncSegmentsFail(t *testing.T) {
rollbackDone := waitLoadCollectionRollbackDone(queryCoord, loadCollectionReq.CollectionID)
assert.True(t, rollbackDone)
assert.NoError(t, node1.stop())
assert.NoError(t, queryCoord.Stop())
assert.NoError(t, removeAllSession())
node1.stop()
removeAllSession()
}
func Test_RepeatedLoadSamePartitions(t *testing.T) {

View File

@ -1,279 +0,0 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package querycoord
import (
"context"
"fmt"
"sync"
"github.com/golang/protobuf/proto"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/kv"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
)
type extraIndexInfo struct {
indexID UniqueID
indexName string
indexParams []*commonpb.KeyValuePair
indexSize uint64
indexFilePaths []string
}
// IndexChecker checks index
type IndexChecker struct {
ctx context.Context
cancel context.CancelFunc
client kv.MetaKv
revision int64
handoffReqChan chan *querypb.SegmentInfo
unIndexedSegmentsChan chan *querypb.SegmentInfo
indexedSegmentsChan chan *querypb.SegmentInfo
meta Meta
scheduler *TaskScheduler
cluster Cluster
broker *globalMetaBroker
wg sync.WaitGroup
}
func newIndexChecker(ctx context.Context, client kv.MetaKv, meta Meta, cluster Cluster, scheduler *TaskScheduler, broker *globalMetaBroker) (*IndexChecker, error) {
childCtx, cancel := context.WithCancel(ctx)
reqChan := make(chan *querypb.SegmentInfo, 1024)
unIndexChan := make(chan *querypb.SegmentInfo, 1024)
indexedChan := make(chan *querypb.SegmentInfo, 1024)
checker := &IndexChecker{
ctx: childCtx,
cancel: cancel,
client: client,
handoffReqChan: reqChan,
unIndexedSegmentsChan: unIndexChan,
indexedSegmentsChan: indexedChan,
meta: meta,
scheduler: scheduler,
cluster: cluster,
broker: broker,
}
err := checker.reloadFromKV()
if err != nil {
log.Error("index checker reload from kv failed", zap.Error(err))
return nil, err
}
return checker, nil
}
func (ic *IndexChecker) start() {
ic.wg.Add(2)
go ic.checkIndexLoop()
go ic.processHandoffAfterIndexDone()
}
func (ic *IndexChecker) close() {
ic.cancel()
ic.wg.Wait()
}
// reloadFromKV reload collection/partition, remove from etcd if failed.
func (ic *IndexChecker) reloadFromKV() error {
_, handoffReqValues, version, err := ic.client.LoadWithRevision(handoffSegmentPrefix)
if err != nil {
log.Error("reloadFromKV: LoadWithRevision from kv failed", zap.Error(err))
return err
}
ic.revision = version
for _, value := range handoffReqValues {
segmentInfo := &querypb.SegmentInfo{}
err := proto.Unmarshal([]byte(value), segmentInfo)
if err != nil {
log.Error("reloadFromKV: unmarshal failed", zap.Any("error", err.Error()))
return err
}
validHandoffReq, _ := ic.verifyHandoffReqValid(segmentInfo)
if validHandoffReq && Params.QueryCoordCfg.AutoHandoff {
// push the req to handoffReqChan and then wait to load after index created
// in case handoffReqChan is full, and block start process
go ic.enqueueHandoffReq(segmentInfo)
} else {
log.Info("reloadFromKV: collection/partition has not been loaded, remove req from etcd", zap.Any("segmentInfo", segmentInfo))
buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
err = ic.client.Remove(buildQuerySegmentPath)
if err != nil {
log.Error("reloadFromKV: remove handoff segment from etcd failed", zap.Error(err))
return err
}
}
log.Info("reloadFromKV: process handoff request done", zap.Any("segmentInfo", segmentInfo))
}
return nil
}
func (ic *IndexChecker) verifyHandoffReqValid(req *querypb.SegmentInfo) (bool, *querypb.CollectionInfo) {
// if collection has not been loaded, then skip the segment
collectionInfo, err := ic.meta.getCollectionInfoByID(req.CollectionID)
if err == nil {
// if partition has not been loaded or released, then skip handoff the segment
if collectionInfo.LoadType == querypb.LoadType_LoadPartition {
for _, id := range collectionInfo.PartitionIDs {
if id == req.PartitionID {
return true, collectionInfo
}
}
} else {
partitionReleased := false
for _, id := range collectionInfo.ReleasedPartitionIDs {
if id == req.PartitionID {
partitionReleased = true
}
}
if !partitionReleased {
return true, collectionInfo
}
}
}
return false, nil
}
func (ic *IndexChecker) enqueueHandoffReq(req *querypb.SegmentInfo) {
ic.handoffReqChan <- req
}
func (ic *IndexChecker) enqueueUnIndexSegment(info *querypb.SegmentInfo) {
ic.unIndexedSegmentsChan <- info
}
func (ic *IndexChecker) enqueueIndexedSegment(info *querypb.SegmentInfo) {
ic.indexedSegmentsChan <- info
}
func (ic *IndexChecker) checkIndexLoop() {
defer ic.wg.Done()
for {
select {
case <-ic.ctx.Done():
return
case segmentInfo := <-ic.handoffReqChan:
// TODO:: check whether the index exists in parallel, in case indexCoord cannot create the index normally, and then block the loop
log.Debug("checkIndexLoop: start check index for handoff segment", zap.Int64("segmentID", segmentInfo.SegmentID))
for {
validHandoffReq, collectionInfo := ic.verifyHandoffReqValid(segmentInfo)
if validHandoffReq && Params.QueryCoordCfg.AutoHandoff {
indexInfo, err := ic.broker.getIndexInfo(ic.ctx, segmentInfo.CollectionID, segmentInfo.SegmentID, collectionInfo.Schema)
if err == nil {
// if index exist or not enableIndex, ready to load
segmentInfo.IndexInfos = indexInfo
ic.enqueueIndexedSegment(segmentInfo)
break
}
// if segment has not been compacted and dropped, continue to wait for the build index to complete
segmentState, err := ic.broker.getSegmentStates(ic.ctx, segmentInfo.SegmentID)
if err != nil {
log.Warn("checkIndexLoop: get segment state failed", zap.Int64("segmentID", segmentInfo.SegmentID), zap.Error(err))
continue
}
if segmentState.State != commonpb.SegmentState_NotExist {
continue
}
log.Info("checkIndexLoop: segment has been compacted and dropped before handoff", zap.Int64("segmentID", segmentInfo.SegmentID))
}
buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
err := ic.client.Remove(buildQuerySegmentPath)
if err != nil {
log.Error("checkIndexLoop: remove handoff segment from etcd failed", zap.Error(err))
panic(err)
}
break
}
case segmentInfo := <-ic.unIndexedSegmentsChan:
//TODO:: check index after load collection/partition, some segments may don't has index when loading
log.Warn("checkIndexLoop: start check index for segment which has not loaded index", zap.Int64("segmentID", segmentInfo.SegmentID))
}
}
}
func (ic *IndexChecker) processHandoffAfterIndexDone() {
defer ic.wg.Done()
for {
select {
case <-ic.ctx.Done():
return
case segmentInfo := <-ic.indexedSegmentsChan:
collectionID := segmentInfo.CollectionID
partitionID := segmentInfo.PartitionID
segmentID := segmentInfo.SegmentID
log.Info("processHandoffAfterIndexDone: handoff segment start", zap.Any("segmentInfo", segmentInfo))
baseTask := newBaseTask(ic.ctx, querypb.TriggerCondition_Handoff)
handoffReq := &querypb.HandoffSegmentsRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_HandoffSegments,
},
SegmentInfos: []*querypb.SegmentInfo{segmentInfo},
}
handoffTask := &handoffTask{
baseTask: baseTask,
HandoffSegmentsRequest: handoffReq,
broker: ic.broker,
cluster: ic.cluster,
meta: ic.meta,
}
err := ic.scheduler.Enqueue(handoffTask)
if err != nil {
log.Error("processHandoffAfterIndexDone: handoffTask enqueue failed", zap.Error(err))
panic(err)
}
go func() {
err := handoffTask.waitToFinish()
if err != nil {
// collection or partition may have been released before handoffTask enqueue
log.Warn("processHandoffAfterIndexDone: handoffTask failed", zap.Error(err))
}
log.Info("processHandoffAfterIndexDone: handoffTask completed", zap.Any("segment infos", handoffTask.SegmentInfos))
}()
// once task enqueue, etcd data can be cleaned, handoffTask will recover from taskScheduler's reloadFromKV()
buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, collectionID, partitionID, segmentID)
err = ic.client.Remove(buildQuerySegmentPath)
if err != nil {
log.Error("processHandoffAfterIndexDone: remove handoff segment from etcd failed", zap.Error(err))
panic(err)
}
}
}
}

View File

@ -1,281 +0,0 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package querycoord
import (
"context"
"fmt"
"math/rand"
"sync/atomic"
"testing"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/storage"
"github.com/stretchr/testify/assert"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/etcd"
)
var indexCheckerTestDir = "/tmp/milvus_test/index_checker"
func TestReloadFromKV(t *testing.T) {
refreshParams()
baseCtx, cancel := context.WithCancel(context.Background())
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
defer etcdCli.Close()
assert.Nil(t, err)
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(baseCtx, kv, nil, idAllocator)
assert.Nil(t, err)
segmentInfo := &querypb.SegmentInfo{
SegmentID: defaultSegmentID,
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentState: commonpb.SegmentState_Sealed,
}
key := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
value, err := proto.Marshal(segmentInfo)
assert.Nil(t, err)
err = kv.Save(key, string(value))
assert.Nil(t, err)
t.Run("Test_CollectionNotExist", func(t *testing.T) {
indexChecker, err := newIndexChecker(baseCtx, kv, meta, nil, nil, nil)
assert.Nil(t, err)
assert.Equal(t, 0, len(indexChecker.handoffReqChan))
})
err = kv.Save(key, string(value))
assert.Nil(t, err)
meta.addCollection(defaultCollectionID, querypb.LoadType_LoadPartition, genDefaultCollectionSchema(false))
t.Run("Test_PartitionNotExist", func(t *testing.T) {
indexChecker, err := newIndexChecker(baseCtx, kv, meta, nil, nil, nil)
assert.Nil(t, err)
assert.Equal(t, 0, len(indexChecker.handoffReqChan))
})
err = kv.Save(key, string(value))
assert.Nil(t, err)
meta.setLoadType(defaultCollectionID, querypb.LoadType_LoadCollection)
t.Run("Test_CollectionExist", func(t *testing.T) {
indexChecker, err := newIndexChecker(baseCtx, kv, meta, nil, nil, nil)
assert.Nil(t, err)
for {
if len(indexChecker.handoffReqChan) > 0 {
break
}
}
})
cancel()
}
func TestCheckIndexLoop(t *testing.T) {
refreshParams()
ctx, cancel := context.WithCancel(context.Background())
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
defer etcdCli.Close()
assert.Nil(t, err)
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(ctx, kv, nil, idAllocator)
assert.Nil(t, err)
rootCoord := newRootCoordMock(ctx)
indexCoord, err := newIndexCoordMock(indexCheckerTestDir)
assert.Nil(t, err)
rootCoord.enableIndex = true
cm := storage.NewLocalChunkManager(storage.RootPath(indexCheckerTestDir))
defer cm.RemoveWithPrefix("")
broker, err := newGlobalMetaBroker(ctx, rootCoord, nil, indexCoord, cm)
assert.Nil(t, err)
segmentInfo := &querypb.SegmentInfo{
SegmentID: defaultSegmentID,
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentState: commonpb.SegmentState_Sealed,
}
key := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
value, err := proto.Marshal(segmentInfo)
assert.Nil(t, err)
t.Run("Test_ReqInValid", func(t *testing.T) {
childCtx, childCancel := context.WithCancel(context.Background())
indexChecker, err := newIndexChecker(childCtx, kv, meta, nil, nil, broker)
assert.Nil(t, err)
err = kv.Save(key, string(value))
assert.Nil(t, err)
indexChecker.enqueueHandoffReq(segmentInfo)
indexChecker.wg.Add(1)
go indexChecker.checkIndexLoop()
for {
_, err := kv.Load(key)
if err != nil {
break
}
}
assert.Equal(t, 0, len(indexChecker.indexedSegmentsChan))
childCancel()
indexChecker.wg.Wait()
})
meta.addCollection(defaultCollectionID, querypb.LoadType_LoadCollection, genDefaultCollectionSchema(false))
t.Run("Test_GetIndexInfo", func(t *testing.T) {
childCtx, childCancel := context.WithCancel(context.Background())
indexChecker, err := newIndexChecker(childCtx, kv, meta, nil, nil, broker)
assert.Nil(t, err)
indexChecker.enqueueHandoffReq(segmentInfo)
indexChecker.wg.Add(1)
go indexChecker.checkIndexLoop()
for {
if len(indexChecker.indexedSegmentsChan) > 0 {
break
}
}
childCancel()
indexChecker.wg.Wait()
})
cancel()
}
func TestHandoffNotExistSegment(t *testing.T) {
refreshParams()
ctx, cancel := context.WithCancel(context.Background())
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
defer etcdCli.Close()
assert.Nil(t, err)
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(ctx, kv, nil, idAllocator)
assert.Nil(t, err)
rootCoord := newRootCoordMock(ctx)
rootCoord.enableIndex = true
indexCoord, err := newIndexCoordMock(indexCheckerTestDir)
assert.Nil(t, err)
indexCoord.returnError = true
dataCoord := newDataCoordMock(ctx)
dataCoord.segmentState = commonpb.SegmentState_NotExist
cm := storage.NewLocalChunkManager(storage.RootPath(indexCheckerTestDir))
defer cm.RemoveWithPrefix("")
broker, err := newGlobalMetaBroker(ctx, rootCoord, dataCoord, indexCoord, cm)
assert.Nil(t, err)
meta.addCollection(defaultCollectionID, querypb.LoadType_LoadCollection, genDefaultCollectionSchema(false))
segmentInfo := &querypb.SegmentInfo{
SegmentID: defaultSegmentID,
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentState: commonpb.SegmentState_Sealed,
}
key := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
value, err := proto.Marshal(segmentInfo)
assert.Nil(t, err)
indexChecker, err := newIndexChecker(ctx, kv, meta, nil, nil, broker)
assert.Nil(t, err)
err = kv.Save(key, string(value))
assert.Nil(t, err)
indexChecker.enqueueHandoffReq(segmentInfo)
indexChecker.wg.Add(1)
go indexChecker.checkIndexLoop()
for {
_, err := kv.Load(key)
if err != nil {
break
}
}
assert.Equal(t, 0, len(indexChecker.indexedSegmentsChan))
cancel()
indexChecker.wg.Wait()
}
func TestProcessHandoffAfterIndexDone(t *testing.T) {
refreshParams()
ctx, cancel := context.WithCancel(context.Background())
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
assert.Nil(t, err)
defer etcdCli.Close()
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
id := UniqueID(rand.Int31())
idAllocator := func() (UniqueID, error) {
newID := atomic.AddInt64(&id, 1)
return newID, nil
}
meta, err := newMeta(ctx, kv, nil, idAllocator)
assert.Nil(t, err)
taskScheduler := &TaskScheduler{
ctx: ctx,
cancel: cancel,
client: kv,
triggerTaskQueue: newTaskQueue(),
taskIDAllocator: idAllocator,
}
indexChecker, err := newIndexChecker(ctx, kv, meta, nil, taskScheduler, nil)
assert.Nil(t, err)
indexChecker.wg.Add(1)
go indexChecker.processHandoffAfterIndexDone()
segmentInfo := &querypb.SegmentInfo{
SegmentID: defaultSegmentID,
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentState: commonpb.SegmentState_Sealed,
}
key := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
value, err := proto.Marshal(segmentInfo)
assert.Nil(t, err)
err = kv.Save(key, string(value))
assert.Nil(t, err)
indexChecker.enqueueIndexedSegment(segmentInfo)
for {
_, err := kv.Load(key)
if err != nil {
break
}
}
assert.Equal(t, false, taskScheduler.triggerTaskQueue.taskEmpty())
cancel()
indexChecker.wg.Wait()
}

View File

@ -35,7 +35,6 @@ import (
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
@ -71,14 +70,14 @@ type Meta interface {
hasPartition(collectionID UniqueID, partitionID UniqueID) bool
hasReleasePartition(collectionID UniqueID, partitionID UniqueID) bool
releasePartitions(collectionID UniqueID, partitionIDs []UniqueID) error
getPartitionStatesByID(collectionID UniqueID, partitionID UniqueID) (*querypb.PartitionStates, error)
showSegmentInfos(collectionID UniqueID, partitionIDs []UniqueID) []*querypb.SegmentInfo
getSegmentInfoByID(segmentID UniqueID) (*querypb.SegmentInfo, error)
getSegmentInfosByNode(nodeID int64) []*querypb.SegmentInfo
getSegmentInfosByNodeAndCollection(nodeID, collectionID int64) []*querypb.SegmentInfo
saveSegmentInfo(segment *querypb.SegmentInfo) error
getPartitionStatesByID(collectionID UniqueID, partitionID UniqueID) (*querypb.PartitionStates, error)
saveGlobalSealedSegInfos(saves col2SegmentInfos, removes col2SegmentInfos) error
removeGlobalSealedSegInfos(collectionID UniqueID, partitionIDs []UniqueID) error
getDmChannel(dmChannelName string) (*querypb.DmChannelWatchInfo, bool)
getDmChannelInfosByNodeID(nodeID int64) []*querypb.DmChannelWatchInfo
@ -88,13 +87,8 @@ type Meta interface {
getDeltaChannelsByCollectionID(collectionID UniqueID) ([]*datapb.VchannelInfo, error)
setDeltaChannel(collectionID UniqueID, info []*datapb.VchannelInfo) error
getQueryChannelInfoByID(collectionID UniqueID) *querypb.QueryChannelInfo
setLoadType(collectionID UniqueID, loadType querypb.LoadType) error
setLoadPercentage(collectionID UniqueID, partitionID UniqueID, percentage int64, loadType querypb.LoadType) error
//printMeta()
saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2SealedSegmentChangeInfos, error)
removeGlobalSealedSegInfos(collectionID UniqueID, partitionIDs []UniqueID) (col2SealedSegmentChangeInfos, error)
getWatchedChannelsByNodeID(nodeID int64) *querypb.UnsubscribeChannelInfo
@ -121,8 +115,6 @@ type MetaReplica struct {
//sync.RWMutex
collectionInfos map[UniqueID]*querypb.CollectionInfo
collectionMu sync.RWMutex
queryChannelInfos map[UniqueID]*querypb.QueryChannelInfo
channelMu sync.RWMutex
deltaChannelInfos map[UniqueID][]*datapb.VchannelInfo
deltaChannelMu sync.RWMutex
dmChannelInfos map[string]*querypb.DmChannelWatchInfo
@ -138,7 +130,6 @@ type MetaReplica struct {
func newMeta(ctx context.Context, kv kv.MetaKv, factory dependency.Factory, idAllocator func() (UniqueID, error)) (Meta, error) {
childCtx, cancel := context.WithCancel(ctx)
collectionInfos := make(map[UniqueID]*querypb.CollectionInfo)
queryChannelInfos := make(map[UniqueID]*querypb.QueryChannelInfo)
deltaChannelInfos := make(map[UniqueID][]*datapb.VchannelInfo)
dmChannelInfos := make(map[string]*querypb.DmChannelWatchInfo)
@ -149,7 +140,6 @@ func newMeta(ctx context.Context, kv kv.MetaKv, factory dependency.Factory, idAl
idAllocator: idAllocator,
collectionInfos: collectionInfos,
queryChannelInfos: queryChannelInfos,
deltaChannelInfos: deltaChannelInfos,
dmChannelInfos: dmChannelInfos,
@ -583,15 +573,11 @@ func (m *MetaReplica) releasePartitions(collectionID UniqueID, releasedPartition
return nil
}
func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2SealedSegmentChangeInfos, error) {
if len(saves) == 0 {
return nil, nil
}
// TODO: refactor this; the current implementation is awkward, with too many edge cases
func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos, removes col2SegmentInfos) error {
// generate segment change info according to the segment info to be updated
col2SegmentChangeInfos := make(col2SealedSegmentChangeInfos)
segmentsCompactionFrom := make([]*querypb.SegmentInfo, 0)
// get segmentInfos to colSegmentInfos
// for load balance, check whether the online segments are offline anywhere else
for collectionID, onlineInfos := range saves {
segmentsChangeInfo := &querypb.SealedSegmentsChangeInfo{
Base: &commonpb.MsgBase{
@ -604,6 +590,10 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
segmentID := info.SegmentID
onlineInfo := proto.Clone(info).(*querypb.SegmentInfo)
changeInfo := &querypb.SegmentChangeInfo{
OnlineSegments: []*querypb.SegmentInfo{onlineInfo},
}
// LoadBalance case
// One node loads the segment while another offloads it
offlineInfo, err := m.getSegmentInfoByID(segmentID)
@ -612,46 +602,37 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
onlineInfo.NodeIds = diffSlice(info.NodeIds, offlineInfo.NodeIds...)
offlineInfo.NodeIds = diffSlice(offlineInfo.NodeIds, info.NodeIds...)
segmentsChangeInfo.Infos = append(segmentsChangeInfo.Infos,
&querypb.SegmentChangeInfo{
OnlineSegments: []*querypb.SegmentInfo{onlineInfo},
OfflineSegments: []*querypb.SegmentInfo{offlineInfo},
})
changeInfo.OfflineSegments = append(changeInfo.OfflineSegments, offlineInfo)
}
// Handoff case
// generate offline segment change info if the loaded segment is compacted from other sealed segments
compactChangeInfo := &querypb.SegmentChangeInfo{}
for _, compactionSegmentID := range info.CompactionFrom {
offlineInfo, err := m.getSegmentInfoByID(compactionSegmentID)
if err == nil && offlineInfo.SegmentState == commonpb.SegmentState_Sealed {
compactChangeInfo.OfflineSegments = append(compactChangeInfo.OfflineSegments, offlineInfo)
segmentsCompactionFrom = append(segmentsCompactionFrom, offlineInfo)
} else {
return nil, fmt.Errorf("saveGlobalSealedSegInfos: the compacted segment %d has not been loaded into memory", compactionSegmentID)
}
}
compactChangeInfo.OnlineSegments = append(compactChangeInfo.OnlineSegments, onlineInfo)
segmentsChangeInfo.Infos = append(segmentsChangeInfo.Infos, compactChangeInfo)
segmentsChangeInfo.Infos = append(segmentsChangeInfo.Infos,
changeInfo)
}
col2SegmentChangeInfos[collectionID] = segmentsChangeInfo
}
// save segmentInfo to etcd
for _, infos := range saves {
for _, info := range infos {
if err := m.segmentsInfo.saveSegment(info); err != nil {
panic(err)
// for handoff, some segments are removed from the segment list
for collectionID, offlineInfos := range removes {
segmentsChangeInfo, ok := col2SegmentChangeInfos[collectionID]
if !ok {
// the case where saves has no entry for this collection ID; should not happen
segmentsChangeInfo = &querypb.SealedSegmentsChangeInfo{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_SealedSegmentsChangeInfo,
},
Infos: []*querypb.SegmentChangeInfo{},
}
col2SegmentChangeInfos[collectionID] = segmentsChangeInfo
}
changeInfo := &querypb.SegmentChangeInfo{}
for _, offlineInfo := range offlineInfos {
if offlineInfo.SegmentState == commonpb.SegmentState_Sealed {
changeInfo.OfflineSegments = append(changeInfo.OfflineSegments, offlineInfo)
}
}
}
// remove compacted segment info from etcd
for _, segmentInfo := range segmentsCompactionFrom {
if err := m.segmentsInfo.removeSegment(segmentInfo); err != nil {
panic(err)
}
segmentsChangeInfo.Infos = append(segmentsChangeInfo.Infos, changeInfo)
}
// save sealedSegmentsChangeInfo to etcd
@ -662,7 +643,7 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
for _, changeInfos := range col2SegmentChangeInfos {
changeInfoBytes, err := proto.Marshal(changeInfos)
if err != nil {
return col2SegmentChangeInfos, err
return err
}
// TODO:: segmentChangeInfo clear in etcd with coord gc and queryNode watch the changeInfo meta to deal changeInfoMsg
changeInfoKey := fmt.Sprintf("%s/%d", util.ChangeInfoMetaPrefix, changeInfos.Base.MsgID)
@ -671,16 +652,35 @@ func (m *MetaReplica) saveGlobalSealedSegInfos(saves col2SegmentInfos) (col2Seal
err := m.getKvClient().MultiSave(saveKvs)
if err != nil {
panic(err)
return err
}
return col2SegmentChangeInfos, nil
// TODO batch save/remove segment info to maintain atomicity
// save segmentInfo to etcd
for _, infos := range saves {
for _, info := range infos {
if err := m.segmentsInfo.saveSegment(info); err != nil {
panic(err)
}
}
}
// remove the segmentInfos scheduled for removal
for _, infos := range removes {
for _, info := range infos {
if err := m.segmentsInfo.removeSegment(info); err != nil {
panic(err)
}
}
}
return nil
}
func (m *MetaReplica) removeGlobalSealedSegInfos(collectionID UniqueID, partitionIDs []UniqueID) (col2SealedSegmentChangeInfos, error) {
func (m *MetaReplica) removeGlobalSealedSegInfos(collectionID UniqueID, partitionIDs []UniqueID) error {
removes := m.showSegmentInfos(collectionID, partitionIDs)
if len(removes) == 0 {
return nil, nil
return nil
}
// get segmentInfos to remove
segmentChangeInfos := &querypb.SealedSegmentsChangeInfo{
@ -700,27 +700,6 @@ func (m *MetaReplica) removeGlobalSealedSegInfos(collectionID UniqueID, partitio
}
}
// produce sealedSegmentChangeInfos to query channel
queryChannelInfo := m.getQueryChannelInfoByID(collectionID)
// update segmentInfo, queryChannelInfo meta to cache and etcd
seg2Info := make(map[UniqueID]*querypb.SegmentInfo)
for _, segmentInfo := range queryChannelInfo.GlobalSealedSegments {
segmentID := segmentInfo.SegmentID
seg2Info[segmentID] = segmentInfo
}
for _, segmentInfo := range removes {
segmentID := segmentInfo.SegmentID
delete(seg2Info, segmentID)
}
globalSealedSegmentInfos := make([]*querypb.SegmentInfo, 0)
for _, info := range seg2Info {
globalSealedSegmentInfos = append(globalSealedSegmentInfos, info)
}
queryChannelInfo.GlobalSealedSegments = globalSealedSegmentInfos
// remove meta from etcd
for _, info := range removes {
if err := m.segmentsInfo.removeSegment(info); err != nil {
@ -734,7 +713,7 @@ func (m *MetaReplica) removeGlobalSealedSegInfos(collectionID UniqueID, partitio
// then the msgID key will not exist, and the changeInfo will be ignored by the query node
changeInfoBytes, err := proto.Marshal(segmentChangeInfos)
if err != nil {
return col2SealedSegmentChangeInfos{collectionID: segmentChangeInfos}, err
return err
}
// TODO:: segmentChangeInfo clear in etcd with coord gc and queryNode watch the changeInfo meta to deal changeInfoMsg
changeInfoKey := fmt.Sprintf("%s/%d", util.ChangeInfoMetaPrefix, segmentChangeInfos.Base.MsgID)
@ -745,11 +724,7 @@ func (m *MetaReplica) removeGlobalSealedSegInfos(collectionID UniqueID, partitio
panic(err)
}
m.channelMu.Lock()
m.queryChannelInfos[collectionID] = queryChannelInfo
m.channelMu.Unlock()
return col2SealedSegmentChangeInfos{collectionID: segmentChangeInfos}, nil
return nil
}
func (m *MetaReplica) showSegmentInfos(collectionID UniqueID, partitionIDs []UniqueID) []*querypb.SegmentInfo {
@ -800,10 +775,6 @@ func (m *MetaReplica) getSegmentInfosByNodeAndCollection(nodeID, collectionID in
return res
}
func (m *MetaReplica) saveSegmentInfo(segment *querypb.SegmentInfo) error {
return m.segmentsInfo.saveSegment(segment)
}
func (m *MetaReplica) getCollectionInfoByID(collectionID UniqueID) (*querypb.CollectionInfo, error) {
m.collectionMu.RLock()
defer m.collectionMu.RUnlock()
@ -881,32 +852,6 @@ func (m *MetaReplica) setDmChannelInfos(dmChannelWatchInfos ...*querypb.DmChanne
return nil
}
// createQueryChannel creates topic names for search channel and search result channel
// Search channel's suffix is fixed with "-0"
// Search result channel's suffix is fixed with "-0"
func (m *MetaReplica) createQueryChannel(collectionID UniqueID) *querypb.QueryChannelInfo {
allocatedQueryChannel := fmt.Sprintf("%s-0", Params.CommonCfg.QueryCoordSearch)
allocatedQueryResultChannel := fmt.Sprintf("%s-0", Params.CommonCfg.QueryCoordSearchResult)
log.Info("query coordinator is creating query channel",
zap.String("query channel name", allocatedQueryChannel),
zap.String("query result channel name", allocatedQueryResultChannel))
seekPosition := &internalpb.MsgPosition{
ChannelName: allocatedQueryChannel,
}
segmentInfos := m.showSegmentInfos(collectionID, nil)
info := &querypb.QueryChannelInfo{
CollectionID: collectionID,
QueryChannel: allocatedQueryChannel,
QueryResultChannel: allocatedQueryResultChannel,
GlobalSealedSegments: segmentInfos,
SeekPosition: seekPosition,
}
return info
}
// Get delta channel info for collection, so far all the collection share the same query channel 0
func (m *MetaReplica) getDeltaChannelsByCollectionID(collectionID UniqueID) ([]*datapb.VchannelInfo, error) {
m.deltaChannelMu.RLock()
@ -938,22 +883,6 @@ func (m *MetaReplica) setDeltaChannel(collectionID UniqueID, infos []*datapb.Vch
return nil
}
// Get Query channel info for collection, so far all the collection share the same query channel 0
func (m *MetaReplica) getQueryChannelInfoByID(collectionID UniqueID) *querypb.QueryChannelInfo {
m.channelMu.Lock()
defer m.channelMu.Unlock()
var channelInfo *querypb.QueryChannelInfo
if info, ok := m.queryChannelInfos[collectionID]; ok {
channelInfo = proto.Clone(info).(*querypb.QueryChannelInfo)
} else {
channelInfo = m.createQueryChannel(collectionID)
m.queryChannelInfos[collectionID] = channelInfo
}
return proto.Clone(channelInfo).(*querypb.QueryChannelInfo)
}
func (m *MetaReplica) setLoadType(collectionID UniqueID, loadType querypb.LoadType) error {
m.collectionMu.Lock()
defer m.collectionMu.Unlock()
@ -1033,8 +962,7 @@ func (m *MetaReplica) getWatchedChannelsByNodeID(nodeID int64) *querypb.Unsubscr
// 1. find all the search/dmChannel/deltaChannel the node has watched
colID2DmChannels := make(map[UniqueID][]string)
colID2DeltaChannels := make(map[UniqueID][]string)
// TODO remove colID2QueryChannel since it's not used
colID2QueryChannel := make(map[UniqueID]string)
dmChannelInfos := m.getDmChannelInfosByNodeID(nodeID)
// get dmChannel/search channel the node has watched
for _, channelInfo := range dmChannelInfos {
@ -1044,10 +972,6 @@ func (m *MetaReplica) getWatchedChannelsByNodeID(nodeID int64) *querypb.Unsubscr
colID2DmChannels[collectionID] = []string{}
}
colID2DmChannels[collectionID] = append(colID2DmChannels[collectionID], dmChannel)
if _, ok := colID2QueryChannel[collectionID]; !ok {
queryChannelInfo := m.getQueryChannelInfoByID(collectionID)
colID2QueryChannel[collectionID] = queryChannelInfo.QueryChannel
}
}
segmentInfos := m.getSegmentInfosByNode(nodeID)
colIDs := make(map[UniqueID]bool)
@ -1070,10 +994,6 @@ func (m *MetaReplica) getWatchedChannelsByNodeID(nodeID int64) *querypb.Unsubscr
}
colID2DeltaChannels[collectionID] = deltaChannels
}
if _, ok := colID2QueryChannel[collectionID]; !ok {
queryChannelInfo := m.getQueryChannelInfoByID(collectionID)
colID2QueryChannel[collectionID] = queryChannelInfo.QueryChannel
}
}
// creating unsubscribeChannelInfo, which will be written to etcd
@ -1084,9 +1004,6 @@ func (m *MetaReplica) getWatchedChannelsByNodeID(nodeID int64) *querypb.Unsubscr
for collectionID, channels := range colID2DeltaChannels {
colID2Channels[collectionID] = append(colID2Channels[collectionID], channels...)
}
for collectionID, channel := range colID2QueryChannel {
colID2Channels[collectionID] = append(colID2Channels[collectionID], channel)
}
unsubscribeChannelInfo := &querypb.UnsubscribeChannelInfo{
NodeID: nodeID,
@ -1201,22 +1118,6 @@ func (m *MetaReplica) updateShardLeader(replicaID UniqueID, dmChannel string, le
return m.replicas.UpdateShardLeader(replicaID, dmChannel, leaderID, leaderAddr, m.getKvClient())
}
//func (m *MetaReplica) printMeta() {
// m.RLock()
// defer m.RUnlock()
// for id, info := range m.collectionInfos {
// log.Debug("query coordinator MetaReplica: collectionInfo", zap.Int64("collectionID", id), zap.Any("info", info))
// }
//
// for id, info := range m.segmentInfos {
// log.Debug("query coordinator MetaReplica: segmentInfo", zap.Int64("segmentID", id), zap.Any("info", info))
// }
//
// for id, info := range m.queryChannelInfos {
// log.Debug("query coordinator MetaReplica: queryChannelInfo", zap.Int64("collectionID", id), zap.Any("info", info))
// }
//}
func saveGlobalCollectionInfo(collectionID UniqueID, info *querypb.CollectionInfo, kv kv.MetaKv) error {
infoBytes, err := proto.Marshal(info)
if err != nil {

View File

@ -27,9 +27,12 @@ import (
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/kv"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util"
@ -118,11 +121,10 @@ func TestMetaFunc(t *testing.T) {
NodeIds: []int64{nodeID},
}
meta := &MetaReplica{
collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
queryChannelInfos: map[UniqueID]*querypb.QueryChannelInfo{},
dmChannelInfos: map[string]*querypb.DmChannelWatchInfo{},
segmentsInfo: segmentsInfo,
replicas: NewReplicaInfos(),
collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
dmChannelInfos: map[string]*querypb.DmChannelWatchInfo{},
segmentsInfo: segmentsInfo,
replicas: NewReplicaInfos(),
}
meta.setKvClient(kv)
dmChannels := []string{"testDm1", "testDm2"}
@ -160,11 +162,6 @@ func TestMetaFunc(t *testing.T) {
assert.NotNil(t, err)
})
t.Run("Test GetQueryChannelInfoByIDFirst", func(t *testing.T) {
res := meta.getQueryChannelInfoByID(defaultCollectionID)
assert.NotNil(t, res)
})
t.Run("Test GetPartitionStatesByIDFail", func(t *testing.T) {
res, err := meta.getPartitionStatesByID(defaultCollectionID, defaultPartitionID)
assert.Nil(t, res)
@ -256,12 +253,6 @@ func TestMetaFunc(t *testing.T) {
assert.Equal(t, defaultSegmentID, infos[0].SegmentID)
})
t.Run("Test getQueryChannelSecond", func(t *testing.T) {
info := meta.getQueryChannelInfoByID(defaultCollectionID)
assert.NotNil(t, info.QueryChannel)
assert.NotNil(t, info.QueryResultChannel)
})
t.Run("Test GetSegmentInfoByID", func(t *testing.T) {
info, err := meta.getSegmentInfoByID(defaultSegmentID)
assert.Nil(t, err)
@ -311,7 +302,6 @@ func TestReloadMetaFromKV(t *testing.T) {
meta := &MetaReplica{
idAllocator: idAllocator,
collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
queryChannelInfos: map[UniqueID]*querypb.QueryChannelInfo{},
dmChannelInfos: map[string]*querypb.DmChannelWatchInfo{},
deltaChannelInfos: map[UniqueID][]*datapb.VchannelInfo{},
segmentsInfo: newSegmentsInfo(kv),
@ -378,49 +368,188 @@ func TestReloadMetaFromKV(t *testing.T) {
assert.Equal(t, collectionInfo.CollectionID, replicas[0].CollectionID)
}
func TestCreateQueryChannel(t *testing.T) {
func TestSaveSegments(t *testing.T) {
refreshParams()
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
assert.Nil(t, err)
defer etcdCli.Close()
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
nodeID := defaultQueryNodeID
segmentsInfo := newSegmentsInfo(kv)
segmentsInfo.segmentIDMap[defaultSegmentID] = &querypb.SegmentInfo{
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentID: defaultSegmentID,
NodeID: nodeID,
NodeIds: []int64{nodeID},
meta := &MetaReplica{
collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
dmChannelInfos: map[string]*querypb.DmChannelWatchInfo{},
segmentsInfo: newSegmentsInfo(kv),
replicas: NewReplicaInfos(),
client: kv,
}
fixedQueryChannel := Params.CommonCfg.QueryCoordSearch + "-0"
fixedQueryResultChannel := Params.CommonCfg.QueryCoordSearchResult + "-0"
t.Run("LoadCollection", func(t *testing.T) {
defer func() {
meta.segmentsInfo = newSegmentsInfo(kv)
}()
eventChan := kv.WatchWithPrefix(util.ChangeInfoMetaPrefix)
tests := []struct {
inID UniqueID
outQueryChannel string
outResultChannel string
segmentNum := 5
save := MockSaveSegments(segmentNum)
log.Debug("save segments...",
zap.Any("segments", save))
meta.saveGlobalSealedSegInfos(save, nil)
description string
}{
{0, fixedQueryChannel, fixedQueryResultChannel, "collection ID = 0"},
{1, fixedQueryChannel, fixedQueryResultChannel, "collection ID = 1"},
}
log.Debug("wait for etcd event")
sawOnlineSegments := false
for !sawOnlineSegments {
watchResp, ok := <-eventChan
assert.True(t, ok)
m := &MetaReplica{
collectionInfos: map[UniqueID]*querypb.CollectionInfo{},
queryChannelInfos: map[UniqueID]*querypb.QueryChannelInfo{},
dmChannelInfos: map[string]*querypb.DmChannelWatchInfo{},
segmentsInfo: segmentsInfo,
}
m.setKvClient(kv)
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
info := m.createQueryChannel(test.inID)
assert.Equal(t, info.GetQueryChannel(), test.outQueryChannel)
assert.Equal(t, info.GetQueryResultChannel(), test.outResultChannel)
for _, event := range watchResp.Events {
changeInfoBatch := &querypb.SealedSegmentsChangeInfo{}
err := proto.Unmarshal(event.Kv.Value, changeInfoBatch)
assert.NoError(t, err)
assert.Equal(t, segmentNum, len(changeInfoBatch.GetInfos()))
for _, changeInfo := range changeInfoBatch.GetInfos() {
assert.Empty(t, changeInfo.OfflineSegments)
assert.Equal(t, 1, len(changeInfo.OnlineSegments))
}
sawOnlineSegments = true
}
}
})
t.Run("LoadBalance", func(t *testing.T) {
defer func() {
meta.segmentsInfo = newSegmentsInfo(kv)
}()
eventChan := kv.WatchWithPrefix(util.ChangeInfoMetaPrefix)
segmentNum := 5
save := MockSaveSegments(segmentNum)
for _, segment := range save[defaultCollectionID] {
meta.segmentsInfo.saveSegment(segment)
}
balancedSegment := &querypb.SegmentInfo{
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentID: defaultSegmentID,
DmChannel: "testDmChannel",
SegmentState: commonpb.SegmentState_Sealed,
NodeIds: []UniqueID{defaultQueryNodeID + 1},
}
save = map[int64][]*querypb.SegmentInfo{
defaultCollectionID: {balancedSegment},
}
meta.saveGlobalSealedSegInfos(save, nil)
sawOnlineSegments := false
for !sawOnlineSegments {
watchResp, ok := <-eventChan
assert.True(t, ok)
for _, event := range watchResp.Events {
changeInfoBatch := &querypb.SealedSegmentsChangeInfo{}
err := proto.Unmarshal(event.Kv.Value, changeInfoBatch)
assert.NoError(t, err)
assert.Equal(t, 1, len(changeInfoBatch.GetInfos()))
for _, changeInfo := range changeInfoBatch.GetInfos() {
assert.Equal(t, 1, len(changeInfo.OfflineSegments))
assert.Equal(t, 1, len(changeInfo.OnlineSegments))
assert.Equal(t, defaultQueryNodeID, changeInfo.OfflineSegments[0].NodeIds[0])
assert.Equal(t, defaultQueryNodeID+1, changeInfo.OnlineSegments[0].NodeIds[0])
}
sawOnlineSegments = true
}
}
})
t.Run("Handoff", func(t *testing.T) {
defer func() {
meta.segmentsInfo = newSegmentsInfo(kv)
}()
eventChan := kv.WatchWithPrefix(util.ChangeInfoMetaPrefix)
segmentNum := 5
save := MockSaveSegments(segmentNum)
for _, segment := range save[defaultCollectionID] {
meta.segmentsInfo.saveSegment(segment)
}
spawnSegment := &querypb.SegmentInfo{
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentID: defaultSegmentID + int64(segmentNum),
DmChannel: "testDmChannel",
SegmentState: commonpb.SegmentState_Sealed,
NodeIds: []UniqueID{defaultQueryNodeID + 1},
CompactionFrom: []UniqueID{defaultSegmentID, defaultSegmentID + 1},
}
save = map[int64][]*querypb.SegmentInfo{
defaultCollectionID: {spawnSegment},
}
remove := map[int64][]*querypb.SegmentInfo{
defaultCollectionID: {},
}
for _, segmentID := range spawnSegment.CompactionFrom {
segment, err := meta.getSegmentInfoByID(segmentID)
assert.NoError(t, err)
remove[defaultCollectionID] = append(remove[defaultCollectionID],
segment)
}
meta.saveGlobalSealedSegInfos(save, remove)
sawOnlineSegment := false
sawOfflineSegment := false
for !sawOnlineSegment || !sawOfflineSegment {
watchResp, ok := <-eventChan
assert.True(t, ok)
for _, event := range watchResp.Events {
changeInfoBatch := &querypb.SealedSegmentsChangeInfo{}
err := proto.Unmarshal(event.Kv.Value, changeInfoBatch)
assert.NoError(t, err)
for _, changeInfo := range changeInfoBatch.GetInfos() {
if !sawOnlineSegment {
assert.Equal(t, 1, len(changeInfo.OnlineSegments))
assert.Equal(t, defaultSegmentID+int64(segmentNum), changeInfo.OnlineSegments[0].SegmentID)
assert.Equal(t, defaultQueryNodeID+1, changeInfo.OnlineSegments[0].NodeIds[0])
sawOnlineSegment = true
} else {
assert.Equal(t, len(spawnSegment.CompactionFrom), len(changeInfo.OfflineSegments))
sawOfflineSegment = true
}
}
}
}
})
}
func MockSaveSegments(segmentNum int) col2SegmentInfos {
saves := make(col2SegmentInfos)
segments := make([]*querypb.SegmentInfo, 0)
for i := 0; i < segmentNum; i++ {
segments = append(segments, &querypb.SegmentInfo{
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
SegmentID: defaultSegmentID + int64(i),
DmChannel: "testDmChannel",
SegmentState: commonpb.SegmentState_Sealed,
NodeIds: []UniqueID{defaultQueryNodeID},
})
}
saves[defaultCollectionID] = segments
return saves
}
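// A minimal sketch (assumed helper, mirroring the Handoff subtest above) of
// building the removes argument for saveGlobalSealedSegInfos from a compacted
// segment's CompactionFrom list.
func mockRemovedSegments(meta Meta, collectionID UniqueID, compactionFrom []UniqueID) (col2SegmentInfos, error) {
	removes := make(col2SegmentInfos)
	for _, segmentID := range compactionFrom {
		// each compacted-from segment must already be tracked by meta
		segment, err := meta.getSegmentInfoByID(segmentID)
		if err != nil {
			return nil, err
		}
		removes[collectionID] = append(removes[collectionID], segment)
	}
	return removes, nil
}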

View File

@ -22,7 +22,9 @@ import (
"fmt"
"sync"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
@ -334,6 +336,9 @@ type dataCoordMock struct {
returnGrpcError bool
segmentState commonpb.SegmentState
errLevel int
globalLock sync.Mutex
segmentRefCount map[UniqueID]int
}
func newDataCoordMock(ctx context.Context) *dataCoordMock {
@ -350,6 +355,7 @@ func newDataCoordMock(ctx context.Context) *dataCoordMock {
baseSegmentID: defaultSegmentID,
channelNumPerCol: defaultChannelNum,
segmentState: commonpb.SegmentState_Flushed,
segmentRefCount: make(map[int64]int),
}
}
@ -460,6 +466,18 @@ func (data *dataCoordMock) AcquireSegmentLock(ctx context.Context, req *datapb.A
Reason: "AcquireSegmentLock failed",
}, nil
}
data.globalLock.Lock()
defer data.globalLock.Unlock()
log.Debug("acquire segment locks",
zap.Int64s("segments", req.SegmentIDs))
for _, segment := range req.SegmentIDs {
refCount := data.segmentRefCount[segment]
refCount++
data.segmentRefCount[segment] = refCount
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
@ -478,6 +496,20 @@ func (data *dataCoordMock) ReleaseSegmentLock(ctx context.Context, req *datapb.R
}, nil
}
data.globalLock.Lock()
defer data.globalLock.Unlock()
log.Debug("release segment locks",
zap.Int64s("segments", req.SegmentIDs))
for _, segment := range req.SegmentIDs {
refCount := data.segmentRefCount[segment]
if refCount > 0 {
refCount--
}
data.segmentRefCount[segment] = refCount
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil

View File

@ -42,6 +42,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
@ -50,10 +51,6 @@ import (
"github.com/milvus-io/milvus/internal/util/typeutil"
)
const (
handoffSegmentPrefix = "querycoord-handoff"
)
// UniqueID is an alias for the Int64 type
type UniqueID = typeutil.UniqueID
@ -72,14 +69,14 @@ type QueryCoord struct {
initOnce sync.Once
queryCoordID uint64
meta Meta
cluster Cluster
handler *channelUnsubscribeHandler
newNodeFn newQueryNodeFn
scheduler *TaskScheduler
idAllocator func() (UniqueID, error)
indexChecker *IndexChecker
queryCoordID uint64
meta Meta
cluster Cluster
handler *channelUnsubscribeHandler
newNodeFn newQueryNodeFn
scheduler *TaskScheduler
idAllocator func() (UniqueID, error)
handoffHandler *HandoffHandler
metricsCacheManager *metricsinfo.MetricsCacheManager
@ -207,7 +204,7 @@ func (qc *QueryCoord) Init() error {
}
// init handoff handler
qc.indexChecker, initError = newIndexChecker(qc.loopCtx, qc.kvClient, qc.meta, qc.cluster, qc.scheduler, qc.broker)
qc.handoffHandler, initError = newHandoffHandler(qc.loopCtx, qc.kvClient, qc.meta, qc.cluster, qc.scheduler, qc.broker)
if initError != nil {
log.Error("query coordinator init index checker failed", zap.Error(initError))
return
@ -224,7 +221,7 @@ func (qc *QueryCoord) Start() error {
qc.scheduler.Start()
log.Info("start scheduler ...")
qc.indexChecker.start()
qc.handoffHandler.Start()
log.Info("start index checker ...")
qc.handler.start()
@ -237,7 +234,7 @@ func (qc *QueryCoord) Start() error {
go qc.watchNodeLoop()
qc.loopWg.Add(1)
go qc.watchHandoffSegmentLoop()
go qc.handoffNotificationLoop()
if Params.QueryCoordCfg.AutoBalance {
qc.loopWg.Add(1)
@ -254,26 +251,26 @@ func (qc *QueryCoord) Stop() error {
qc.UpdateStateCode(internalpb.StateCode_Abnormal)
if qc.scheduler != nil {
log.Info("close scheduler...")
qc.scheduler.Close()
log.Info("close scheduler ...")
}
if qc.indexChecker != nil {
qc.indexChecker.close()
log.Info("close index checker ...")
if qc.handoffHandler != nil {
log.Info("close index checker...")
qc.handoffHandler.Stop()
}
if qc.handler != nil {
log.Info("close channel unsubscribe loop...")
qc.handler.close()
log.Info("close channel unsubscribe loop ...")
}
if qc.loopCancel != nil {
log.Info("cancel the loop of QueryCoord...")
qc.loopCancel()
log.Info("cancel the loop of QueryCoord")
}
log.Warn("Query Coord stopped successfully...")
log.Info("Query Coord stopped successfully...")
qc.loopWg.Wait()
qc.session.Revoke(time.Second)
return nil
@ -501,14 +498,14 @@ func (qc *QueryCoord) loadBalanceNodeLoop(ctx context.Context) {
}
}
func (qc *QueryCoord) watchHandoffSegmentLoop() {
func (qc *QueryCoord) handoffNotificationLoop() {
ctx, cancel := context.WithCancel(qc.loopCtx)
defer cancel()
defer qc.loopWg.Done()
log.Info("QueryCoord start watch segment loop")
watchChan := qc.kvClient.WatchWithRevision(handoffSegmentPrefix, qc.indexChecker.revision+1)
watchChan := qc.kvClient.WatchWithRevision(util.HandoffSegmentPrefix, qc.handoffHandler.revision+1)
for {
select {
@ -524,19 +521,8 @@ func (qc *QueryCoord) watchHandoffSegmentLoop() {
}
switch event.Type {
case mvccpb.PUT:
validHandoffReq, _ := qc.indexChecker.verifyHandoffReqValid(segmentInfo)
if Params.QueryCoordCfg.AutoHandoff && validHandoffReq {
qc.indexChecker.enqueueHandoffReq(segmentInfo)
log.Info("watchHandoffSegmentLoop: enqueue a handoff request to index checker", zap.Any("segment info", segmentInfo))
} else {
log.Info("watchHandoffSegmentLoop: collection/partition has not been loaded or autoHandoff equal to false, remove req from etcd", zap.Any("segmentInfo", segmentInfo))
buildQuerySegmentPath := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, segmentInfo.CollectionID, segmentInfo.PartitionID, segmentInfo.SegmentID)
err = qc.kvClient.Remove(buildQuerySegmentPath)
if err != nil {
log.Error("watchHandoffSegmentLoop: remove handoff segment from etcd failed", zap.Error(err))
panic(err)
}
}
qc.handoffHandler.enqueue(segmentInfo)
log.Info("watchHandoffSegmentLoop: enqueue a handoff request to index checker", zap.Any("segment info", segmentInfo))
default:
// do nothing
}

View File

@ -37,6 +37,7 @@ import (
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/sessionutil"
@ -305,7 +306,7 @@ func TestHandoffSegmentLoop(t *testing.T) {
SegmentState: commonpb.SegmentState_Sealed,
}
key := fmt.Sprintf("%s/%d/%d/%d", handoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
key := fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, defaultCollectionID, defaultPartitionID, defaultSegmentID)
value, err := proto.Marshal(segmentInfo)
assert.Nil(t, err)
err = queryCoord.kvClient.Save(key, string(value))
@ -458,7 +459,8 @@ func TestHandoffSegmentLoop(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_HandoffSegments,
},
SegmentInfos: []*querypb.SegmentInfo{segmentInfo},
SegmentInfos: []*querypb.SegmentInfo{segmentInfo},
ReleasedSegments: []int64{defaultSegmentID, defaultSegmentID + 1},
}
handoffTask := &handoffTask{
baseTask: baseTask,

View File

@ -85,6 +85,7 @@ func waitAllQueryNodeOffline(cluster Cluster, nodeIDs ...int64) bool {
func waitQueryNodeOnline(cluster Cluster, nodeID int64) {
for {
log.Debug("waiting for query node online...")
online, err := cluster.IsOnline(nodeID)
if err != nil {
continue
@ -92,6 +93,8 @@ func waitQueryNodeOnline(cluster Cluster, nodeID int64) {
if online {
return
}
time.Sleep(500 * time.Millisecond)
}
}

View File

@ -1647,16 +1647,7 @@ func (ht *handoffTask) execute(ctx context.Context) error {
continue
}
// segment which is compacted from should exist in query node
for _, compactedSegID := range segmentInfo.CompactionFrom {
_, err = ht.meta.getSegmentInfoByID(compactedSegID)
if err != nil {
log.Error("handoffTask: compacted segment has not been loaded into memory", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Int64("segmentID", segmentID))
ht.setResultInfo(err)
return err
}
}
// TODO: we don't ensure atomicity here, so this could get stuck forever
// segment which is compacted to should not exist in query node
_, err = ht.meta.getSegmentInfoByID(segmentID)
if err != nil {

View File

@ -432,6 +432,7 @@ func (scheduler *TaskScheduler) unmarshalTask(taskID UniqueID, t string) (task,
// Enqueue pushes a trigger task to triggerTaskQueue and assigns a task ID
func (scheduler *TaskScheduler) Enqueue(t task) error {
// TODO, loadbalance, handoff and other task may not want to be persisted
id, err := scheduler.taskIDAllocator()
if err != nil {
log.Error("allocator trigger taskID failed", zap.Error(err))
@ -920,37 +921,40 @@ func (scheduler *TaskScheduler) BindContext(ctx context.Context) (context.Contex
func updateSegmentInfoFromTask(ctx context.Context, triggerTask task, meta Meta) error {
segmentInfosToSave := make(map[UniqueID][]*querypb.SegmentInfo)
segmentInfosToRemove := make(map[UniqueID][]*querypb.SegmentInfo)
//var sealedSegmentChangeInfos col2SealedSegmentChangeInfos
var err error
switch triggerTask.msgType() {
case commonpb.MsgType_ReleaseCollection:
// release all segmentInfo of the collection when releasing the collection
req := triggerTask.(*releaseCollectionTask).ReleaseCollectionRequest
collectionID := req.CollectionID
_, err = meta.removeGlobalSealedSegInfos(collectionID, nil)
err = meta.removeGlobalSealedSegInfos(collectionID, nil)
case commonpb.MsgType_ReleasePartitions:
// release all segmentInfo of the partitions when releasing partitions
req := triggerTask.(*releasePartitionTask).ReleasePartitionsRequest
collectionID := req.CollectionID
segmentInfos := meta.showSegmentInfos(collectionID, req.PartitionIDs)
for _, info := range segmentInfos {
if info.CollectionID == collectionID {
if _, ok := segmentInfosToRemove[collectionID]; !ok {
segmentInfosToRemove[collectionID] = make([]*querypb.SegmentInfo, 0)
}
segmentInfosToRemove[collectionID] = append(segmentInfosToRemove[collectionID], info)
err = meta.removeGlobalSealedSegInfos(collectionID, req.PartitionIDs)
case commonpb.MsgType_HandoffSegments:
// remove released segments
req := triggerTask.(*handoffTask).HandoffSegmentsRequest
collectionID := req.SegmentInfos[0].CollectionID
offlineInfos := make([]*querypb.SegmentInfo, 0)
for _, releasedSegmentID := range req.ReleasedSegments {
info, err := meta.getSegmentInfoByID(releasedSegmentID)
if err != nil {
// might be a retry; this is not correct, but so far we will take it
log.Warn("failed to find offline segment info while handoff, ignore it", zap.Int64("segmentID", releasedSegmentID), zap.Error(err))
} else {
offlineInfos = append(offlineInfos, info)
}
}
_, err = meta.removeGlobalSealedSegInfos(collectionID, req.PartitionIDs)
segmentInfosToRemove[collectionID] = offlineInfos
// still run default case to handle load segments
fallthrough
default:
// save new segmentInfo when loading segments
var (
segments = make(map[UniqueID]*querypb.SegmentInfo)
)
segments := make(map[UniqueID]*querypb.SegmentInfo)
for _, childTask := range triggerTask.getChildTask() {
if childTask.msgType() == commonpb.MsgType_LoadSegments {
req := childTask.(*loadSegmentTask).LoadSegmentsRequest
@ -999,8 +1003,10 @@ func updateSegmentInfoFromTask(ctx context.Context, triggerTask task, meta Meta)
log.Info("update segment info",
zap.Int64("triggerTaskID", triggerTask.getTaskID()),
zap.Any("segment", segmentInfosToSave))
_, err = meta.saveGlobalSealedSegInfos(segmentInfosToSave)
zap.Any("segmentToSave", segmentInfosToSave),
zap.Any("segmentToRemove", segmentInfosToRemove),
)
err = meta.saveGlobalSealedSegInfos(segmentInfosToSave, segmentInfosToRemove)
}
// no need to rollback since etcd meta is not changed
@ -1011,30 +1017,6 @@ func updateSegmentInfoFromTask(ctx context.Context, triggerTask task, meta Meta)
return nil
}
func reverseSealedSegmentChangeInfo(changeInfosMap map[UniqueID]*querypb.SealedSegmentsChangeInfo) map[UniqueID]*querypb.SealedSegmentsChangeInfo {
result := make(map[UniqueID]*querypb.SealedSegmentsChangeInfo)
for collectionID, changeInfos := range changeInfosMap {
segmentChangeInfos := &querypb.SealedSegmentsChangeInfo{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_SealedSegmentsChangeInfo,
},
Infos: []*querypb.SegmentChangeInfo{},
}
for _, info := range changeInfos.Infos {
changeInfo := &querypb.SegmentChangeInfo{
OnlineNodeID: info.OfflineNodeID,
OnlineSegments: info.OfflineSegments,
OfflineNodeID: info.OnlineNodeID,
OfflineSegments: info.OnlineSegments,
}
segmentChangeInfos.Infos = append(segmentChangeInfos.Infos, changeInfo)
}
result[collectionID] = segmentChangeInfos
}
return result
}
// generateDerivedInternalTasks generates watchDeltaChannel and watchQueryChannel tasks
func generateDerivedInternalTasks(triggerTask task, meta Meta, cluster Cluster) ([]task, error) {
var derivedInternalTasks []task

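For reference, a sketch of the Meta methods used above, with signatures inferred only from the call sites in this hunk; the real interface lives in the query coordinator meta code and may differ:

type sealedSegmentMeta interface {
    // persist newly loaded segments and drop released ones in a single update
    saveGlobalSealedSegInfos(saves, removes map[UniqueID][]*querypb.SegmentInfo) error
    // drop all sealed-segment info of a collection, or only of the given partitions
    removeGlobalSealedSegInfos(collectionID UniqueID, partitionIDs []UniqueID) error
    getSegmentInfoByID(segmentID UniqueID) (*querypb.SegmentInfo, error)
    showSegmentInfos(collectionID UniqueID, partitionIDs []UniqueID) []*querypb.SegmentInfo
}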
View File

@ -775,43 +775,6 @@ func Test_AssignInternalTask(t *testing.T) {
assert.Nil(t, err)
}
func Test_reverseSealedSegmentChangeInfo(t *testing.T) {
refreshParams()
ctx := context.Background()
queryCoord, err := startQueryCoord(ctx)
assert.Nil(t, err)
node1, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
defer node1.stop()
loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
queryCoord.scheduler.Enqueue(loadCollectionTask)
waitTaskFinalState(loadCollectionTask, taskExpired)
node2, err := startQueryNodeServer(ctx)
assert.Nil(t, err)
waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)
defer node2.stop()
loadSegmentTask := genLoadSegmentTask(ctx, queryCoord, node2.queryNodeID)
parentTask := loadSegmentTask.parentTask
kv := &testKv{
returnFn: failedResult,
}
queryCoord.meta.setKvClient(kv)
assert.Panics(t, func() {
updateSegmentInfoFromTask(ctx, parentTask, queryCoord.meta)
})
queryCoord.Stop()
err = removeAllSession()
assert.Nil(t, err)
}
func Test_handoffSegmentFail(t *testing.T) {
refreshParams()
ctx := context.Background()
@ -1413,6 +1376,7 @@ func startMockCoord(ctx context.Context) (*QueryCoord, error) {
channelNumPerCol: defaultChannelNum,
segmentState: commonpb.SegmentState_Flushed,
errLevel: 1,
segmentRefCount: make(map[int64]int),
}
indexCoord, err := newIndexCoordMock(queryCoordTestDir)
if err != nil {

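The mock gains a per-segment reference counter. A hypothetical accessor (both the receiver type name and the method are assumptions, since neither appears in this hunk) showing how such a map is typically consulted:

func (m *dataCoordMock) hasSegmentRef(segmentID int64) bool {
    // a segment counts as referenced while its lock count is positive
    return m.segmentRefCount[segmentID] > 0
}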
View File

@ -2243,21 +2243,6 @@ func (c *Core) SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlus
log.Info("SegmentFlushCompleted received", zap.Int64("msgID", in.Base.MsgID), zap.Int64("collID", in.Segment.CollectionID),
zap.Int64("partID", in.Segment.PartitionID), zap.Int64("segID", in.Segment.ID), zap.Int64s("compactFrom", in.Segment.CompactionFrom))
// acquire reference lock before building index
if in.Segment.CreatedByCompaction {
log.Debug("try to acquire segment reference lock", zap.Int64("task id", in.Base.MsgID), zap.Int64s("segmentIDs", in.Segment.CompactionFrom))
if err := c.CallAddSegRefLock(ctx, in.Base.MsgID, in.Segment.CompactionFrom); err != nil {
log.Warn("acquire segment reference lock failed", zap.Int64("task id", in.Base.MsgID), zap.Int64s("segmentIDs", in.Segment.CompactionFrom))
return failStatus(commonpb.ErrorCode_UnexpectedError, "AcquireSegRefLock failed: "+err.Error()), nil
}
defer func() {
if err := c.CallReleaseSegRefLock(ctx, in.Base.MsgID, in.Segment.CompactionFrom); err != nil {
log.Warn("release segment reference lock failed", zap.Int64("task id", in.Base.MsgID), zap.Int64s("segmentIDs", in.Segment.CompactionFrom))
// panic to let ref manager detect release failure
panic(err)
}
}()
}
err = c.createIndexForSegment(ctx, in.Segment.CollectionID, in.Segment.PartitionID, in.Segment.ID, in.Segment.NumOfRows, in.Segment.Binlogs)
if err != nil {

View File

@ -20,6 +20,7 @@ package util
const (
SegmentMetaPrefix = "queryCoord-segmentMeta"
ChangeInfoMetaPrefix = "queryCoord-sealedSegmentChangeInfo"
HandoffSegmentPrefix = "querycoord-handoff"
HeaderAuthorize = "authorization"
// HeaderSourceID identifies requests from Milvus members and client requests
HeaderSourceID = "sourceId"
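A minimal sketch of how the relocated prefix composes into an etcd key, mirroring the fmt.Sprintf pattern used in the tests above; the IDs are placeholders:

key := fmt.Sprintf("%s/%d/%d/%d",
    util.HandoffSegmentPrefix, collectionID, partitionID, segmentID)
// e.g. "querycoord-handoff/<collectionID>/<partitionID>/<segmentID>"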