From 1f718118e9966d933a52b35eceb96a80927dbb14 Mon Sep 17 00:00:00 2001 From: "yihao.dai" Date: Mon, 20 Mar 2023 14:55:57 +0800 Subject: [PATCH] Dynamic load/release partitions (#22655) Signed-off-by: bigsheeper --- .../distributed/querycoord/client/client.go | 19 + .../querycoord/client/client_test.go | 3 + internal/distributed/querycoord/service.go | 5 + .../distributed/querynode/client/client.go | 18 + .../querynode/client/client_test.go | 3 + internal/distributed/querynode/service.go | 5 + .../distributed/querynode/service_test.go | 11 + internal/metastore/catalog.go | 4 +- internal/metrics/querycoord_metrics.go | 8 +- internal/proto/query_coord.proto | 15 +- internal/proto/querypb/query_coord.pb.go | 739 +++++++++++------- internal/proxy/proxy_test.go | 2 +- internal/proxy/query_node_mock_test.go | 4 + internal/proxy/task.go | 29 +- internal/proxy/task_test.go | 29 - .../balance/rowcount_based_balancer.go | 5 +- .../balance/rowcount_based_balancer_test.go | 5 + .../checkers/channel_checker_test.go | 2 + .../checkers/segment_checker_test.go | 2 + internal/querycoordv2/job/errors.go | 1 + internal/querycoordv2/job/job.go | 450 ----------- internal/querycoordv2/job/job_load.go | 397 ++++++++++ internal/querycoordv2/job/job_release.go | 176 +++++ internal/querycoordv2/job/job_sync.go | 103 +++ internal/querycoordv2/job/job_test.go | 470 +++++++++-- internal/querycoordv2/job/undo.go | 68 ++ internal/querycoordv2/job/utils.go | 64 +- .../querycoordv2/meta/collection_manager.go | 245 +++--- .../meta/collection_manager_test.go | 211 +++-- internal/querycoordv2/meta/mock_store.go | 71 +- internal/querycoordv2/meta/store.go | 42 +- internal/querycoordv2/meta/store_test.go | 33 + internal/querycoordv2/meta/target_manager.go | 52 +- .../querycoordv2/meta/target_manager_test.go | 3 +- internal/querycoordv2/mocks/mock_querynode.go | 121 ++- .../observers/collection_observer.go | 107 +-- .../observers/collection_observer_test.go | 119 ++- .../observers/leader_observer_test.go | 6 + .../querycoordv2/observers/target_observer.go | 17 +- .../observers/target_observer_test.go | 8 +- internal/querycoordv2/server.go | 12 +- internal/querycoordv2/server_test.go | 42 +- internal/querycoordv2/services.go | 111 ++- internal/querycoordv2/services_test.go | 96 +-- internal/querycoordv2/session/cluster.go | 30 + internal/querycoordv2/session/cluster_test.go | 55 ++ internal/querycoordv2/session/mock_cluster.go | 146 +++- internal/querycoordv2/task/executor.go | 4 +- internal/querycoordv2/task/task_test.go | 20 +- internal/querycoordv2/utils/meta.go | 18 +- .../querynode/flow_graph_delete_node_test.go | 2 +- .../flow_graph_filter_delete_node.go | 12 +- .../flow_graph_filter_delete_node_test.go | 2 +- .../querynode/flow_graph_filter_dm_node.go | 24 +- .../flow_graph_filter_dm_node_test.go | 23 - internal/querynode/flow_graph_insert_node.go | 9 - .../querynode/flow_graph_insert_node_test.go | 2 +- internal/querynode/impl.go | 65 +- internal/querynode/impl_test.go | 49 +- internal/querynode/meta_replica.go | 26 +- internal/querynode/search_test.go | 1 + internal/querynode/statistic_test.go | 8 + internal/querynode/validate_test.go | 3 + internal/rootcoord/broker.go | 45 ++ internal/rootcoord/create_partition_task.go | 20 +- .../rootcoord/create_partition_task_test.go | 16 +- internal/rootcoord/drop_partition_task.go | 7 +- .../rootcoord/drop_partition_task_test.go | 3 + internal/rootcoord/meta_table.go | 2 +- internal/rootcoord/meta_table_test.go | 4 +- internal/rootcoord/mock_test.go | 22 +- 
internal/rootcoord/step.go | 38 +
 internal/types/mock_querycoord.go | 147 ++--
 internal/types/types.go | 2 +
 internal/util/mock/grpc_querycoord_client.go | 4 +
 internal/util/mock/grpc_querynode_client.go | 4 +
 internal/util/mock/querynode_client.go | 4 +
 .../testcases/test_collection.py | 8 +-
 78 files changed, 3184 insertions(+), 1574 deletions(-)
 create mode 100644 internal/querycoordv2/job/job_load.go
 create mode 100644 internal/querycoordv2/job/job_release.go
 create mode 100644 internal/querycoordv2/job/job_sync.go
 create mode 100644 internal/querycoordv2/job/undo.go

diff --git a/internal/distributed/querycoord/client/client.go b/internal/distributed/querycoord/client/client.go
index 61e681f24b..07da8a1ba6 100644
--- a/internal/distributed/querycoord/client/client.go
+++ b/internal/distributed/querycoord/client/client.go
@@ -272,6 +272,25 @@ func (c *Client) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
 return ret.(*commonpb.Status), err
 }
 
+// SyncNewCreatedPartition notifies QueryCoord to sync the newly created partition if the collection is loaded.
+func (c *Client) SyncNewCreatedPartition(ctx context.Context, req *querypb.SyncNewCreatedPartitionRequest) (*commonpb.Status, error) {
+ req = typeutil.Clone(req)
+ commonpbutil.UpdateMsgBase(
+ req.GetBase(),
+ commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
+ )
+ ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
+ if !funcutil.CheckCtxValid(ctx) {
+ return nil, ctx.Err()
+ }
+ return client.SyncNewCreatedPartition(ctx, req)
+ })
+ if err != nil || ret == nil {
+ return nil, err
+ }
+ return ret.(*commonpb.Status), err
+}
+
 // GetPartitionStates gets the states of the specified partition.
 func (c *Client) GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error) {
 req = typeutil.Clone(req)
diff --git a/internal/distributed/querycoord/client/client_test.go b/internal/distributed/querycoord/client/client_test.go
index d1819d3cdf..4a381c8b04 100644
--- a/internal/distributed/querycoord/client/client_test.go
+++ b/internal/distributed/querycoord/client/client_test.go
@@ -114,6 +114,9 @@ func Test_NewClient(t *testing.T) {
 r7, err := client.ReleasePartitions(ctx, nil)
 retCheck(retNotNil, r7, err)
 
+ r7, err = client.SyncNewCreatedPartition(ctx, nil)
+ retCheck(retNotNil, r7, err)
+
 r8, err := client.ShowCollections(ctx, nil)
 retCheck(retNotNil, r8, err)
 
diff --git a/internal/distributed/querycoord/service.go b/internal/distributed/querycoord/service.go
index 87f1eb2795..f771203ec9 100644
--- a/internal/distributed/querycoord/service.go
+++ b/internal/distributed/querycoord/service.go
@@ -329,6 +329,11 @@ func (s *Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
 return s.queryCoord.ReleasePartitions(ctx, req)
 }
 
+// SyncNewCreatedPartition notifies QueryCoord to sync the newly created partition if the collection is loaded.
+func (s *Server) SyncNewCreatedPartition(ctx context.Context, req *querypb.SyncNewCreatedPartitionRequest) (*commonpb.Status, error) {
+ return s.queryCoord.SyncNewCreatedPartition(ctx, req)
+}
+
 // GetSegmentInfo gets the information of the specified segment from QueryCoord.
func (s *Server) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
 return s.queryCoord.GetSegmentInfo(ctx, req)
diff --git a/internal/distributed/querynode/client/client.go b/internal/distributed/querynode/client/client.go
index 2e9546bdd0..0236382d22 100644
--- a/internal/distributed/querynode/client/client.go
+++ b/internal/distributed/querynode/client/client.go
@@ -213,6 +213,24 @@ func (c *Client) ReleaseCollection(ctx context.Context, req *querypb.ReleaseColl
 return ret.(*commonpb.Status), err
 }
 
+// LoadPartitions updates the partition meta info in QueryNode.
+func (c *Client) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) {
+ req = typeutil.Clone(req)
+ commonpbutil.UpdateMsgBase(
+ req.GetBase(),
+ commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
+ ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
+ if !funcutil.CheckCtxValid(ctx) {
+ return nil, ctx.Err()
+ }
+ return client.LoadPartitions(ctx, req)
+ })
+ if err != nil || ret == nil {
+ return nil, err
+ }
+ return ret.(*commonpb.Status), err
+}
+
 // ReleasePartitions releases the data of the specified partitions in QueryNode.
 func (c *Client) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) {
 req = typeutil.Clone(req)
diff --git a/internal/distributed/querynode/client/client_test.go b/internal/distributed/querynode/client/client_test.go
index 197c548d49..fe544f1478 100644
--- a/internal/distributed/querynode/client/client_test.go
+++ b/internal/distributed/querynode/client/client_test.go
@@ -78,6 +78,9 @@ func Test_NewClient(t *testing.T) {
 r8, err := client.ReleaseCollection(ctx, nil)
 retCheck(retNotNil, r8, err)
 
+ r8, err = client.LoadPartitions(ctx, nil)
+ retCheck(retNotNil, r8, err)
+
 r9, err := client.ReleasePartitions(ctx, nil)
 retCheck(retNotNil, r9, err)
 
diff --git a/internal/distributed/querynode/service.go b/internal/distributed/querynode/service.go
index 6687e58005..70e611617d 100644
--- a/internal/distributed/querynode/service.go
+++ b/internal/distributed/querynode/service.go
@@ -276,6 +276,11 @@ func (s *Server) ReleaseCollection(ctx context.Context, req *querypb.ReleaseColl
 return s.querynode.ReleaseCollection(ctx, req)
 }
 
+// LoadPartitions updates the partition meta info in QueryNode.
+func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) {
+ return s.querynode.LoadPartitions(ctx, req)
+}
+
 // ReleasePartitions releases the data of the specified partitions in QueryNode.
func (s *Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) { // ignore ctx diff --git a/internal/distributed/querynode/service_test.go b/internal/distributed/querynode/service_test.go index d8d10d9037..4cef0dd073 100644 --- a/internal/distributed/querynode/service_test.go +++ b/internal/distributed/querynode/service_test.go @@ -94,6 +94,10 @@ func (m *MockQueryNode) ReleaseCollection(ctx context.Context, req *querypb.Rele return m.status, m.err } +func (m *MockQueryNode) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) { + return m.status, m.err +} + func (m *MockQueryNode) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) { return m.status, m.err } @@ -263,6 +267,13 @@ func Test_NewServer(t *testing.T) { assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) }) + t.Run("LoadPartitions", func(t *testing.T) { + req := &querypb.LoadPartitionsRequest{} + resp, err := server.LoadPartitions(ctx, req) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) + }) + t.Run("ReleasePartitions", func(t *testing.T) { req := &querypb.ReleasePartitionsRequest{} resp, err := server.ReleasePartitions(ctx, req) diff --git a/internal/metastore/catalog.go b/internal/metastore/catalog.go index e3ec81b005..3fde11b412 100644 --- a/internal/metastore/catalog.go +++ b/internal/metastore/catalog.go @@ -150,13 +150,13 @@ type IndexCoordCatalog interface { } type QueryCoordCatalog interface { - SaveCollection(info *querypb.CollectionLoadInfo) error + SaveCollection(collection *querypb.CollectionLoadInfo, partitions ...*querypb.PartitionLoadInfo) error SavePartition(info ...*querypb.PartitionLoadInfo) error SaveReplica(replica *querypb.Replica) error GetCollections() ([]*querypb.CollectionLoadInfo, error) GetPartitions() (map[int64][]*querypb.PartitionLoadInfo, error) GetReplicas() ([]*querypb.Replica, error) - ReleaseCollection(id int64) error + ReleaseCollection(collection int64) error ReleasePartition(collection int64, partitions ...int64) error ReleaseReplicas(collectionID int64) error ReleaseReplica(collection, replica int64) error diff --git a/internal/metrics/querycoord_metrics.go b/internal/metrics/querycoord_metrics.go index d206667c3a..6b9e4a2274 100644 --- a/internal/metrics/querycoord_metrics.go +++ b/internal/metrics/querycoord_metrics.go @@ -31,12 +31,12 @@ var ( Help: "number of collections", }, []string{}) - QueryCoordNumEntities = prometheus.NewGaugeVec( + QueryCoordNumPartitions = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: milvusNamespace, Subsystem: typeutil.QueryCoordRole, - Name: "entity_num", - Help: "number of entities", + Name: "partition_num", + Help: "number of partitions", }, []string{}) QueryCoordLoadCount = prometheus.NewCounterVec( @@ -97,7 +97,7 @@ var ( // RegisterQueryCoord registers QueryCoord metrics func RegisterQueryCoord(registry *prometheus.Registry) { registry.MustRegister(QueryCoordNumCollections) - registry.MustRegister(QueryCoordNumEntities) + registry.MustRegister(QueryCoordNumPartitions) registry.MustRegister(QueryCoordLoadCount) registry.MustRegister(QueryCoordReleaseCount) registry.MustRegister(QueryCoordLoadLatency) diff --git a/internal/proto/query_coord.proto b/internal/proto/query_coord.proto index 907982e572..4bdc562cd3 100644 --- a/internal/proto/query_coord.proto +++ b/internal/proto/query_coord.proto @@ -23,6 +23,7 @@ service QueryCoord { rpc 
ReleasePartitions(ReleasePartitionsRequest) returns (common.Status) {}
 rpc LoadCollection(LoadCollectionRequest) returns (common.Status) {}
 rpc ReleaseCollection(ReleaseCollectionRequest) returns (common.Status) {}
+ rpc SyncNewCreatedPartition(SyncNewCreatedPartitionRequest) returns (common.Status) {}
 rpc GetPartitionStates(GetPartitionStatesRequest) returns (GetPartitionStatesResponse) {}
 rpc GetSegmentInfo(GetSegmentInfoRequest) returns (GetSegmentInfoResponse) {}
@@ -55,6 +56,7 @@ service QueryNode {
 rpc UnsubDmChannel(UnsubDmChannelRequest) returns (common.Status) {}
 rpc LoadSegments(LoadSegmentsRequest) returns (common.Status) {}
 rpc ReleaseCollection(ReleaseCollectionRequest) returns (common.Status) {}
+ rpc LoadPartitions(LoadPartitionsRequest) returns (common.Status) {}
 rpc ReleasePartitions(ReleasePartitionsRequest) returns (common.Status) {}
 rpc ReleaseSegments(ReleaseSegmentsRequest) returns (common.Status) {}
 rpc GetSegmentInfo(GetSegmentInfoRequest) returns (GetSegmentInfoResponse) {}
@@ -189,6 +191,12 @@ message ShardLeadersList { // All leaders of all replicas of one shard
 repeated string node_addrs = 3;
 }
 
+message SyncNewCreatedPartitionRequest {
+ common.MsgBase base = 1;
+ int64 collectionID = 2;
+ int64 partitionID = 3;
+}
+
 //-----------------query node grpc request and response proto----------------
 message LoadMetaInfo {
 LoadType load_type = 1;
@@ -482,18 +490,19 @@ enum LoadStatus {
 
 message CollectionLoadInfo {
 int64 collectionID = 1;
- repeated int64 released_partitions = 2;
+ repeated int64 released_partitions = 2; // Deprecated: No longer used; kept for compatibility.
 int32 replica_number = 3;
 LoadStatus status = 4;
 map<int64, int64> field_indexID = 5;
+ LoadType load_type = 6;
 }
 
 message PartitionLoadInfo {
 int64 collectionID = 1;
 int64 partitionID = 2;
- int32 replica_number = 3;
+ int32 replica_number = 3; // Deprecated: No longer used; kept for compatibility.
 LoadStatus status = 4;
- map<int64, int64> field_indexID = 5;
+ map<int64, int64> field_indexID = 5; // Deprecated: No longer used; kept for compatibility.
} message Replica { diff --git a/internal/proto/querypb/query_coord.pb.go b/internal/proto/querypb/query_coord.pb.go index 3f074980fe..e3b02207b3 100644 --- a/internal/proto/querypb/query_coord.pb.go +++ b/internal/proto/querypb/query_coord.pb.go @@ -1222,6 +1222,61 @@ func (m *ShardLeadersList) GetNodeAddrs() []string { return nil } +type SyncNewCreatedPartitionRequest struct { + Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"` + CollectionID int64 `protobuf:"varint,2,opt,name=collectionID,proto3" json:"collectionID,omitempty"` + PartitionID int64 `protobuf:"varint,3,opt,name=partitionID,proto3" json:"partitionID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SyncNewCreatedPartitionRequest) Reset() { *m = SyncNewCreatedPartitionRequest{} } +func (m *SyncNewCreatedPartitionRequest) String() string { return proto.CompactTextString(m) } +func (*SyncNewCreatedPartitionRequest) ProtoMessage() {} +func (*SyncNewCreatedPartitionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aab7cc9a69ed26e8, []int{16} +} + +func (m *SyncNewCreatedPartitionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SyncNewCreatedPartitionRequest.Unmarshal(m, b) +} +func (m *SyncNewCreatedPartitionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SyncNewCreatedPartitionRequest.Marshal(b, m, deterministic) +} +func (m *SyncNewCreatedPartitionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SyncNewCreatedPartitionRequest.Merge(m, src) +} +func (m *SyncNewCreatedPartitionRequest) XXX_Size() int { + return xxx_messageInfo_SyncNewCreatedPartitionRequest.Size(m) +} +func (m *SyncNewCreatedPartitionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SyncNewCreatedPartitionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SyncNewCreatedPartitionRequest proto.InternalMessageInfo + +func (m *SyncNewCreatedPartitionRequest) GetBase() *commonpb.MsgBase { + if m != nil { + return m.Base + } + return nil +} + +func (m *SyncNewCreatedPartitionRequest) GetCollectionID() int64 { + if m != nil { + return m.CollectionID + } + return 0 +} + +func (m *SyncNewCreatedPartitionRequest) GetPartitionID() int64 { + if m != nil { + return m.PartitionID + } + return 0 +} + //-----------------query node grpc request and response proto---------------- type LoadMetaInfo struct { LoadType LoadType `protobuf:"varint,1,opt,name=load_type,json=loadType,proto3,enum=milvus.proto.query.LoadType" json:"load_type,omitempty"` @@ -1236,7 +1291,7 @@ func (m *LoadMetaInfo) Reset() { *m = LoadMetaInfo{} } func (m *LoadMetaInfo) String() string { return proto.CompactTextString(m) } func (*LoadMetaInfo) ProtoMessage() {} func (*LoadMetaInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{16} + return fileDescriptor_aab7cc9a69ed26e8, []int{17} } func (m *LoadMetaInfo) XXX_Unmarshal(b []byte) error { @@ -1302,7 +1357,7 @@ func (m *WatchDmChannelsRequest) Reset() { *m = WatchDmChannelsRequest{} func (m *WatchDmChannelsRequest) String() string { return proto.CompactTextString(m) } func (*WatchDmChannelsRequest) ProtoMessage() {} func (*WatchDmChannelsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{17} + return fileDescriptor_aab7cc9a69ed26e8, []int{18} } func (m *WatchDmChannelsRequest) XXX_Unmarshal(b []byte) error { @@ -1421,7 +1476,7 @@ func (m *UnsubDmChannelRequest) Reset() { *m = 
UnsubDmChannelRequest{} } func (m *UnsubDmChannelRequest) String() string { return proto.CompactTextString(m) } func (*UnsubDmChannelRequest) ProtoMessage() {} func (*UnsubDmChannelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{18} + return fileDescriptor_aab7cc9a69ed26e8, []int{19} } func (m *UnsubDmChannelRequest) XXX_Unmarshal(b []byte) error { @@ -1494,7 +1549,7 @@ func (m *SegmentLoadInfo) Reset() { *m = SegmentLoadInfo{} } func (m *SegmentLoadInfo) String() string { return proto.CompactTextString(m) } func (*SegmentLoadInfo) ProtoMessage() {} func (*SegmentLoadInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{19} + return fileDescriptor_aab7cc9a69ed26e8, []int{20} } func (m *SegmentLoadInfo) XXX_Unmarshal(b []byte) error { @@ -1634,7 +1689,7 @@ func (m *FieldIndexInfo) Reset() { *m = FieldIndexInfo{} } func (m *FieldIndexInfo) String() string { return proto.CompactTextString(m) } func (*FieldIndexInfo) ProtoMessage() {} func (*FieldIndexInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{20} + return fileDescriptor_aab7cc9a69ed26e8, []int{21} } func (m *FieldIndexInfo) XXX_Unmarshal(b []byte) error { @@ -1746,7 +1801,7 @@ func (m *LoadSegmentsRequest) Reset() { *m = LoadSegmentsRequest{} } func (m *LoadSegmentsRequest) String() string { return proto.CompactTextString(m) } func (*LoadSegmentsRequest) ProtoMessage() {} func (*LoadSegmentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{21} + return fileDescriptor_aab7cc9a69ed26e8, []int{22} } func (m *LoadSegmentsRequest) XXX_Unmarshal(b []byte) error { @@ -1864,7 +1919,7 @@ func (m *ReleaseSegmentsRequest) Reset() { *m = ReleaseSegmentsRequest{} func (m *ReleaseSegmentsRequest) String() string { return proto.CompactTextString(m) } func (*ReleaseSegmentsRequest) ProtoMessage() {} func (*ReleaseSegmentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{22} + return fileDescriptor_aab7cc9a69ed26e8, []int{23} } func (m *ReleaseSegmentsRequest) XXX_Unmarshal(b []byte) error { @@ -1963,7 +2018,7 @@ func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{23} + return fileDescriptor_aab7cc9a69ed26e8, []int{24} } func (m *SearchRequest) XXX_Unmarshal(b []byte) error { @@ -2034,7 +2089,7 @@ func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (m *QueryRequest) String() string { return proto.CompactTextString(m) } func (*QueryRequest) ProtoMessage() {} func (*QueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{24} + return fileDescriptor_aab7cc9a69ed26e8, []int{25} } func (m *QueryRequest) XXX_Unmarshal(b []byte) error { @@ -2103,7 +2158,7 @@ func (m *SyncReplicaSegmentsRequest) Reset() { *m = SyncReplicaSegmentsR func (m *SyncReplicaSegmentsRequest) String() string { return proto.CompactTextString(m) } func (*SyncReplicaSegmentsRequest) ProtoMessage() {} func (*SyncReplicaSegmentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{25} + return fileDescriptor_aab7cc9a69ed26e8, []int{26} } func (m *SyncReplicaSegmentsRequest) XXX_Unmarshal(b []byte) error { @@ -2159,7 +2214,7 @@ func (m *ReplicaSegmentsInfo) Reset() { *m = ReplicaSegmentsInfo{} } func (m 
*ReplicaSegmentsInfo) String() string { return proto.CompactTextString(m) } func (*ReplicaSegmentsInfo) ProtoMessage() {} func (*ReplicaSegmentsInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{26} + return fileDescriptor_aab7cc9a69ed26e8, []int{27} } func (m *ReplicaSegmentsInfo) XXX_Unmarshal(b []byte) error { @@ -2222,7 +2277,7 @@ func (m *HandoffSegmentsRequest) Reset() { *m = HandoffSegmentsRequest{} func (m *HandoffSegmentsRequest) String() string { return proto.CompactTextString(m) } func (*HandoffSegmentsRequest) ProtoMessage() {} func (*HandoffSegmentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{27} + return fileDescriptor_aab7cc9a69ed26e8, []int{28} } func (m *HandoffSegmentsRequest) XXX_Unmarshal(b []byte) error { @@ -2280,7 +2335,7 @@ func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } func (*LoadBalanceRequest) ProtoMessage() {} func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{28} + return fileDescriptor_aab7cc9a69ed26e8, []int{29} } func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error { @@ -2358,7 +2413,7 @@ func (m *DmChannelWatchInfo) Reset() { *m = DmChannelWatchInfo{} } func (m *DmChannelWatchInfo) String() string { return proto.CompactTextString(m) } func (*DmChannelWatchInfo) ProtoMessage() {} func (*DmChannelWatchInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{29} + return fileDescriptor_aab7cc9a69ed26e8, []int{30} } func (m *DmChannelWatchInfo) XXX_Unmarshal(b []byte) error { @@ -2429,7 +2484,7 @@ func (m *QueryChannelInfo) Reset() { *m = QueryChannelInfo{} } func (m *QueryChannelInfo) String() string { return proto.CompactTextString(m) } func (*QueryChannelInfo) ProtoMessage() {} func (*QueryChannelInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{30} + return fileDescriptor_aab7cc9a69ed26e8, []int{31} } func (m *QueryChannelInfo) XXX_Unmarshal(b []byte) error { @@ -2498,7 +2553,7 @@ func (m *PartitionStates) Reset() { *m = PartitionStates{} } func (m *PartitionStates) String() string { return proto.CompactTextString(m) } func (*PartitionStates) ProtoMessage() {} func (*PartitionStates) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{31} + return fileDescriptor_aab7cc9a69ed26e8, []int{32} } func (m *PartitionStates) XXX_Unmarshal(b []byte) error { @@ -2568,7 +2623,7 @@ func (m *SegmentInfo) Reset() { *m = SegmentInfo{} } func (m *SegmentInfo) String() string { return proto.CompactTextString(m) } func (*SegmentInfo) ProtoMessage() {} func (*SegmentInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{32} + return fileDescriptor_aab7cc9a69ed26e8, []int{33} } func (m *SegmentInfo) XXX_Unmarshal(b []byte) error { @@ -2727,7 +2782,7 @@ func (m *CollectionInfo) Reset() { *m = CollectionInfo{} } func (m *CollectionInfo) String() string { return proto.CompactTextString(m) } func (*CollectionInfo) ProtoMessage() {} func (*CollectionInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{33} + return fileDescriptor_aab7cc9a69ed26e8, []int{34} } func (m *CollectionInfo) XXX_Unmarshal(b []byte) error { @@ -2823,7 +2878,7 @@ func (m *UnsubscribeChannels) Reset() { *m = UnsubscribeChannels{} } func (m *UnsubscribeChannels) String() string { return proto.CompactTextString(m) } func 
(*UnsubscribeChannels) ProtoMessage() {} func (*UnsubscribeChannels) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{34} + return fileDescriptor_aab7cc9a69ed26e8, []int{35} } func (m *UnsubscribeChannels) XXX_Unmarshal(b []byte) error { @@ -2870,7 +2925,7 @@ func (m *UnsubscribeChannelInfo) Reset() { *m = UnsubscribeChannelInfo{} func (m *UnsubscribeChannelInfo) String() string { return proto.CompactTextString(m) } func (*UnsubscribeChannelInfo) ProtoMessage() {} func (*UnsubscribeChannelInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{35} + return fileDescriptor_aab7cc9a69ed26e8, []int{36} } func (m *UnsubscribeChannelInfo) XXX_Unmarshal(b []byte) error { @@ -2920,7 +2975,7 @@ func (m *SegmentChangeInfo) Reset() { *m = SegmentChangeInfo{} } func (m *SegmentChangeInfo) String() string { return proto.CompactTextString(m) } func (*SegmentChangeInfo) ProtoMessage() {} func (*SegmentChangeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{36} + return fileDescriptor_aab7cc9a69ed26e8, []int{37} } func (m *SegmentChangeInfo) XXX_Unmarshal(b []byte) error { @@ -2981,7 +3036,7 @@ func (m *SealedSegmentsChangeInfo) Reset() { *m = SealedSegmentsChangeIn func (m *SealedSegmentsChangeInfo) String() string { return proto.CompactTextString(m) } func (*SealedSegmentsChangeInfo) ProtoMessage() {} func (*SealedSegmentsChangeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{37} + return fileDescriptor_aab7cc9a69ed26e8, []int{38} } func (m *SealedSegmentsChangeInfo) XXX_Unmarshal(b []byte) error { @@ -3027,7 +3082,7 @@ func (m *GetDataDistributionRequest) Reset() { *m = GetDataDistributionR func (m *GetDataDistributionRequest) String() string { return proto.CompactTextString(m) } func (*GetDataDistributionRequest) ProtoMessage() {} func (*GetDataDistributionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{38} + return fileDescriptor_aab7cc9a69ed26e8, []int{39} } func (m *GetDataDistributionRequest) XXX_Unmarshal(b []byte) error { @@ -3070,7 +3125,7 @@ func (m *GetDataDistributionResponse) Reset() { *m = GetDataDistribution func (m *GetDataDistributionResponse) String() string { return proto.CompactTextString(m) } func (*GetDataDistributionResponse) ProtoMessage() {} func (*GetDataDistributionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{39} + return fileDescriptor_aab7cc9a69ed26e8, []int{40} } func (m *GetDataDistributionResponse) XXX_Unmarshal(b []byte) error { @@ -3141,7 +3196,7 @@ func (m *LeaderView) Reset() { *m = LeaderView{} } func (m *LeaderView) String() string { return proto.CompactTextString(m) } func (*LeaderView) ProtoMessage() {} func (*LeaderView) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{40} + return fileDescriptor_aab7cc9a69ed26e8, []int{41} } func (m *LeaderView) XXX_Unmarshal(b []byte) error { @@ -3209,7 +3264,7 @@ func (m *SegmentDist) Reset() { *m = SegmentDist{} } func (m *SegmentDist) String() string { return proto.CompactTextString(m) } func (*SegmentDist) ProtoMessage() {} func (*SegmentDist) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{41} + return fileDescriptor_aab7cc9a69ed26e8, []int{42} } func (m *SegmentDist) XXX_Unmarshal(b []byte) error { @@ -3259,7 +3314,7 @@ func (m *SegmentVersionInfo) Reset() { *m = SegmentVersionInfo{} } func (m *SegmentVersionInfo) String() string { return 
proto.CompactTextString(m) } func (*SegmentVersionInfo) ProtoMessage() {} func (*SegmentVersionInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{42} + return fileDescriptor_aab7cc9a69ed26e8, []int{43} } func (m *SegmentVersionInfo) XXX_Unmarshal(b []byte) error { @@ -3328,7 +3383,7 @@ func (m *ChannelVersionInfo) Reset() { *m = ChannelVersionInfo{} } func (m *ChannelVersionInfo) String() string { return proto.CompactTextString(m) } func (*ChannelVersionInfo) ProtoMessage() {} func (*ChannelVersionInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{43} + return fileDescriptor_aab7cc9a69ed26e8, []int{44} } func (m *ChannelVersionInfo) XXX_Unmarshal(b []byte) error { @@ -3376,6 +3431,7 @@ type CollectionLoadInfo struct { ReplicaNumber int32 `protobuf:"varint,3,opt,name=replica_number,json=replicaNumber,proto3" json:"replica_number,omitempty"` Status LoadStatus `protobuf:"varint,4,opt,name=status,proto3,enum=milvus.proto.query.LoadStatus" json:"status,omitempty"` FieldIndexID map[int64]int64 `protobuf:"bytes,5,rep,name=field_indexID,json=fieldIndexID,proto3" json:"field_indexID,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + LoadType LoadType `protobuf:"varint,6,opt,name=load_type,json=loadType,proto3,enum=milvus.proto.query.LoadType" json:"load_type,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -3385,7 +3441,7 @@ func (m *CollectionLoadInfo) Reset() { *m = CollectionLoadInfo{} } func (m *CollectionLoadInfo) String() string { return proto.CompactTextString(m) } func (*CollectionLoadInfo) ProtoMessage() {} func (*CollectionLoadInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{44} + return fileDescriptor_aab7cc9a69ed26e8, []int{45} } func (m *CollectionLoadInfo) XXX_Unmarshal(b []byte) error { @@ -3441,6 +3497,13 @@ func (m *CollectionLoadInfo) GetFieldIndexID() map[int64]int64 { return nil } +func (m *CollectionLoadInfo) GetLoadType() LoadType { + if m != nil { + return m.LoadType + } + return LoadType_UnKnownType +} + type PartitionLoadInfo struct { CollectionID int64 `protobuf:"varint,1,opt,name=collectionID,proto3" json:"collectionID,omitempty"` PartitionID int64 `protobuf:"varint,2,opt,name=partitionID,proto3" json:"partitionID,omitempty"` @@ -3456,7 +3519,7 @@ func (m *PartitionLoadInfo) Reset() { *m = PartitionLoadInfo{} } func (m *PartitionLoadInfo) String() string { return proto.CompactTextString(m) } func (*PartitionLoadInfo) ProtoMessage() {} func (*PartitionLoadInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{45} + return fileDescriptor_aab7cc9a69ed26e8, []int{46} } func (m *PartitionLoadInfo) XXX_Unmarshal(b []byte) error { @@ -3526,7 +3589,7 @@ func (m *Replica) Reset() { *m = Replica{} } func (m *Replica) String() string { return proto.CompactTextString(m) } func (*Replica) ProtoMessage() {} func (*Replica) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{46} + return fileDescriptor_aab7cc9a69ed26e8, []int{47} } func (m *Replica) XXX_Unmarshal(b []byte) error { @@ -3590,7 +3653,7 @@ func (m *SyncAction) Reset() { *m = SyncAction{} } func (m *SyncAction) String() string { return proto.CompactTextString(m) } func (*SyncAction) ProtoMessage() {} func (*SyncAction) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{47} + return fileDescriptor_aab7cc9a69ed26e8, 
[]int{48} } func (m *SyncAction) XXX_Unmarshal(b []byte) error { @@ -3660,7 +3723,7 @@ func (m *SyncDistributionRequest) Reset() { *m = SyncDistributionRequest func (m *SyncDistributionRequest) String() string { return proto.CompactTextString(m) } func (*SyncDistributionRequest) ProtoMessage() {} func (*SyncDistributionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{48} + return fileDescriptor_aab7cc9a69ed26e8, []int{49} } func (m *SyncDistributionRequest) XXX_Unmarshal(b []byte) error { @@ -3722,7 +3785,7 @@ func (m *ResourceGroup) Reset() { *m = ResourceGroup{} } func (m *ResourceGroup) String() string { return proto.CompactTextString(m) } func (*ResourceGroup) ProtoMessage() {} func (*ResourceGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{49} + return fileDescriptor_aab7cc9a69ed26e8, []int{50} } func (m *ResourceGroup) XXX_Unmarshal(b []byte) error { @@ -3780,7 +3843,7 @@ func (m *TransferReplicaRequest) Reset() { *m = TransferReplicaRequest{} func (m *TransferReplicaRequest) String() string { return proto.CompactTextString(m) } func (*TransferReplicaRequest) ProtoMessage() {} func (*TransferReplicaRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{50} + return fileDescriptor_aab7cc9a69ed26e8, []int{51} } func (m *TransferReplicaRequest) XXX_Unmarshal(b []byte) error { @@ -3848,7 +3911,7 @@ func (m *DescribeResourceGroupRequest) Reset() { *m = DescribeResourceGr func (m *DescribeResourceGroupRequest) String() string { return proto.CompactTextString(m) } func (*DescribeResourceGroupRequest) ProtoMessage() {} func (*DescribeResourceGroupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{51} + return fileDescriptor_aab7cc9a69ed26e8, []int{52} } func (m *DescribeResourceGroupRequest) XXX_Unmarshal(b []byte) error { @@ -3895,7 +3958,7 @@ func (m *DescribeResourceGroupResponse) Reset() { *m = DescribeResourceG func (m *DescribeResourceGroupResponse) String() string { return proto.CompactTextString(m) } func (*DescribeResourceGroupResponse) ProtoMessage() {} func (*DescribeResourceGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{52} + return fileDescriptor_aab7cc9a69ed26e8, []int{53} } func (m *DescribeResourceGroupResponse) XXX_Unmarshal(b []byte) error { @@ -3949,7 +4012,7 @@ func (m *ResourceGroupInfo) Reset() { *m = ResourceGroupInfo{} } func (m *ResourceGroupInfo) String() string { return proto.CompactTextString(m) } func (*ResourceGroupInfo) ProtoMessage() {} func (*ResourceGroupInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_aab7cc9a69ed26e8, []int{53} + return fileDescriptor_aab7cc9a69ed26e8, []int{54} } func (m *ResourceGroupInfo) XXX_Unmarshal(b []byte) error { @@ -4037,6 +4100,7 @@ func init() { proto.RegisterType((*GetShardLeadersRequest)(nil), "milvus.proto.query.GetShardLeadersRequest") proto.RegisterType((*GetShardLeadersResponse)(nil), "milvus.proto.query.GetShardLeadersResponse") proto.RegisterType((*ShardLeadersList)(nil), "milvus.proto.query.ShardLeadersList") + proto.RegisterType((*SyncNewCreatedPartitionRequest)(nil), "milvus.proto.query.SyncNewCreatedPartitionRequest") proto.RegisterType((*LoadMetaInfo)(nil), "milvus.proto.query.LoadMetaInfo") proto.RegisterType((*WatchDmChannelsRequest)(nil), "milvus.proto.query.WatchDmChannelsRequest") proto.RegisterMapType((map[int64]*datapb.SegmentInfo)(nil), "milvus.proto.query.WatchDmChannelsRequest.SegmentInfosEntry") @@ 
-4088,268 +4152,271 @@ func init() { func init() { proto.RegisterFile("query_coord.proto", fileDescriptor_aab7cc9a69ed26e8) } var fileDescriptor_aab7cc9a69ed26e8 = []byte{ - // 4161 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x49, 0x8c, 0x1c, 0x59, - 0x56, 0x8e, 0x5c, 0xaa, 0x32, 0x5f, 0x2e, 0x95, 0xf5, 0xcb, 0x65, 0xe7, 0xe4, 0x78, 0xa9, 0x09, - 0xb7, 0xbb, 0x0b, 0x77, 0x77, 0x55, 0x4f, 0x79, 0xa6, 0xf1, 0x6c, 0x1a, 0xec, 0xaa, 0xb1, 0xbb, - 0x68, 0xb7, 0xdb, 0x44, 0xd9, 0x1e, 0xd4, 0x6a, 0x26, 0x27, 0x32, 0xe3, 0x67, 0x56, 0xc8, 0xb1, - 0xa4, 0x23, 0x22, 0xcb, 0x5d, 0x8d, 0xc4, 0x69, 0x2e, 0x20, 0x06, 0x89, 0x0b, 0x27, 0xc4, 0x01, - 0x81, 0x34, 0x48, 0x8c, 0xc4, 0x01, 0x6e, 0x1c, 0x90, 0x90, 0xe0, 0x04, 0xe2, 0xc6, 0x91, 0x2b, - 0x12, 0x20, 0xc4, 0x61, 0x34, 0x9a, 0x1b, 0xfa, 0x5b, 0x44, 0xfc, 0x88, 0x1f, 0x95, 0x51, 0x55, - 0xbd, 0x0d, 0xe2, 0x16, 0xf1, 0xfe, 0xf2, 0xde, 0x7f, 0xfb, 0xfb, 0x0b, 0xac, 0xbe, 0x98, 0xe3, - 0xe0, 0x78, 0x38, 0xf6, 0xfd, 0xc0, 0xda, 0x9a, 0x05, 0x7e, 0xe4, 0x23, 0xe4, 0xda, 0xce, 0xd1, - 0x3c, 0x64, 0x7f, 0x5b, 0xb4, 0x7d, 0xd0, 0x1e, 0xfb, 0xae, 0xeb, 0x7b, 0x0c, 0x36, 0x68, 0xa7, - 0x7b, 0x0c, 0xba, 0xb6, 0x17, 0xe1, 0xc0, 0x33, 0x1d, 0xd1, 0x1a, 0x8e, 0x0f, 0xb1, 0x6b, 0xf2, - 0xbf, 0xa6, 0x1b, 0x4e, 0xf9, 0x67, 0xcf, 0x32, 0x23, 0x33, 0x8d, 0x4a, 0xff, 0x91, 0x06, 0x97, - 0x0e, 0x0e, 0xfd, 0x97, 0xbb, 0xbe, 0xe3, 0xe0, 0x71, 0x64, 0xfb, 0x5e, 0x68, 0xe0, 0x17, 0x73, - 0x1c, 0x46, 0xe8, 0x2d, 0xa8, 0x8d, 0xcc, 0x10, 0xf7, 0xb5, 0x0d, 0x6d, 0xb3, 0xb5, 0x73, 0x65, - 0x4b, 0x22, 0x8a, 0x53, 0xf3, 0x5e, 0x38, 0xbd, 0x67, 0x86, 0xd8, 0xa0, 0x3d, 0x11, 0x82, 0x9a, - 0x35, 0xda, 0xdf, 0xeb, 0x57, 0x36, 0xb4, 0xcd, 0xaa, 0x41, 0xbf, 0xd1, 0x2b, 0xd0, 0x19, 0xc7, - 0x73, 0xef, 0xef, 0x85, 0xfd, 0xea, 0x46, 0x75, 0xb3, 0x6a, 0xc8, 0x40, 0xfd, 0xdf, 0x34, 0xb8, - 0x9c, 0x23, 0x23, 0x9c, 0xf9, 0x5e, 0x88, 0xd1, 0x6d, 0x58, 0x0a, 0x23, 0x33, 0x9a, 0x87, 0x9c, - 0x92, 0x2f, 0x2b, 0x29, 0x39, 0xa0, 0x5d, 0x0c, 0xde, 0x35, 0x8f, 0xb6, 0xa2, 0x40, 0x8b, 0xbe, - 0x0a, 0x17, 0x6d, 0xef, 0x3d, 0xec, 0xfa, 0xc1, 0xf1, 0x70, 0x86, 0x83, 0x31, 0xf6, 0x22, 0x73, - 0x8a, 0x05, 0x8d, 0x6b, 0xa2, 0xed, 0x71, 0xd2, 0x84, 0xde, 0x86, 0xcb, 0x4c, 0x60, 0x21, 0x0e, - 0x8e, 0xec, 0x31, 0x1e, 0x9a, 0x47, 0xa6, 0xed, 0x98, 0x23, 0x07, 0xf7, 0x6b, 0x1b, 0xd5, 0xcd, - 0x86, 0xb1, 0x4e, 0x9b, 0x0f, 0x58, 0xeb, 0x5d, 0xd1, 0xa8, 0xff, 0xb9, 0x06, 0xeb, 0x64, 0x85, - 0x8f, 0xcd, 0x20, 0xb2, 0x3f, 0x05, 0x3e, 0xeb, 0xd0, 0x4e, 0xaf, 0xad, 0x5f, 0xa5, 0x6d, 0x12, - 0x8c, 0xf4, 0x99, 0x09, 0xf4, 0x84, 0x27, 0x35, 0xba, 0x4c, 0x09, 0xa6, 0xff, 0x19, 0x57, 0x88, - 0x34, 0x9d, 0xe7, 0x11, 0x44, 0x16, 0x67, 0x25, 0x8f, 0xf3, 0x0c, 0x62, 0xd0, 0xff, 0xa9, 0x0a, - 0xeb, 0x0f, 0x7d, 0xd3, 0x4a, 0x14, 0xe6, 0xb3, 0x67, 0xe7, 0x77, 0x60, 0x89, 0x19, 0x5a, 0xbf, - 0x46, 0x71, 0xdd, 0x94, 0x71, 0x71, 0x23, 0x4c, 0x28, 0x3c, 0xa0, 0x00, 0x83, 0x0f, 0x42, 0x37, - 0xa1, 0x1b, 0xe0, 0x99, 0x63, 0x8f, 0xcd, 0xa1, 0x37, 0x77, 0x47, 0x38, 0xe8, 0xd7, 0x37, 0xb4, - 0xcd, 0xba, 0xd1, 0xe1, 0xd0, 0x47, 0x14, 0x88, 0x7e, 0x08, 0x9d, 0x89, 0x8d, 0x1d, 0x6b, 0x68, - 0x7b, 0x16, 0xfe, 0x68, 0x7f, 0xaf, 0xbf, 0xb4, 0x51, 0xdd, 0x6c, 0xed, 0x7c, 0x6b, 0x2b, 0xef, - 0x24, 0xb6, 0x94, 0x1c, 0xd9, 0xba, 0x4f, 0x86, 0xef, 0xb3, 0xd1, 0xdf, 0xf3, 0xa2, 0xe0, 0xd8, - 0x68, 0x4f, 0x52, 0x20, 0xd4, 0x87, 0xe5, 0x00, 0x4f, 0x02, 0x1c, 0x1e, 0xf6, 0x97, 0x37, 0xb4, - 0xcd, 0x86, 0x21, 0x7e, 0xd1, 0x6b, 0xb0, 0x12, 0xe0, 0xd0, 0x9f, 0x07, 0x63, 0x3c, 0x9c, 0x06, - 
0xfe, 0x7c, 0x16, 0xf6, 0x1b, 0x1b, 0xd5, 0xcd, 0xa6, 0xd1, 0x15, 0xe0, 0x07, 0x14, 0x3a, 0xf8, - 0x2e, 0xac, 0xe6, 0xb0, 0xa0, 0x1e, 0x54, 0x9f, 0xe3, 0x63, 0x2a, 0x88, 0xaa, 0x41, 0x3e, 0xd1, - 0x45, 0xa8, 0x1f, 0x99, 0xce, 0x1c, 0x73, 0x56, 0xb3, 0x9f, 0x6f, 0x56, 0xee, 0x68, 0xfa, 0x1f, - 0x6b, 0xd0, 0x37, 0xb0, 0x83, 0xcd, 0x10, 0x7f, 0x9e, 0x22, 0xbd, 0x04, 0x4b, 0x9e, 0x6f, 0xe1, - 0xfd, 0x3d, 0x2a, 0xd2, 0xaa, 0xc1, 0xff, 0xf4, 0x5f, 0x68, 0x70, 0xf1, 0x01, 0x8e, 0x88, 0x6e, - 0xdb, 0x61, 0x64, 0x8f, 0x63, 0xe3, 0xfd, 0x0e, 0x54, 0x03, 0xfc, 0x82, 0x53, 0xf6, 0xba, 0x4c, - 0x59, 0xec, 0x95, 0x55, 0x23, 0x0d, 0x32, 0x0e, 0x7d, 0x05, 0xda, 0x96, 0xeb, 0x0c, 0xc7, 0x87, - 0xa6, 0xe7, 0x61, 0x87, 0x59, 0x47, 0xd3, 0x68, 0x59, 0xae, 0xb3, 0xcb, 0x41, 0xe8, 0x1a, 0x40, - 0x88, 0xa7, 0x2e, 0xf6, 0xa2, 0xc4, 0x7b, 0xa6, 0x20, 0xe8, 0x16, 0xac, 0x4e, 0x02, 0xdf, 0x1d, - 0x86, 0x87, 0x66, 0x60, 0x0d, 0x1d, 0x6c, 0x5a, 0x38, 0xa0, 0xd4, 0x37, 0x8c, 0x15, 0xd2, 0x70, - 0x40, 0xe0, 0x0f, 0x29, 0x18, 0xdd, 0x86, 0x7a, 0x38, 0xf6, 0x67, 0x98, 0x6a, 0x5a, 0x77, 0xe7, - 0xaa, 0x4a, 0x87, 0xf6, 0xcc, 0xc8, 0x3c, 0x20, 0x9d, 0x0c, 0xd6, 0x57, 0xff, 0x1f, 0x6e, 0x6a, - 0x5f, 0x70, 0xcf, 0x95, 0x32, 0xc7, 0xfa, 0x27, 0x63, 0x8e, 0x4b, 0xa5, 0xcc, 0x71, 0xf9, 0x64, - 0x73, 0xcc, 0x71, 0xed, 0x34, 0xe6, 0xd8, 0x58, 0x68, 0x8e, 0xcd, 0x4f, 0xc7, 0x1c, 0xff, 0x2e, - 0x31, 0xc7, 0x2f, 0xba, 0xd8, 0x13, 0x93, 0xad, 0x4b, 0x26, 0xfb, 0x17, 0x1a, 0x7c, 0xe9, 0x01, - 0x8e, 0x62, 0xf2, 0x89, 0x05, 0xe2, 0x2f, 0x68, 0xd0, 0xfd, 0xa9, 0x06, 0x03, 0x15, 0xad, 0xe7, - 0x09, 0xbc, 0x1f, 0xc0, 0xa5, 0x18, 0xc7, 0xd0, 0xc2, 0xe1, 0x38, 0xb0, 0x67, 0x54, 0x8c, 0xd4, - 0xc9, 0xb4, 0x76, 0x6e, 0xa8, 0x34, 0x36, 0x4b, 0xc1, 0x7a, 0x3c, 0xc5, 0x5e, 0x6a, 0x06, 0xfd, - 0xc7, 0x1a, 0xac, 0x13, 0xa7, 0xc6, 0xbd, 0x90, 0x37, 0xf1, 0xcf, 0xce, 0x57, 0xd9, 0xbf, 0x55, - 0x72, 0xfe, 0xad, 0x04, 0x8f, 0x69, 0x16, 0x9b, 0xa5, 0xe7, 0x3c, 0xbc, 0xfb, 0x3a, 0xd4, 0x6d, - 0x6f, 0xe2, 0x0b, 0x56, 0x5d, 0x57, 0xb1, 0x2a, 0x8d, 0x8c, 0xf5, 0xd6, 0x3d, 0x46, 0x45, 0xe2, - 0x70, 0xcf, 0xa1, 0x6e, 0xd9, 0x65, 0x57, 0x14, 0xcb, 0xfe, 0x7d, 0x0d, 0x2e, 0xe7, 0x10, 0x9e, - 0x67, 0xdd, 0xdf, 0x86, 0x25, 0x1a, 0x46, 0xc4, 0xc2, 0x5f, 0x51, 0x2e, 0x3c, 0x85, 0xee, 0xa1, - 0x1d, 0x46, 0x06, 0x1f, 0xa3, 0xfb, 0xd0, 0xcb, 0xb6, 0x91, 0x00, 0xc7, 0x83, 0xdb, 0xd0, 0x33, - 0x5d, 0xc6, 0x80, 0xa6, 0xd1, 0xe2, 0xb0, 0x47, 0xa6, 0x8b, 0xd1, 0x97, 0xa0, 0x41, 0x4c, 0x76, - 0x68, 0x5b, 0x42, 0xfc, 0xcb, 0xd4, 0x84, 0xad, 0x10, 0x5d, 0x05, 0xa0, 0x4d, 0xa6, 0x65, 0x05, - 0x2c, 0xf6, 0x35, 0x8d, 0x26, 0x81, 0xdc, 0x25, 0x00, 0xfd, 0x0f, 0x35, 0x68, 0x13, 0x1f, 0xfb, - 0x1e, 0x8e, 0x4c, 0x22, 0x07, 0xf4, 0x0d, 0x68, 0x3a, 0xbe, 0x69, 0x0d, 0xa3, 0xe3, 0x19, 0x43, - 0xd5, 0xcd, 0xf2, 0x3a, 0x71, 0xcc, 0x4f, 0x8e, 0x67, 0xd8, 0x68, 0x38, 0xfc, 0xab, 0x0c, 0xbf, - 0x73, 0xa6, 0x5c, 0x55, 0x98, 0xf2, 0x3f, 0xd4, 0xe1, 0xd2, 0xf7, 0xcd, 0x68, 0x7c, 0xb8, 0xe7, - 0x8a, 0x10, 0x7e, 0x76, 0x25, 0x48, 0x7c, 0x5b, 0x25, 0xed, 0xdb, 0x3e, 0x31, 0xdf, 0x19, 0xeb, - 0x79, 0x5d, 0xa5, 0xe7, 0xa4, 0x58, 0xdc, 0x7a, 0xc6, 0x45, 0x95, 0xd2, 0xf3, 0x54, 0xa4, 0x5d, - 0x3a, 0x4b, 0xa4, 0xdd, 0x85, 0x0e, 0xfe, 0x68, 0xec, 0xcc, 0x89, 0xcc, 0x29, 0x76, 0x16, 0x42, - 0xaf, 0x29, 0xb0, 0xa7, 0x8d, 0xac, 0xcd, 0x07, 0xed, 0x73, 0x1a, 0x98, 0xa8, 0x5d, 0x1c, 0x99, - 0x34, 0x4e, 0xb6, 0x76, 0x36, 0x8a, 0x44, 0x2d, 0xf4, 0x83, 0x89, 0x9b, 0xfc, 0xa1, 0x2b, 0xd0, - 0xe4, 0x71, 0x7d, 0x7f, 0xaf, 0xdf, 0xa4, 0xec, 0x4b, 0x00, 0xc8, 0x84, 0x0e, 0xf7, 0x40, 0x9c, - 0x42, 0xa0, 0x14, 0x7e, 
0x5b, 0x85, 0x40, 0x2d, 0xec, 0x34, 0xe5, 0x21, 0x8f, 0xf2, 0x61, 0x0a, - 0x44, 0x0a, 0x54, 0x7f, 0x32, 0x71, 0x6c, 0x0f, 0x3f, 0x62, 0x12, 0x6e, 0x51, 0x22, 0x64, 0x20, - 0xc9, 0x05, 0x8e, 0x70, 0x10, 0xda, 0xbe, 0xd7, 0x6f, 0xd3, 0x76, 0xf1, 0x3b, 0x18, 0xc2, 0x6a, - 0x0e, 0x85, 0x22, 0xc4, 0x7f, 0x2d, 0x1d, 0xe2, 0x17, 0xf3, 0x38, 0x95, 0x02, 0xfc, 0x44, 0x83, - 0xf5, 0xa7, 0x5e, 0x38, 0x1f, 0xc5, 0x6b, 0xfb, 0x7c, 0xf4, 0x38, 0xeb, 0x41, 0x6a, 0x39, 0x0f, - 0xa2, 0xff, 0xa8, 0x0e, 0x2b, 0x7c, 0x15, 0x44, 0xdc, 0xd4, 0x15, 0x5c, 0x81, 0x66, 0x1c, 0x44, - 0x38, 0x43, 0x12, 0x00, 0xda, 0x80, 0x56, 0xca, 0x10, 0x38, 0x55, 0x69, 0x50, 0x29, 0xd2, 0x44, - 0x4a, 0x50, 0x4b, 0xa5, 0x04, 0x57, 0x01, 0x26, 0xce, 0x3c, 0x3c, 0x1c, 0x46, 0xb6, 0x8b, 0x79, - 0x4a, 0xd2, 0xa4, 0x90, 0x27, 0xb6, 0x8b, 0xd1, 0x5d, 0x68, 0x8f, 0x6c, 0xcf, 0xf1, 0xa7, 0xc3, - 0x99, 0x19, 0x1d, 0x86, 0xbc, 0x98, 0x53, 0x89, 0x85, 0x26, 0x70, 0xf7, 0x68, 0x5f, 0xa3, 0xc5, - 0xc6, 0x3c, 0x26, 0x43, 0xd0, 0x35, 0x68, 0x79, 0x73, 0x77, 0xe8, 0x4f, 0x86, 0x81, 0xff, 0x32, - 0xa4, 0x25, 0x5b, 0xd5, 0x68, 0x7a, 0x73, 0xf7, 0xfd, 0x89, 0xe1, 0xbf, 0x24, 0x4e, 0xbc, 0x49, - 0xdc, 0x79, 0xe8, 0xf8, 0x53, 0x56, 0xae, 0x2d, 0x9e, 0x3f, 0x19, 0x40, 0x46, 0x5b, 0xd8, 0x89, - 0x4c, 0x3a, 0xba, 0x59, 0x6e, 0x74, 0x3c, 0x00, 0xbd, 0x0a, 0xdd, 0xb1, 0xef, 0xce, 0x4c, 0xca, - 0xa1, 0xfb, 0x81, 0xef, 0x52, 0xcb, 0xa9, 0x1a, 0x19, 0x28, 0xda, 0x85, 0x16, 0xcd, 0x9f, 0xb9, - 0x79, 0xb5, 0x28, 0x1e, 0x5d, 0x65, 0x5e, 0xa9, 0x3c, 0x96, 0x28, 0x28, 0xd8, 0xe2, 0x33, 0x24, - 0x9a, 0x21, 0xac, 0x34, 0xb4, 0x3f, 0xc6, 0xdc, 0x42, 0x5a, 0x1c, 0x76, 0x60, 0x7f, 0x8c, 0x49, - 0x52, 0x6f, 0x7b, 0x21, 0x0e, 0x22, 0x51, 0x62, 0xf5, 0x3b, 0x54, 0x7d, 0x3a, 0x0c, 0xca, 0x15, - 0x1b, 0xed, 0x41, 0x37, 0x8c, 0xcc, 0x20, 0x1a, 0xce, 0xfc, 0x90, 0x2a, 0x40, 0xbf, 0x4b, 0x75, - 0x3b, 0x53, 0x20, 0xb9, 0xe1, 0x94, 0x28, 0xf6, 0x63, 0xde, 0xc9, 0xe8, 0xd0, 0x41, 0xe2, 0x57, - 0xff, 0xef, 0x0a, 0x74, 0x65, 0x72, 0x89, 0xfd, 0xb2, 0xdc, 0x5e, 0xe8, 0xa0, 0xf8, 0x25, 0xc4, - 0x63, 0xcf, 0x1c, 0x39, 0x98, 0x15, 0x12, 0x54, 0x05, 0x1b, 0x46, 0x8b, 0xc1, 0xe8, 0x04, 0x44, - 0x95, 0x18, 0x93, 0xa8, 0xde, 0x57, 0x29, 0xe1, 0x4d, 0x0a, 0xa1, 0x71, 0xb3, 0x0f, 0xcb, 0xa2, - 0x06, 0x61, 0x0a, 0x28, 0x7e, 0x49, 0xcb, 0x68, 0x6e, 0x53, 0xac, 0x4c, 0x01, 0xc5, 0x2f, 0xda, - 0x83, 0x36, 0x9b, 0x72, 0x66, 0x06, 0xa6, 0x2b, 0xd4, 0xef, 0x2b, 0x4a, 0x13, 0x7e, 0x17, 0x1f, - 0x3f, 0x23, 0xde, 0xe0, 0xb1, 0x69, 0x07, 0x06, 0x13, 0xd7, 0x63, 0x3a, 0x0a, 0x6d, 0x42, 0x8f, - 0xcd, 0x32, 0xb1, 0x1d, 0xcc, 0x15, 0x79, 0x99, 0x15, 0x22, 0x14, 0x7e, 0xdf, 0x76, 0x30, 0xd3, - 0xd5, 0x78, 0x09, 0x54, 0x40, 0x0d, 0xa6, 0xaa, 0x14, 0x42, 0xc5, 0x73, 0x03, 0x3a, 0xac, 0x59, - 0x38, 0x39, 0xe6, 0x89, 0x19, 0x8d, 0xcf, 0x18, 0x8c, 0xe6, 0x07, 0x73, 0x97, 0x29, 0x3b, 0xb0, - 0xe5, 0x78, 0x73, 0x97, 0xa8, 0xba, 0xfe, 0xe3, 0x1a, 0xac, 0x11, 0x8b, 0xe7, 0xc6, 0x7f, 0x8e, - 0x48, 0x7b, 0x15, 0xc0, 0x0a, 0xa3, 0xa1, 0xe4, 0xa5, 0x9a, 0x56, 0x18, 0x71, 0x3f, 0xfc, 0x0d, - 0x11, 0x28, 0xab, 0xc5, 0xb9, 0x73, 0xc6, 0x03, 0xe5, 0x83, 0xe5, 0x99, 0x76, 0x89, 0x6e, 0x40, - 0x87, 0x57, 0x7c, 0x52, 0x95, 0xd3, 0x66, 0xc0, 0x47, 0x6a, 0x3f, 0xba, 0xa4, 0xdc, 0xad, 0x4a, - 0x05, 0xcc, 0xe5, 0xf3, 0x05, 0xcc, 0x46, 0x36, 0x60, 0xde, 0x87, 0x15, 0xea, 0x04, 0x62, 0x03, - 0x12, 0xbe, 0x63, 0x81, 0x05, 0x75, 0xe9, 0x28, 0xf1, 0x1b, 0xa6, 0xe3, 0x1d, 0x48, 0xf1, 0x8e, - 0xf0, 0xc1, 0xc3, 0xd8, 0x1a, 0x46, 0x81, 0xe9, 0x85, 0x13, 0x1c, 0xd0, 0x78, 0xd9, 0x30, 0xda, - 0x04, 0xf8, 0x84, 0xc3, 0xf4, 0x7f, 0xae, 0xc0, 
0x25, 0x5e, 0xb6, 0x9e, 0x5f, 0x25, 0x8a, 0x82, - 0x96, 0xf0, 0xfa, 0xd5, 0x13, 0x0a, 0xc1, 0x5a, 0x89, 0x84, 0xac, 0xae, 0x48, 0xc8, 0xe4, 0x62, - 0x68, 0x29, 0x57, 0x0c, 0xc5, 0x1b, 0x38, 0xcb, 0xe5, 0x37, 0x70, 0x48, 0x99, 0x4f, 0x33, 0x74, - 0x2a, 0xb6, 0xa6, 0xc1, 0x7e, 0xca, 0x31, 0xf4, 0x3f, 0x34, 0xe8, 0x1c, 0x60, 0x33, 0x18, 0x1f, - 0x0a, 0x3e, 0xbe, 0x9d, 0xde, 0xf0, 0x7a, 0xa5, 0x60, 0xc3, 0x4b, 0x1a, 0xf2, 0xcb, 0xb3, 0xd3, - 0xf5, 0x9f, 0x1a, 0xb4, 0x7f, 0x83, 0x34, 0x89, 0xc5, 0xde, 0x49, 0x2f, 0xf6, 0xd5, 0x82, 0xc5, - 0x1a, 0x38, 0x0a, 0x6c, 0x7c, 0x84, 0x7f, 0xe9, 0x96, 0xfb, 0x8f, 0x1a, 0x0c, 0x0e, 0x8e, 0xbd, - 0xb1, 0xc1, 0xcc, 0xf8, 0xfc, 0x16, 0x73, 0x03, 0x3a, 0x47, 0x52, 0xae, 0x56, 0xa1, 0x0a, 0xd7, - 0x3e, 0x4a, 0x97, 0x7b, 0x06, 0xf4, 0xc4, 0x3e, 0x1b, 0x5f, 0xac, 0xf0, 0xaa, 0xaf, 0xa9, 0xa8, - 0xce, 0x10, 0x47, 0xbd, 0xd2, 0x4a, 0x20, 0x03, 0xf5, 0x3f, 0xd0, 0x60, 0x4d, 0xd1, 0x11, 0x5d, - 0x86, 0x65, 0x5e, 0x5a, 0xf2, 0xf0, 0xcb, 0x6c, 0xd8, 0x22, 0xe2, 0x49, 0x36, 0x47, 0x6c, 0x2b, - 0x9f, 0x00, 0x5a, 0xe8, 0x3a, 0xb4, 0xe2, 0x1a, 0xc0, 0xca, 0xc9, 0xc7, 0x0a, 0xd1, 0x00, 0x1a, - 0xdc, 0x39, 0x89, 0xe2, 0x2a, 0xfe, 0xd7, 0xff, 0x56, 0x83, 0x4b, 0xef, 0x98, 0x9e, 0xe5, 0x4f, - 0x26, 0xe7, 0x67, 0xeb, 0x2e, 0x48, 0xa5, 0x43, 0xd9, 0x4d, 0x09, 0xb9, 0xde, 0x78, 0x1d, 0x56, - 0x03, 0xe6, 0x19, 0x2d, 0x99, 0xef, 0x55, 0xa3, 0x27, 0x1a, 0x62, 0x7e, 0xfe, 0x65, 0x05, 0x10, - 0x89, 0x03, 0xf7, 0x4c, 0xc7, 0xf4, 0xc6, 0xf8, 0xec, 0xa4, 0xdf, 0x84, 0xae, 0x14, 0xbd, 0xe2, - 0x73, 0xb8, 0x74, 0xf8, 0x0a, 0xd1, 0xbb, 0xd0, 0x1d, 0x31, 0x54, 0xc3, 0x00, 0x9b, 0xa1, 0xef, - 0x51, 0xe7, 0xda, 0x55, 0xef, 0x3f, 0x3c, 0x09, 0xec, 0xe9, 0x14, 0x07, 0xbb, 0xbe, 0x67, 0xf1, - 0x34, 0x6c, 0x24, 0xc8, 0x24, 0x43, 0x89, 0xe0, 0x92, 0x50, 0x2e, 0x44, 0x03, 0x71, 0x2c, 0xa7, - 0xac, 0x08, 0xb1, 0xe9, 0x24, 0x8c, 0x48, 0xbc, 0x71, 0x8f, 0x35, 0x1c, 0x14, 0x6f, 0x3f, 0x29, - 0x42, 0xab, 0xfe, 0xd7, 0x1a, 0xa0, 0xb8, 0x4a, 0xa2, 0xf5, 0x20, 0xd5, 0xbe, 0xec, 0x50, 0x4d, - 0x11, 0x14, 0xae, 0x40, 0xd3, 0x12, 0x23, 0xb9, 0xb9, 0x24, 0x00, 0xea, 0xa3, 0x29, 0xd1, 0x43, - 0x12, 0x87, 0xb1, 0x25, 0xaa, 0x10, 0x06, 0x7c, 0x48, 0x61, 0x72, 0x64, 0xae, 0x65, 0x23, 0x73, - 0x7a, 0x77, 0xa5, 0x2e, 0xed, 0xae, 0xe8, 0x3f, 0xa9, 0x40, 0x8f, 0xba, 0xbb, 0xdd, 0xa4, 0xc4, - 0x2f, 0x45, 0xf4, 0x0d, 0xe8, 0xf0, 0x43, 0x6b, 0x89, 0xf0, 0xf6, 0x8b, 0xd4, 0x64, 0xe8, 0x2d, - 0xb8, 0xc8, 0x3a, 0x05, 0x38, 0x9c, 0x3b, 0x49, 0x02, 0xce, 0xf2, 0x58, 0xf4, 0x82, 0xf9, 0x59, - 0xd2, 0x24, 0x46, 0x3c, 0x85, 0x4b, 0x53, 0xc7, 0x1f, 0x99, 0xce, 0x50, 0x16, 0x0f, 0x93, 0x61, - 0x09, 0x8d, 0xbf, 0xc8, 0x86, 0x1f, 0xa4, 0x65, 0x18, 0xa2, 0x7b, 0xa4, 0x98, 0xc7, 0xcf, 0x93, - 0xdc, 0xbe, 0x5e, 0x26, 0xb7, 0x6f, 0x93, 0x31, 0x71, 0x6a, 0xff, 0x27, 0x1a, 0xac, 0x64, 0xf6, - 0x46, 0xb3, 0x35, 0xa4, 0x96, 0xaf, 0x21, 0xef, 0x40, 0x9d, 0x14, 0x56, 0xcc, 0x0f, 0x76, 0xd5, - 0xf5, 0x8d, 0x3c, 0xab, 0xc1, 0x06, 0xa0, 0x6d, 0x58, 0x53, 0x9c, 0x88, 0x72, 0xf1, 0xa3, 0xfc, - 0x81, 0xa8, 0xfe, 0xb3, 0x1a, 0xb4, 0x52, 0xac, 0x58, 0x50, 0xfe, 0x96, 0xd9, 0xec, 0xca, 0x2c, - 0xaf, 0x9a, 0x5f, 0x5e, 0xc1, 0x61, 0x19, 0x51, 0x39, 0x17, 0xbb, 0x2c, 0xe5, 0xe7, 0xf5, 0x87, - 0x8b, 0x5d, 0x9a, 0xf0, 0xa7, 0x73, 0xf9, 0x25, 0x29, 0x97, 0xcf, 0x54, 0x3b, 0xcb, 0x27, 0x54, - 0x3b, 0x0d, 0xb9, 0xda, 0x91, 0x4c, 0xa8, 0x99, 0x35, 0xa1, 0xb2, 0x15, 0xe9, 0x5b, 0xb0, 0x36, - 0x0e, 0xb0, 0x19, 0x61, 0xeb, 0xde, 0xf1, 0x6e, 0xdc, 0xc4, 0x93, 0x22, 0x55, 0x13, 0xba, 0x9f, - 0x6c, 0x12, 0x31, 0x29, 0xb7, 0xa9, 0x94, 0xd5, 0xc5, 0x14, 0x97, 0x0d, 
-	[remaining bytes of the old gzipped FileDescriptorProto elided]
+	// 4210 bytes of a gzipped FileDescriptorProto
+	[bytes of the regenerated gzipped FileDescriptorProto elided]
 }

// Reference imports to suppress errors if they are not otherwise used.
@@ -4373,6 +4440,7 @@ type QueryCoordClient interface {
 	ReleasePartitions(ctx context.Context, in *ReleasePartitionsRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
 	LoadCollection(ctx context.Context, in *LoadCollectionRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
 	ReleaseCollection(ctx context.Context, in *ReleaseCollectionRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
+	SyncNewCreatedPartition(ctx context.Context, in *SyncNewCreatedPartitionRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
 	GetPartitionStates(ctx context.Context, in *GetPartitionStatesRequest, opts ...grpc.CallOption) (*GetPartitionStatesResponse, error)
 	GetSegmentInfo(ctx context.Context, in *GetSegmentInfoRequest, opts ...grpc.CallOption) (*GetSegmentInfoResponse, error)
 	LoadBalance(ctx context.Context, in *LoadBalanceRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
@@ -4480,6 +4548,15 @@ func (c *queryCoordClient) ReleaseCollection(ctx context.Context, in *ReleaseCol
 	return out, nil
 }

+func (c *queryCoordClient) SyncNewCreatedPartition(ctx context.Context, in *SyncNewCreatedPartitionRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
+	out := new(commonpb.Status)
+	err := c.cc.Invoke(ctx, "/milvus.proto.query.QueryCoord/SyncNewCreatedPartition", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
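For orientation, a minimal caller-side sketch of the new RPC through the generated client. This is illustrative only, not part of the patch; the function, the request's CollectionID/PartitionID fields, and the import paths are assumptions to be checked against query_coord.proto and the surrounding codebase:

	package example

	import (
		"context"
		"errors"

		"github.com/milvus-io/milvus-proto/go-api/commonpb"
		"github.com/milvus-io/milvus/internal/proto/querypb"
	)

	// syncNewPartition invokes QueryCoord's new RPC for a single partition.
	func syncNewPartition(ctx context.Context, qc querypb.QueryCoordClient, collID, partID int64) error {
		status, err := qc.SyncNewCreatedPartition(ctx, &querypb.SyncNewCreatedPartitionRequest{
			CollectionID: collID, // assumed field name
			PartitionID:  partID, // assumed field name
		})
		if err != nil {
			return err
		}
		if status.GetErrorCode() != commonpb.ErrorCode_Success {
			return errors.New(status.GetReason())
		}
		return nil
	}

Like every generated unary method here, the call yields both a transport error and an application-level commonpb.Status, so callers need to check both.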
 func (c *queryCoordClient) GetPartitionStates(ctx context.Context, in *GetPartitionStatesRequest, opts ...grpc.CallOption) (*GetPartitionStatesResponse, error) {
 	out := new(GetPartitionStatesResponse)
 	err := c.cc.Invoke(ctx, "/milvus.proto.query.QueryCoord/GetPartitionStates", in, out, opts...)
@@ -4617,6 +4694,7 @@ type QueryCoordServer interface {
 	ReleasePartitions(context.Context, *ReleasePartitionsRequest) (*commonpb.Status, error)
 	LoadCollection(context.Context, *LoadCollectionRequest) (*commonpb.Status, error)
 	ReleaseCollection(context.Context, *ReleaseCollectionRequest) (*commonpb.Status, error)
+	SyncNewCreatedPartition(context.Context, *SyncNewCreatedPartitionRequest) (*commonpb.Status, error)
 	GetPartitionStates(context.Context, *GetPartitionStatesRequest) (*GetPartitionStatesResponse, error)
 	GetSegmentInfo(context.Context, *GetSegmentInfoRequest) (*GetSegmentInfoResponse, error)
 	LoadBalance(context.Context, *LoadBalanceRequest) (*commonpb.Status, error)
@@ -4666,6 +4744,9 @@ func (*UnimplementedQueryCoordServer) LoadCollection(ctx context.Context, req *L
 func (*UnimplementedQueryCoordServer) ReleaseCollection(ctx context.Context, req *ReleaseCollectionRequest) (*commonpb.Status, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method ReleaseCollection not implemented")
 }
+func (*UnimplementedQueryCoordServer) SyncNewCreatedPartition(ctx context.Context, req *SyncNewCreatedPartitionRequest) (*commonpb.Status, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SyncNewCreatedPartition not implemented")
+}
 func (*UnimplementedQueryCoordServer) GetPartitionStates(ctx context.Context, req *GetPartitionStatesRequest) (*GetPartitionStatesResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method GetPartitionStates not implemented")
 }
@@ -4875,6 +4956,24 @@ func _QueryCoord_ReleaseCollection_Handler(srv interface{}, ctx context.Context,
 	return interceptor(ctx, in, info, handler)
 }

+func _QueryCoord_SyncNewCreatedPartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(SyncNewCreatedPartitionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(QueryCoordServer).SyncNewCreatedPartition(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/milvus.proto.query.QueryCoord/SyncNewCreatedPartition",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(QueryCoordServer).SyncNewCreatedPartition(ctx, req.(*SyncNewCreatedPartitionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 func _QueryCoord_GetPartitionStates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
 	in := new(GetPartitionStatesRequest)
 	if err := dec(in); err != nil {
@@ -5167,6 +5266,10 @@ var _QueryCoord_serviceDesc = grpc.ServiceDesc{
 			MethodName: "ReleaseCollection",
 			Handler:    _QueryCoord_ReleaseCollection_Handler,
 		},
+		{
+			MethodName: "SyncNewCreatedPartition",
+			Handler:    _QueryCoord_SyncNewCreatedPartition_Handler,
+		},
 		{
 			MethodName: "GetPartitionStates",
 			Handler:    _QueryCoord_GetPartitionStates_Handler,
@@ -5239,6 +5342,7 @@ type QueryNodeClient interface {
 	UnsubDmChannel(ctx context.Context, in *UnsubDmChannelRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
 	LoadSegments(ctx context.Context, in *LoadSegmentsRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
 	ReleaseCollection(ctx context.Context, in *ReleaseCollectionRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
+	LoadPartitions(ctx context.Context, in *LoadPartitionsRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
 	ReleasePartitions(ctx context.Context, in *ReleasePartitionsRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
 	ReleaseSegments(ctx context.Context, in *ReleaseSegmentsRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
 	GetSegmentInfo(ctx context.Context, in *GetSegmentInfoRequest, opts ...grpc.CallOption) (*GetSegmentInfoResponse, error)
@@ -5324,6 +5428,15 @@ func (c *queryNodeClient) ReleaseCollection(ctx context.Context, in *ReleaseColl
 	return out, nil
 }

+func (c *queryNodeClient) LoadPartitions(ctx context.Context, in *LoadPartitionsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
+	out := new(commonpb.Status)
+	err := c.cc.Invoke(ctx, "/milvus.proto.query.QueryNode/LoadPartitions", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 func (c *queryNodeClient) ReleasePartitions(ctx context.Context, in *ReleasePartitionsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
 	out := new(commonpb.Status)
 	err := c.cc.Invoke(ctx, "/milvus.proto.query.QueryNode/ReleasePartitions", in, out, opts...)
@@ -5432,6 +5545,7 @@ type QueryNodeServer interface {
 	UnsubDmChannel(context.Context, *UnsubDmChannelRequest) (*commonpb.Status, error)
 	LoadSegments(context.Context, *LoadSegmentsRequest) (*commonpb.Status, error)
 	ReleaseCollection(context.Context, *ReleaseCollectionRequest) (*commonpb.Status, error)
+	LoadPartitions(context.Context, *LoadPartitionsRequest) (*commonpb.Status, error)
 	ReleasePartitions(context.Context, *ReleasePartitionsRequest) (*commonpb.Status, error)
 	ReleaseSegments(context.Context, *ReleaseSegmentsRequest) (*commonpb.Status, error)
 	GetSegmentInfo(context.Context, *GetSegmentInfoRequest) (*GetSegmentInfoResponse, error)
@@ -5471,6 +5585,9 @@ func (*UnimplementedQueryNodeServer) LoadSegments(ctx context.Context, req *Load
 func (*UnimplementedQueryNodeServer) ReleaseCollection(ctx context.Context, req *ReleaseCollectionRequest) (*commonpb.Status, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method ReleaseCollection not implemented")
 }
+func (*UnimplementedQueryNodeServer) LoadPartitions(ctx context.Context, req *LoadPartitionsRequest) (*commonpb.Status, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LoadPartitions not implemented")
+}
 func (*UnimplementedQueryNodeServer) ReleasePartitions(ctx context.Context, req *ReleasePartitionsRequest) (*commonpb.Status, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method ReleasePartitions not implemented")
 }
@@ -5635,6 +5752,24 @@ func _QueryNode_ReleaseCollection_Handler(srv interface{}, ctx context.Context,
 	return interceptor(ctx, in, info, handler)
 }

+func _QueryNode_LoadPartitions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LoadPartitionsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(QueryNodeServer).LoadPartitions(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/milvus.proto.query.QueryNode/LoadPartitions",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(QueryNodeServer).LoadPartitions(ctx, req.(*LoadPartitionsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
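The handler above dispatches to QueryNodeServer.LoadPartitions, so every QueryNodeServer implementation and mock must now provide the method. A hypothetical minimal implementation, shown only to illustrate the required signature (ExampleNode is a made-up type; the mock later in this patch simply returns nil, nil):

	// Illustrative sketch only.
	func (node *ExampleNode) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) {
		// A real QueryNode would register req.GetPartitionIDs() in its local
		// replica metadata before acknowledging success.
		return &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil
	}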
 func _QueryNode_ReleasePartitions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
 	in := new(ReleasePartitionsRequest)
 	if err := dec(in); err != nil {
@@ -5865,6 +6000,10 @@ var _QueryNode_serviceDesc = grpc.ServiceDesc{
 			MethodName: "ReleaseCollection",
 			Handler:    _QueryNode_ReleaseCollection_Handler,
 		},
+		{
+			MethodName: "LoadPartitions",
+			Handler:    _QueryNode_LoadPartitions_Handler,
+		},
 		{
 			MethodName: "ReleasePartitions",
 			Handler:    _QueryNode_ReleasePartitions_Handler,
diff --git a/internal/proxy/proxy_test.go b/internal/proxy/proxy_test.go
index 6381309676..384bd9ae0e 100644
--- a/internal/proxy/proxy_test.go
+++ b/internal/proxy/proxy_test.go
@@ -1980,7 +1980,7 @@ func TestProxy(t *testing.T) {
 			Type: milvuspb.ShowType_InMemory,
 		})
 		assert.NoError(t, err)
-		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
+		assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
 		// default partition
 		assert.Equal(t, 0, len(resp.PartitionNames))
diff --git a/internal/proxy/query_node_mock_test.go b/internal/proxy/query_node_mock_test.go
index adea72af73..0754084800 100644
--- a/internal/proxy/query_node_mock_test.go
+++ b/internal/proxy/query_node_mock_test.go
@@ -85,6 +85,10 @@ func (m *QueryNodeMock) ReleaseCollection(ctx context.Context, req *querypb.Rele
 	return nil, nil
 }

+func (m *QueryNodeMock) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) {
+	return nil, nil
+}
+
 // TODO
 func (m *QueryNodeMock) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) {
 	return nil, nil
diff --git a/internal/proxy/task.go b/internal/proxy/task.go
index dbcc643fa9..0f520d95e3 100644
--- a/internal/proxy/task.go
+++ b/internal/proxy/task.go
@@ -868,32 +868,6 @@ func (dpt *dropPartitionTask) PreExecute(ctx context.Context) error {
 		return err
 	}

-	collID, err := globalMetaCache.GetCollectionID(ctx, dpt.GetCollectionName())
-	if err != nil {
-		return err
-	}
-	partID, err := globalMetaCache.GetPartitionID(ctx, dpt.GetCollectionName(), dpt.GetPartitionName())
-	if err != nil {
-		if err.Error() == ErrPartitionNotExist(dpt.GetPartitionName()).Error() {
-			return nil
-		}
-		return err
-	}
-
-	collLoaded, err := isCollectionLoaded(ctx, dpt.queryCoord, collID)
-	if err != nil {
-		return err
-	}
-	if collLoaded {
-		loaded, err := isPartitionLoaded(ctx, dpt.queryCoord, collID, []int64{partID})
-		if err != nil {
-			return err
-		}
-		if loaded {
-			return errors.New("partition cannot be dropped, partition is loaded, please release it first")
-		}
-	}
-
 	return nil
 }
@@ -1587,6 +1561,9 @@ func (lpt *loadPartitionsTask) Execute(ctx context.Context) error {
 		}
 		partitionIDs = append(partitionIDs, partitionID)
 	}
+	if len(partitionIDs) == 0 {
+		return errors.New("failed to load partitions: no partition specified")
+	}
 	request := &querypb.LoadPartitionsRequest{
 		Base: commonpbutil.UpdateMsgBase(
 			lpt.Base,
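The new guard in loadPartitionsTask.Execute turns a request that resolves to zero partition IDs into an immediate client-facing error instead of forwarding an empty LoadPartitionsRequest to QueryCoord. A hypothetical fragment of the effect (task wiring such as caches and clients elided):

	// Illustrative sketch only.
	task := &loadPartitionsTask{
		LoadPartitionsRequest: &milvuspb.LoadPartitionsRequest{
			CollectionName: "demo",
			PartitionNames: []string{}, // nothing resolves to a partition ID
		},
	}
	err := task.Execute(ctx)
	// err: "failed to load partitions: no partition specified"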
diff --git a/internal/proxy/task_test.go b/internal/proxy/task_test.go
index 5e08a06924..0d3dcf6550 100644
--- a/internal/proxy/task_test.go
+++ b/internal/proxy/task_test.go
@@ -1134,20 +1134,6 @@ func TestDropPartitionTask(t *testing.T) {
 	err = task.PreExecute(ctx)
 	assert.NotNil(t, err)

-	t.Run("get collectionID error", func(t *testing.T) {
-		mockCache := newMockCache()
-		mockCache.setGetPartitionIDFunc(func(ctx context.Context, collectionName string, partitionName string) (typeutil.UniqueID, error) {
-			return 1, nil
-		})
-		mockCache.setGetIDFunc(func(ctx context.Context, collectionName string) (typeutil.UniqueID, error) {
-			return 0, errors.New("error")
-		})
-		globalMetaCache = mockCache
-		task.PartitionName = "partition1"
-		err = task.PreExecute(ctx)
-		assert.Error(t, err)
-	})
-
 	t.Run("partition not exist", func(t *testing.T) {
 		task.PartitionName = "partition2"
@@ -1162,21 +1148,6 @@ func TestDropPartitionTask(t *testing.T) {
 		err = task.PreExecute(ctx)
 		assert.NoError(t, err)
 	})
-
-	t.Run("get partition error", func(t *testing.T) {
-		task.PartitionName = "partition3"
-
-		mockCache := newMockCache()
-		mockCache.setGetPartitionIDFunc(func(ctx context.Context, collectionName string, partitionName string) (typeutil.UniqueID, error) {
-			return 0, errors.New("error")
-		})
-		mockCache.setGetIDFunc(func(ctx context.Context, collectionName string) (typeutil.UniqueID, error) {
-			return 1, nil
-		})
-		globalMetaCache = mockCache
-		err = task.PreExecute(ctx)
-		assert.Error(t, err)
-	})
 }

 func TestHasPartitionTask(t *testing.T) {
diff --git a/internal/querycoordv2/balance/rowcount_based_balancer.go b/internal/querycoordv2/balance/rowcount_based_balancer.go
index 44b72aa06f..b823a6999c 100644
--- a/internal/querycoordv2/balance/rowcount_based_balancer.go
+++ b/internal/querycoordv2/balance/rowcount_based_balancer.go
@@ -19,13 +19,14 @@ package balance
 import (
 	"sort"

+	"github.com/samber/lo"
+	"go.uber.org/zap"
+
 	"github.com/milvus-io/milvus/internal/log"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
 	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
 	"github.com/milvus-io/milvus/internal/querycoordv2/session"
 	"github.com/milvus-io/milvus/internal/querycoordv2/task"
-	"github.com/samber/lo"
-	"go.uber.org/zap"
 )

 type RowCountBasedBalancer struct {
diff --git a/internal/querycoordv2/balance/rowcount_based_balancer_test.go b/internal/querycoordv2/balance/rowcount_based_balancer_test.go
index e3e520e5d2..2c840f65a4 100644
--- a/internal/querycoordv2/balance/rowcount_based_balancer_test.go
+++ b/internal/querycoordv2/balance/rowcount_based_balancer_test.go
@@ -69,6 +69,8 @@ func (suite *RowCountBasedBalancerTestSuite) SetupTest() {
 	distManager := meta.NewDistributionManager()
 	suite.mockScheduler = task.NewMockScheduler(suite.T())
 	suite.balancer = NewRowCountBasedBalancer(suite.mockScheduler, nodeManager, distManager, testMeta, testTarget)
+
+	suite.broker.EXPECT().GetPartitions(mock.Anything, int64(1)).Return([]int64{1}, nil).Maybe()
 }

 func (suite *RowCountBasedBalancerTestSuite) TearDownTest() {
@@ -257,6 +259,7 @@ func (suite *RowCountBasedBalancerTestSuite) TestBalance() {
 		balancer.targetMgr.UpdateCollectionCurrentTarget(1, 1)
 		collection.LoadPercentage = 100
 		collection.Status = querypb.LoadStatus_Loaded
+		collection.LoadType = querypb.LoadType_LoadCollection
 		balancer.meta.CollectionManager.PutCollection(collection)
 		balancer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, append(c.nodes, c.notExistedNodes...)))
 		for node, s := range c.distributions {
@@ -359,6 +362,7 @@ func (suite *RowCountBasedBalancerTestSuite) TestBalanceOutboundNodes() {
 		balancer.targetMgr.UpdateCollectionCurrentTarget(1, 1)
 		collection.LoadPercentage = 100
 		collection.Status = querypb.LoadStatus_Loaded
+		collection.LoadType = querypb.LoadType_LoadCollection
 		balancer.meta.CollectionManager.PutCollection(collection)
 		balancer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, append(c.nodes, c.notExistedNodes...)))
 		for node, s := range c.distributions {
@@ -415,6 +419,7 @@ func (suite *RowCountBasedBalancerTestSuite) TestBalanceOnLoadingCollection() {
 		collection := utils.CreateTestCollection(1, 1)
 		collection.LoadPercentage = 100
 		collection.Status = querypb.LoadStatus_Loading
+		collection.LoadType = querypb.LoadType_LoadCollection
 		balancer.meta.CollectionManager.PutCollection(collection)
 		balancer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, c.nodes))
 		for node, s := range c.distributions {
diff --git a/internal/querycoordv2/checkers/channel_checker_test.go b/internal/querycoordv2/checkers/channel_checker_test.go
index 54321ee774..b95eedf9dc 100644
--- a/internal/querycoordv2/checkers/channel_checker_test.go
+++ b/internal/querycoordv2/checkers/channel_checker_test.go
@@ -74,6 +74,8 @@ func (suite *ChannelCheckerTestSuite) SetupTest() {
 	balancer := suite.createMockBalancer()
 	suite.checker = NewChannelChecker(suite.meta, distManager, targetManager, balancer)
+
+	suite.broker.EXPECT().GetPartitions(mock.Anything, int64(1)).Return([]int64{1}, nil).Maybe()
 }

 func (suite *ChannelCheckerTestSuite) TearDownTest() {
diff --git a/internal/querycoordv2/checkers/segment_checker_test.go b/internal/querycoordv2/checkers/segment_checker_test.go
index 40626db2ad..5294368ae7 100644
--- a/internal/querycoordv2/checkers/segment_checker_test.go
+++ b/internal/querycoordv2/checkers/segment_checker_test.go
@@ -74,6 +74,8 @@ func (suite *SegmentCheckerTestSuite) SetupTest() {
 	balancer := suite.createMockBalancer()
 	suite.checker = NewSegmentChecker(suite.meta, distManager, targetManager, balancer)
+
+	suite.broker.EXPECT().GetPartitions(mock.Anything, int64(1)).Return([]int64{1}, nil).Maybe()
 }

 func (suite *SegmentCheckerTestSuite) TearDownTest() {
diff --git a/internal/querycoordv2/job/errors.go b/internal/querycoordv2/job/errors.go
index b2540fa15d..527e781d2b 100644
--- a/internal/querycoordv2/job/errors.go
+++ b/internal/querycoordv2/job/errors.go
@@ -26,4 +26,5 @@ var (
 	ErrCollectionLoaded        = errors.New("CollectionLoaded")
 	ErrLoadParameterMismatched = errors.New("LoadParameterMismatched")
 	ErrNoEnoughNode            = errors.New("NoEnoughNode")
+	ErrPartitionNotInTarget    = errors.New("PartitionNotInLoadingTarget")
 )
diff --git a/internal/querycoordv2/job/job.go b/internal/querycoordv2/job/job.go
index f0ba14c2d3..9b8b28b18e 100644
--- a/internal/querycoordv2/job/job.go
+++ b/internal/querycoordv2/job/job.go
@@ -18,20 +18,6 @@ package job
 import (
 	"context"
-	"fmt"
-	"time"
-
-	"github.com/samber/lo"
-	"go.uber.org/zap"
-
-	"github.com/milvus-io/milvus/internal/log"
-	"github.com/milvus-io/milvus/internal/metrics"
-	"github.com/milvus-io/milvus/internal/proto/querypb"
-	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
-	"github.com/milvus-io/milvus/internal/querycoordv2/observers"
-	"github.com/milvus-io/milvus/internal/querycoordv2/session"
-	"github.com/milvus-io/milvus/internal/querycoordv2/utils"
-	"github.com/milvus-io/milvus/internal/util/typeutil"
 )

 // Job is request of loading/releasing collection/partitions,
@@ -106,439 +92,3 @@ func (job *BaseJob) PreExecute() error {
 }

 func (job *BaseJob) PostExecute() {}
-
-type LoadCollectionJob struct {
-	*BaseJob
-	req *querypb.LoadCollectionRequest
-
-	dist      *meta.DistributionManager
-	meta      *meta.Meta
-	targetMgr *meta.TargetManager
-	broker    meta.Broker
-	nodeMgr   *session.NodeManager
-}
-
-func NewLoadCollectionJob(
-	ctx context.Context,
-	req *querypb.LoadCollectionRequest,
-	dist *meta.DistributionManager,
-	meta *meta.Meta,
-	targetMgr *meta.TargetManager,
-	broker meta.Broker,
-	nodeMgr *session.NodeManager,
-) *LoadCollectionJob {
-	return &LoadCollectionJob{
-		BaseJob:   NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
-		req:       req,
-		dist:      dist,
-		meta:      meta,
-		targetMgr: targetMgr,
-		broker:    broker,
-		nodeMgr:   nodeMgr,
-	}
-}
-
-func (job *LoadCollectionJob) PreExecute() error {
-	req := job.req
-	log := log.Ctx(job.ctx).With(
-		zap.Int64("collectionID", req.GetCollectionID()),
-	)
-
-	if req.GetReplicaNumber() <= 0 {
-		log.Info("request doesn't indicate the number of replicas, set it to 1",
-			zap.Int32("replicaNumber", req.GetReplicaNumber()))
-		req.ReplicaNumber = 1
-	}
-
-	if job.meta.Exist(req.GetCollectionID()) {
-		old := job.meta.GetCollection(req.GetCollectionID())
-		if old == nil {
-			msg := "load the partition after load collection is not supported"
-			log.Warn(msg)
-			return utils.WrapError(msg, ErrLoadParameterMismatched)
-		} else if old.GetReplicaNumber() != req.GetReplicaNumber() {
-			msg := fmt.Sprintf("collection with different replica number %d existed, release this collection first before changing its replica number",
-				job.meta.GetReplicaNumber(req.GetCollectionID()),
-			)
-			log.Warn(msg)
-			return utils.WrapError(msg, ErrLoadParameterMismatched)
-		} else if !typeutil.MapEqual(old.GetFieldIndexID(), req.GetFieldIndexID()) {
-			msg := fmt.Sprintf("collection with different index %v existed, release this collection first before changing its index",
-				old.GetFieldIndexID())
-			log.Warn(msg)
-			return utils.WrapError(msg, ErrLoadParameterMismatched)
-		}
-
-		return ErrCollectionLoaded
-	}
-
-	return nil
-}
-
-func (job *LoadCollectionJob) Execute() error {
-	req := job.req
-	log := log.Ctx(job.ctx).With(
-		zap.Int64("collectionID", req.GetCollectionID()),
-	)
-
-	meta.GlobalFailedLoadCache.Remove(req.GetCollectionID())
-
-	// Clear stale replicas
-	err := job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID())
-	if err != nil {
-		log.Warn("failed to clear stale replicas", zap.Error(err))
-		return err
-	}
-
-	// Create replicas
-	replicas, err := utils.SpawnReplicasWithRG(job.meta,
-		req.GetCollectionID(),
-		req.GetResourceGroups(),
-		req.GetReplicaNumber(),
-	)
-	if err != nil {
-		msg := "failed to spawn replica for collection"
-		log.Error(msg, zap.Error(err))
-		return utils.WrapError(msg, err)
-	}
-	for _, replica := range replicas {
-		log.Info("replica created",
-			zap.Int64("replicaID", replica.GetID()),
-			zap.Int64s("nodes", replica.GetNodes()),
-			zap.String("resourceGroup", replica.GetResourceGroup()))
-	}
-
-	// Fetch channels and segments from DataCoord
-	partitionIDs, err := job.broker.GetPartitions(job.ctx, req.GetCollectionID())
-	if err != nil {
-		msg := "failed to get partitions from RootCoord"
-		log.Error(msg, zap.Error(err))
-		return utils.WrapError(msg, err)
-	}
-
-	// It's safe here to call UpdateCollectionNextTargetWithPartitions, as the collection not existing
-	err = job.targetMgr.UpdateCollectionNextTargetWithPartitions(req.GetCollectionID(), partitionIDs...)
-	if err != nil {
-		msg := "failed to update next targets for collection"
-		log.Error(msg, zap.Error(err))
-		return utils.WrapError(msg, err)
-	}
-
-	err = job.meta.CollectionManager.PutCollection(&meta.Collection{
-		CollectionLoadInfo: &querypb.CollectionLoadInfo{
-			CollectionID:  req.GetCollectionID(),
-			ReplicaNumber: req.GetReplicaNumber(),
-			Status:        querypb.LoadStatus_Loading,
-			FieldIndexID:  req.GetFieldIndexID(),
-		},
-		CreatedAt: time.Now(),
-		UpdatedAt: time.Now(),
-	})
-	if err != nil {
-		msg := "failed to store collection"
-		log.Error(msg, zap.Error(err))
-		return utils.WrapError(msg, err)
-	}
-
-	metrics.QueryCoordNumCollections.WithLabelValues().Inc()
-	return nil
-}
-
-func (job *LoadCollectionJob) PostExecute() {
-	if job.Error() != nil && !job.meta.Exist(job.CollectionID()) {
-		job.meta.ReplicaManager.RemoveCollection(job.CollectionID())
-		job.targetMgr.RemoveCollection(job.req.GetCollectionID())
-	}
-}
-
-type ReleaseCollectionJob struct {
-	*BaseJob
-	req            *querypb.ReleaseCollectionRequest
-	dist           *meta.DistributionManager
-	meta           *meta.Meta
-	targetMgr      *meta.TargetManager
-	targetObserver *observers.TargetObserver
-}
-
-func NewReleaseCollectionJob(ctx context.Context,
-	req *querypb.ReleaseCollectionRequest,
-	dist *meta.DistributionManager,
-	meta *meta.Meta,
-	targetMgr *meta.TargetManager,
-	targetObserver *observers.TargetObserver,
-) *ReleaseCollectionJob {
-	return &ReleaseCollectionJob{
-		BaseJob:        NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
-		req:            req,
-		dist:           dist,
-		meta:           meta,
-		targetMgr:      targetMgr,
-		targetObserver: targetObserver,
-	}
-}
-
-func (job *ReleaseCollectionJob) Execute() error {
-	req := job.req
-	log := log.Ctx(job.ctx).With(
-		zap.Int64("collectionID", req.GetCollectionID()),
-	)
-	if !job.meta.CollectionManager.Exist(req.GetCollectionID()) {
-		log.Info("release collection end, the collection has not been loaded into QueryNode")
-		return nil
-	}
-
-	err := job.meta.CollectionManager.RemoveCollection(req.GetCollectionID())
-	if err != nil {
-		msg := "failed to remove collection"
-		log.Warn(msg, zap.Error(err))
-		return utils.WrapError(msg, err)
-	}
-
-	err = job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID())
-	if err != nil {
-		msg := "failed to remove replicas"
-		log.Warn(msg, zap.Error(err))
-	}
-
-	job.targetMgr.RemoveCollection(req.GetCollectionID())
-	job.targetObserver.ReleaseCollection(req.GetCollectionID())
-	waitCollectionReleased(job.dist, req.GetCollectionID())
-	metrics.QueryCoordNumCollections.WithLabelValues().Dec()
-	return nil
-}
-
-type LoadPartitionJob struct {
-	*BaseJob
-	req *querypb.LoadPartitionsRequest
-
-	dist      *meta.DistributionManager
-	meta      *meta.Meta
-	targetMgr *meta.TargetManager
-	broker    meta.Broker
-	nodeMgr   *session.NodeManager
-}
-
-func NewLoadPartitionJob(
-	ctx context.Context,
-	req *querypb.LoadPartitionsRequest,
-	dist *meta.DistributionManager,
-	meta *meta.Meta,
-	targetMgr *meta.TargetManager,
-	broker meta.Broker,
-	nodeMgr *session.NodeManager,
-) *LoadPartitionJob {
-	return &LoadPartitionJob{
-		BaseJob:   NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
-		req:       req,
-		dist:      dist,
-		meta:      meta,
-		targetMgr: targetMgr,
-		broker:    broker,
-		nodeMgr:   nodeMgr,
-	}
-}
-
-func (job *LoadPartitionJob) PreExecute() error {
-	req := job.req
-	log := log.Ctx(job.ctx).With(
-		zap.Int64("collectionID", req.GetCollectionID()),
-	)
-
-	if req.GetReplicaNumber() <= 0 {
-		log.Info("request doesn't indicate the number of replicas, set it to 1",
-			zap.Int32("replicaNumber", req.GetReplicaNumber()))
-		req.ReplicaNumber = 1
-	}
-
-	if job.meta.Exist(req.GetCollectionID()) {
-		old := job.meta.GetCollection(req.GetCollectionID())
-		if old != nil {
-			msg := "load the partition after load collection is not supported"
-			log.Warn(msg)
-			return utils.WrapError(msg, ErrLoadParameterMismatched)
-		} else if job.meta.GetReplicaNumber(req.GetCollectionID()) != req.GetReplicaNumber() {
-			msg := "collection with different replica number existed, release this collection first before changing its replica number"
-			log.Warn(msg)
-			return utils.WrapError(msg, ErrLoadParameterMismatched)
-		} else if !typeutil.MapEqual(job.meta.GetFieldIndex(req.GetCollectionID()), req.GetFieldIndexID()) {
-			msg := fmt.Sprintf("collection with different index %v existed, release this collection first before changing its index",
-				job.meta.GetFieldIndex(req.GetCollectionID()))
-			log.Warn(msg)
-			return utils.WrapError(msg, ErrLoadParameterMismatched)
-		}
-
-		// Check whether one of the given partitions not loaded
-		for _, partitionID := range req.GetPartitionIDs() {
-			partition := job.meta.GetPartition(partitionID)
-			if partition == nil {
-				msg := fmt.Sprintf("some partitions %v of collection %v has been loaded into QueryNode, please release partitions firstly",
-					req.GetPartitionIDs(),
-					req.GetCollectionID())
-				log.Warn(msg)
-				return utils.WrapError(msg, ErrLoadParameterMismatched)
-			}
-		}
-		return ErrCollectionLoaded
-	}
-
-	return nil
-}
-
-func (job *LoadPartitionJob) Execute() error {
-	req := job.req
-	log := log.Ctx(job.ctx).With(
-		zap.Int64("collectionID", req.GetCollectionID()),
-		zap.Int64s("partitionIDs", req.GetPartitionIDs()),
-	)
-
-	meta.GlobalFailedLoadCache.Remove(req.GetCollectionID())
-
-	// Clear stale replicas
-	err := job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID())
-	if err != nil {
-		log.Warn("failed to clear stale replicas", zap.Error(err))
-		return err
-	}
-
-	// Create replicas
-	replicas, err := utils.SpawnReplicasWithRG(job.meta,
-		req.GetCollectionID(),
-		req.GetResourceGroups(),
-		req.GetReplicaNumber(),
-	)
-	if err != nil {
-		msg := "failed to spawn replica for collection"
-		log.Error(msg, zap.Error(err))
-		return utils.WrapError(msg, err)
-	}
-	for _, replica := range replicas {
-		log.Info("replica created",
-			zap.Int64("replicaID", replica.GetID()),
-			zap.Int64s("nodes", replica.GetNodes()),
-			zap.String("resourceGroup", replica.GetResourceGroup()))
-	}
-
-	// It's safe here to call UpdateCollectionNextTargetWithPartitions, as the collection not existing
-	err = job.targetMgr.UpdateCollectionNextTargetWithPartitions(req.GetCollectionID(), req.GetPartitionIDs()...)
-	if err != nil {
-		msg := "failed to update next targets for collection"
-		log.Error(msg,
-			zap.Int64s("partitionIDs", req.GetPartitionIDs()),
-			zap.Error(err))
-		return utils.WrapError(msg, err)
-	}
-	partitions := lo.Map(req.GetPartitionIDs(), func(partition int64, _ int) *meta.Partition {
-		return &meta.Partition{
-			PartitionLoadInfo: &querypb.PartitionLoadInfo{
-				CollectionID:  req.GetCollectionID(),
-				PartitionID:   partition,
-				ReplicaNumber: req.GetReplicaNumber(),
-				Status:        querypb.LoadStatus_Loading,
-				FieldIndexID:  req.GetFieldIndexID(),
-			},
-			CreatedAt: time.Now(),
-		}
-	})
-	err = job.meta.CollectionManager.PutPartition(partitions...)
-	if err != nil {
-		msg := "failed to store partitions"
-		log.Error(msg, zap.Error(err))
-		return utils.WrapError(msg, err)
-	}
-
-	metrics.QueryCoordNumCollections.WithLabelValues().Inc()
-	return nil
-}
-
-func (job *LoadPartitionJob) PostExecute() {
-	if job.Error() != nil && !job.meta.Exist(job.CollectionID()) {
-		job.meta.ReplicaManager.RemoveCollection(job.CollectionID())
-		job.targetMgr.RemoveCollection(job.req.GetCollectionID())
-	}
-}
-
-type ReleasePartitionJob struct {
-	*BaseJob
-	req            *querypb.ReleasePartitionsRequest
-	dist           *meta.DistributionManager
-	meta           *meta.Meta
-	targetMgr      *meta.TargetManager
-	targetObserver *observers.TargetObserver
-}
-
-func NewReleasePartitionJob(ctx context.Context,
-	req *querypb.ReleasePartitionsRequest,
-	dist *meta.DistributionManager,
-	meta *meta.Meta,
-	targetMgr *meta.TargetManager,
-	targetObserver *observers.TargetObserver,
-) *ReleasePartitionJob {
-	return &ReleasePartitionJob{
-		BaseJob:        NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
-		req:            req,
-		dist:           dist,
-		meta:           meta,
-		targetMgr:      targetMgr,
-		targetObserver: targetObserver,
-	}
-}
-
-func (job *ReleasePartitionJob) PreExecute() error {
-	log := log.Ctx(job.ctx).With(
-		zap.Int64("collectionID", job.req.GetCollectionID()),
-	)
-	if job.meta.CollectionManager.GetLoadType(job.req.GetCollectionID()) == querypb.LoadType_LoadCollection {
-		msg := "releasing some partitions after load collection is not supported"
-		log.Warn(msg)
-		return utils.WrapError(msg, ErrLoadParameterMismatched)
-	}
-	return nil
-}
-
-func (job *ReleasePartitionJob) Execute() error {
-	req := job.req
-	log := log.Ctx(job.ctx).With(
-		zap.Int64("collectionID", req.GetCollectionID()),
-	)
-	if !job.meta.CollectionManager.Exist(req.GetCollectionID()) {
-		log.Info("release collection end, the collection has not been loaded into QueryNode")
-		return nil
-	}
-
-	loadedPartitions := job.meta.CollectionManager.GetPartitionsByCollection(req.GetCollectionID())
-	partitionIDs := typeutil.NewUniqueSet(req.GetPartitionIDs()...)
-	toRelease := make([]int64, 0)
-	for _, partition := range loadedPartitions {
-		if partitionIDs.Contain(partition.GetPartitionID()) {
-			toRelease = append(toRelease, partition.GetPartitionID())
-		}
-	}
-
-	if len(toRelease) == len(loadedPartitions) { // All partitions are released, clear all
-		log.Info("release partitions covers all partitions, will remove the whole collection")
-		err := job.meta.CollectionManager.RemoveCollection(req.GetCollectionID())
-		if err != nil {
-			msg := "failed to release partitions from store"
-			log.Warn(msg, zap.Error(err))
-			return utils.WrapError(msg, err)
-		}
-		err = job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID())
-		if err != nil {
-			log.Warn("failed to remove replicas", zap.Error(err))
-		}
-		job.targetMgr.RemoveCollection(req.GetCollectionID())
-		job.targetObserver.ReleaseCollection(req.GetCollectionID())
-		waitCollectionReleased(job.dist, req.GetCollectionID())
-	} else {
-		err := job.meta.CollectionManager.RemovePartition(toRelease...)
-		if err != nil {
-			msg := "failed to release partitions from store"
-			log.Warn(msg, zap.Error(err))
-			return utils.WrapError(msg, err)
-		}
-		job.targetMgr.RemovePartition(req.GetCollectionID(), toRelease...)
-		waitCollectionReleased(job.dist, req.GetCollectionID(), toRelease...)
-	}
-	metrics.QueryCoordNumCollections.WithLabelValues().Dec()
-	return nil
-}
- } - metrics.QueryCoordNumCollections.WithLabelValues().Dec() - return nil -} diff --git a/internal/querycoordv2/job/job_load.go b/internal/querycoordv2/job/job_load.go new file mode 100644 index 0000000000..0ed3a8895c --- /dev/null +++ b/internal/querycoordv2/job/job_load.go @@ -0,0 +1,397 @@ +// Licensed to the LF AI & Data foundation under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package job + +import ( + "context" + "fmt" + "time" + + "github.com/cockroachdb/errors" + "github.com/samber/lo" + "go.uber.org/zap" + + "github.com/milvus-io/milvus/internal/log" + "github.com/milvus-io/milvus/internal/metrics" + "github.com/milvus-io/milvus/internal/proto/querypb" + "github.com/milvus-io/milvus/internal/querycoordv2/meta" + "github.com/milvus-io/milvus/internal/querycoordv2/observers" + "github.com/milvus-io/milvus/internal/querycoordv2/session" + "github.com/milvus-io/milvus/internal/querycoordv2/utils" + "github.com/milvus-io/milvus/internal/util/typeutil" +) + +type LoadCollectionJob struct { + *BaseJob + req *querypb.LoadCollectionRequest + undo *UndoList + + dist *meta.DistributionManager + meta *meta.Meta + cluster session.Cluster + targetMgr *meta.TargetManager + targetObserver *observers.TargetObserver + broker meta.Broker + nodeMgr *session.NodeManager +} + +func NewLoadCollectionJob( + ctx context.Context, + req *querypb.LoadCollectionRequest, + dist *meta.DistributionManager, + meta *meta.Meta, + cluster session.Cluster, + targetMgr *meta.TargetManager, + targetObserver *observers.TargetObserver, + broker meta.Broker, + nodeMgr *session.NodeManager, +) *LoadCollectionJob { + return &LoadCollectionJob{ + BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()), + req: req, + undo: NewUndoList(ctx, meta, cluster, targetMgr, targetObserver), + dist: dist, + meta: meta, + cluster: cluster, + targetMgr: targetMgr, + targetObserver: targetObserver, + broker: broker, + nodeMgr: nodeMgr, + } +} + +func (job *LoadCollectionJob) PreExecute() error { + req := job.req + log := log.Ctx(job.ctx).With(zap.Int64("collectionID", req.GetCollectionID())) + + if req.GetReplicaNumber() <= 0 { + log.Info("request doesn't indicate the number of replicas, set it to 1", + zap.Int32("replicaNumber", req.GetReplicaNumber())) + req.ReplicaNumber = 1 + } + + collection := job.meta.GetCollection(req.GetCollectionID()) + if collection == nil { + return nil + } + + if collection.GetReplicaNumber() != req.GetReplicaNumber() { + msg := fmt.Sprintf("collection with different replica number %d existed, release this collection first before changing its replica number", + job.meta.GetReplicaNumber(req.GetCollectionID()), + ) + log.Warn(msg) + return utils.WrapError(msg, ErrLoadParameterMismatched) + } else if !typeutil.MapEqual(collection.GetFieldIndexID(), req.GetFieldIndexID()) { + msg := 
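+			// index params are immutable while the collection is loaded; a
+			// mismatch must fail the request rather than silently reload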
fmt.Sprintf("collection with different index %v existed, release this collection first before changing its index", + collection.GetFieldIndexID()) + log.Warn(msg) + return utils.WrapError(msg, ErrLoadParameterMismatched) + } + + return nil +} + +func (job *LoadCollectionJob) Execute() error { + req := job.req + log := log.Ctx(job.ctx).With(zap.Int64("collectionID", req.GetCollectionID())) + meta.GlobalFailedLoadCache.Remove(req.GetCollectionID()) + + // 1. Fetch target partitions + partitionIDs, err := job.broker.GetPartitions(job.ctx, req.GetCollectionID()) + if err != nil { + msg := "failed to get partitions from RootCoord" + log.Error(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + loadedPartitionIDs := lo.Map(job.meta.CollectionManager.GetPartitionsByCollection(req.GetCollectionID()), + func(partition *meta.Partition, _ int) int64 { + return partition.GetPartitionID() + }) + lackPartitionIDs := lo.FilterMap(partitionIDs, func(partID int64, _ int) (int64, bool) { + return partID, !lo.Contains(loadedPartitionIDs, partID) + }) + if len(lackPartitionIDs) == 0 { + return ErrCollectionLoaded + } + job.undo.CollectionID = req.GetCollectionID() + job.undo.LackPartitions = lackPartitionIDs + log.Info("find partitions to load", zap.Int64s("partitions", lackPartitionIDs)) + + // 2. loadPartitions on QueryNodes + err = loadPartitions(job.ctx, job.meta, job.cluster, false, req.GetCollectionID(), lackPartitionIDs...) + if err != nil { + return err + } + job.undo.PartitionsLoaded = true + + // 3. update next target + _, err = job.targetObserver.UpdateNextTarget(req.GetCollectionID(), partitionIDs...) + if err != nil { + msg := "failed to update next target" + log.Error(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + job.undo.TargetUpdated = true + + colExisted := job.meta.CollectionManager.Exist(req.GetCollectionID()) + if !colExisted { + // Clear stale replicas, https://github.com/milvus-io/milvus/issues/20444 + err = job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID()) + if err != nil { + msg := "failed to clear stale replicas" + log.Warn(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + } + + // 4. create replica if not exist + replicas := job.meta.ReplicaManager.GetByCollection(req.GetCollectionID()) + if len(replicas) == 0 { + replicas, err = utils.SpawnReplicasWithRG(job.meta, req.GetCollectionID(), req.GetResourceGroups(), req.GetReplicaNumber()) + if err != nil { + msg := "failed to spawn replica for collection" + log.Error(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + for _, replica := range replicas { + log.Info("replica created", zap.Int64("replicaID", replica.GetID()), + zap.Int64s("nodes", replica.GetNodes()), zap.String("resourceGroup", replica.GetResourceGroup())) + } + job.undo.NewReplicaCreated = true + } + + // 5. 
put collection/partitions meta + partitions := lo.Map(lackPartitionIDs, func(partID int64, _ int) *meta.Partition { + return &meta.Partition{ + PartitionLoadInfo: &querypb.PartitionLoadInfo{ + CollectionID: req.GetCollectionID(), + PartitionID: partID, + Status: querypb.LoadStatus_Loading, + }, + CreatedAt: time.Now(), + } + }) + collection := &meta.Collection{ + CollectionLoadInfo: &querypb.CollectionLoadInfo{ + CollectionID: req.GetCollectionID(), + ReplicaNumber: req.GetReplicaNumber(), + Status: querypb.LoadStatus_Loading, + FieldIndexID: req.GetFieldIndexID(), + LoadType: querypb.LoadType_LoadCollection, + }, + CreatedAt: time.Now(), + } + err = job.meta.CollectionManager.PutCollection(collection, partitions...) + if err != nil { + msg := "failed to store collection and partitions" + log.Error(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + + if !colExisted { + metrics.QueryCoordNumCollections.WithLabelValues().Inc() + } + metrics.QueryCoordNumPartitions.WithLabelValues().Add(float64(len(partitions))) + return nil +} + +func (job *LoadCollectionJob) PostExecute() { + if job.Error() != nil && !errors.Is(job.Error(), ErrCollectionLoaded) { + job.undo.RollBack() + } +} + +type LoadPartitionJob struct { + *BaseJob + req *querypb.LoadPartitionsRequest + undo *UndoList + + dist *meta.DistributionManager + meta *meta.Meta + cluster session.Cluster + targetMgr *meta.TargetManager + targetObserver *observers.TargetObserver + broker meta.Broker + nodeMgr *session.NodeManager +} + +func NewLoadPartitionJob( + ctx context.Context, + req *querypb.LoadPartitionsRequest, + dist *meta.DistributionManager, + meta *meta.Meta, + cluster session.Cluster, + targetMgr *meta.TargetManager, + targetObserver *observers.TargetObserver, + broker meta.Broker, + nodeMgr *session.NodeManager, +) *LoadPartitionJob { + return &LoadPartitionJob{ + BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()), + req: req, + undo: NewUndoList(ctx, meta, cluster, targetMgr, targetObserver), + dist: dist, + meta: meta, + cluster: cluster, + targetMgr: targetMgr, + targetObserver: targetObserver, + broker: broker, + nodeMgr: nodeMgr, + } +} + +func (job *LoadPartitionJob) PreExecute() error { + req := job.req + log := log.Ctx(job.ctx).With(zap.Int64("collectionID", req.GetCollectionID())) + + if req.GetReplicaNumber() <= 0 { + log.Info("request doesn't indicate the number of replicas, set it to 1", + zap.Int32("replicaNumber", req.GetReplicaNumber())) + req.ReplicaNumber = 1 + } + + collection := job.meta.GetCollection(req.GetCollectionID()) + if collection == nil { + return nil + } + + if collection.GetReplicaNumber() != req.GetReplicaNumber() { + msg := "collection with different replica number existed, release this collection first before changing its replica number" + log.Warn(msg) + return utils.WrapError(msg, ErrLoadParameterMismatched) + } else if !typeutil.MapEqual(collection.GetFieldIndexID(), req.GetFieldIndexID()) { + msg := fmt.Sprintf("collection with different index %v existed, release this collection first before changing its index", + job.meta.GetFieldIndex(req.GetCollectionID())) + log.Warn(msg) + return utils.WrapError(msg, ErrLoadParameterMismatched) + } + + return nil +} + +func (job *LoadPartitionJob) Execute() error { + req := job.req + log := log.Ctx(job.ctx).With( + zap.Int64("collectionID", req.GetCollectionID()), + zap.Int64s("partitionIDs", req.GetPartitionIDs()), + ) + meta.GlobalFailedLoadCache.Remove(req.GetCollectionID()) + + // 1. 
Fetch target partitions + loadedPartitionIDs := lo.Map(job.meta.CollectionManager.GetPartitionsByCollection(req.GetCollectionID()), + func(partition *meta.Partition, _ int) int64 { + return partition.GetPartitionID() + }) + lackPartitionIDs := lo.FilterMap(req.GetPartitionIDs(), func(partID int64, _ int) (int64, bool) { + return partID, !lo.Contains(loadedPartitionIDs, partID) + }) + if len(lackPartitionIDs) == 0 { + return ErrCollectionLoaded + } + job.undo.CollectionID = req.GetCollectionID() + job.undo.LackPartitions = lackPartitionIDs + log.Info("find partitions to load", zap.Int64s("partitions", lackPartitionIDs)) + + // 2. loadPartitions on QueryNodes + err := loadPartitions(job.ctx, job.meta, job.cluster, false, req.GetCollectionID(), lackPartitionIDs...) + if err != nil { + return err + } + job.undo.PartitionsLoaded = true + + // 3. update next target + _, err = job.targetObserver.UpdateNextTarget(req.GetCollectionID(), append(loadedPartitionIDs, lackPartitionIDs...)...) + if err != nil { + msg := "failed to update next target" + log.Error(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + job.undo.TargetUpdated = true + + if !job.meta.CollectionManager.Exist(req.GetCollectionID()) { + // Clear stale replicas, https://github.com/milvus-io/milvus/issues/20444 + err = job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID()) + if err != nil { + msg := "failed to clear stale replicas" + log.Warn(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + } + + // 4. create replica if not exist + replicas := job.meta.ReplicaManager.GetByCollection(req.GetCollectionID()) + if len(replicas) == 0 { + replicas, err = utils.SpawnReplicasWithRG(job.meta, req.GetCollectionID(), req.GetResourceGroups(), req.GetReplicaNumber()) + if err != nil { + msg := "failed to spawn replica for collection" + log.Error(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + for _, replica := range replicas { + log.Info("replica created", zap.Int64("replicaID", replica.GetID()), + zap.Int64s("nodes", replica.GetNodes()), zap.String("resourceGroup", replica.GetResourceGroup())) + } + job.undo.NewReplicaCreated = true + } + + // 5. put collection/partitions meta + partitions := lo.Map(lackPartitionIDs, func(partID int64, _ int) *meta.Partition { + return &meta.Partition{ + PartitionLoadInfo: &querypb.PartitionLoadInfo{ + CollectionID: req.GetCollectionID(), + PartitionID: partID, + Status: querypb.LoadStatus_Loading, + }, + CreatedAt: time.Now(), + } + }) + if !job.meta.CollectionManager.Exist(req.GetCollectionID()) { + collection := &meta.Collection{ + CollectionLoadInfo: &querypb.CollectionLoadInfo{ + CollectionID: req.GetCollectionID(), + ReplicaNumber: req.GetReplicaNumber(), + Status: querypb.LoadStatus_Loading, + FieldIndexID: req.GetFieldIndexID(), + LoadType: querypb.LoadType_LoadPartition, + }, + CreatedAt: time.Now(), + } + err = job.meta.CollectionManager.PutCollection(collection, partitions...) + if err != nil { + msg := "failed to store collection and partitions" + log.Error(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + metrics.QueryCoordNumCollections.WithLabelValues().Inc() + } else { // collection exists, put partitions only + err = job.meta.CollectionManager.PutPartition(partitions...) 
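+		// the existing collection-level load info (replica number, index,
+		// load type) is deliberately left untouched on this path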
+ if err != nil { + msg := "failed to store partitions" + log.Error(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + } + + metrics.QueryCoordNumPartitions.WithLabelValues().Add(float64(len(partitions))) + return nil +} + +func (job *LoadPartitionJob) PostExecute() { + if job.Error() != nil && !errors.Is(job.Error(), ErrCollectionLoaded) { + job.undo.RollBack() + } +} diff --git a/internal/querycoordv2/job/job_release.go b/internal/querycoordv2/job/job_release.go new file mode 100644 index 0000000000..ee0191fcee --- /dev/null +++ b/internal/querycoordv2/job/job_release.go @@ -0,0 +1,176 @@ +// Licensed to the LF AI & Data foundation under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package job + +import ( + "context" + + "github.com/samber/lo" + "go.uber.org/zap" + + "github.com/milvus-io/milvus/internal/log" + "github.com/milvus-io/milvus/internal/metrics" + "github.com/milvus-io/milvus/internal/proto/querypb" + "github.com/milvus-io/milvus/internal/querycoordv2/meta" + "github.com/milvus-io/milvus/internal/querycoordv2/observers" + "github.com/milvus-io/milvus/internal/querycoordv2/session" + "github.com/milvus-io/milvus/internal/querycoordv2/utils" +) + +type ReleaseCollectionJob struct { + *BaseJob + req *querypb.ReleaseCollectionRequest + dist *meta.DistributionManager + meta *meta.Meta + targetMgr *meta.TargetManager + targetObserver *observers.TargetObserver +} + +func NewReleaseCollectionJob(ctx context.Context, + req *querypb.ReleaseCollectionRequest, + dist *meta.DistributionManager, + meta *meta.Meta, + targetMgr *meta.TargetManager, + targetObserver *observers.TargetObserver, +) *ReleaseCollectionJob { + return &ReleaseCollectionJob{ + BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()), + req: req, + dist: dist, + meta: meta, + targetMgr: targetMgr, + targetObserver: targetObserver, + } +} + +func (job *ReleaseCollectionJob) Execute() error { + req := job.req + log := log.Ctx(job.ctx).With(zap.Int64("collectionID", req.GetCollectionID())) + + if !job.meta.CollectionManager.Exist(req.GetCollectionID()) { + log.Info("release collection end, the collection has not been loaded into QueryNode") + return nil + } + + lenPartitions := len(job.meta.CollectionManager.GetPartitionsByCollection(req.GetCollectionID())) + + err := job.meta.CollectionManager.RemoveCollection(req.GetCollectionID()) + if err != nil { + msg := "failed to remove collection" + log.Warn(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + + err = job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID()) + if err != nil { + msg := "failed to remove replicas" + log.Warn(msg, zap.Error(err)) + } + + job.targetMgr.RemoveCollection(req.GetCollectionID()) + job.targetObserver.ReleaseCollection(req.GetCollectionID()) + waitCollectionReleased(job.dist, req.GetCollectionID()) + 
metrics.QueryCoordNumCollections.WithLabelValues().Dec() + metrics.QueryCoordNumPartitions.WithLabelValues().Sub(float64(lenPartitions)) + return nil +} + +type ReleasePartitionJob struct { + *BaseJob + releasePartitionsOnly bool + + req *querypb.ReleasePartitionsRequest + dist *meta.DistributionManager + meta *meta.Meta + cluster session.Cluster + targetMgr *meta.TargetManager + targetObserver *observers.TargetObserver +} + +func NewReleasePartitionJob(ctx context.Context, + req *querypb.ReleasePartitionsRequest, + dist *meta.DistributionManager, + meta *meta.Meta, + cluster session.Cluster, + targetMgr *meta.TargetManager, + targetObserver *observers.TargetObserver, +) *ReleasePartitionJob { + return &ReleasePartitionJob{ + BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()), + req: req, + dist: dist, + meta: meta, + cluster: cluster, + targetMgr: targetMgr, + targetObserver: targetObserver, + } +} + +func (job *ReleasePartitionJob) Execute() error { + req := job.req + log := log.Ctx(job.ctx).With( + zap.Int64("collectionID", req.GetCollectionID()), + zap.Int64s("partitionIDs", req.GetPartitionIDs()), + ) + + if !job.meta.CollectionManager.Exist(req.GetCollectionID()) { + log.Info("release collection end, the collection has not been loaded into QueryNode") + return nil + } + + loadedPartitions := job.meta.CollectionManager.GetPartitionsByCollection(req.GetCollectionID()) + toRelease := lo.FilterMap(loadedPartitions, func(partition *meta.Partition, _ int) (int64, bool) { + return partition.GetPartitionID(), lo.Contains(req.GetPartitionIDs(), partition.GetPartitionID()) + }) + + // If all partitions are released and LoadType is LoadPartition, clear all + if len(toRelease) == len(loadedPartitions) && + job.meta.GetLoadType(req.GetCollectionID()) == querypb.LoadType_LoadPartition { + log.Info("release partitions covers all partitions, will remove the whole collection") + err := job.meta.CollectionManager.RemoveCollection(req.GetCollectionID()) + if err != nil { + msg := "failed to release partitions from store" + log.Warn(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + err = job.meta.ReplicaManager.RemoveCollection(req.GetCollectionID()) + if err != nil { + log.Warn("failed to remove replicas", zap.Error(err)) + } + job.targetMgr.RemoveCollection(req.GetCollectionID()) + job.targetObserver.ReleaseCollection(req.GetCollectionID()) + metrics.QueryCoordNumCollections.WithLabelValues().Dec() + waitCollectionReleased(job.dist, req.GetCollectionID()) + } else { + err := releasePartitions(job.ctx, job.meta, job.cluster, false, req.GetCollectionID(), toRelease...) + if err != nil { + loadPartitions(job.ctx, job.meta, job.cluster, true, req.GetCollectionID(), toRelease...) + return err + } + err = job.meta.CollectionManager.RemovePartition(toRelease...) + if err != nil { + loadPartitions(job.ctx, job.meta, job.cluster, true, req.GetCollectionID(), toRelease...) + msg := "failed to release partitions from store" + log.Warn(msg, zap.Error(err)) + return utils.WrapError(msg, err) + } + job.targetMgr.RemovePartition(req.GetCollectionID(), toRelease...) + waitCollectionReleased(job.dist, req.GetCollectionID(), toRelease...) 
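+		// partial-release path: QueryNodes drop the partitions first, then the
+		// partition meta and targets are removed; the collection itself stays
+		// loaded, since some partitions remain or the load type is LoadCollection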
+	}
+	metrics.QueryCoordNumPartitions.WithLabelValues().Sub(float64(len(toRelease)))
+	return nil
+}
diff --git a/internal/querycoordv2/job/job_sync.go b/internal/querycoordv2/job/job_sync.go
new file mode 100644
index 0000000000..9e653ea681
--- /dev/null
+++ b/internal/querycoordv2/job/job_sync.go
@@ -0,0 +1,103 @@
+// Licensed to the LF AI & Data foundation under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package job
+
+import (
+	"context"
+	"time"
+
+	"github.com/cockroachdb/errors"
+	"go.uber.org/zap"
+
+	"github.com/milvus-io/milvus/internal/log"
+	"github.com/milvus-io/milvus/internal/proto/querypb"
+	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
+	"github.com/milvus-io/milvus/internal/querycoordv2/session"
+	"github.com/milvus-io/milvus/internal/querycoordv2/utils"
+)
+
+type SyncNewCreatedPartitionJob struct {
+	*BaseJob
+	req     *querypb.SyncNewCreatedPartitionRequest
+	meta    *meta.Meta
+	cluster session.Cluster
+}
+
+func NewSyncNewCreatedPartitionJob(
+	ctx context.Context,
+	req *querypb.SyncNewCreatedPartitionRequest,
+	meta *meta.Meta,
+	cluster session.Cluster,
+) *SyncNewCreatedPartitionJob {
+	return &SyncNewCreatedPartitionJob{
+		BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
+		req:     req,
+		meta:    meta,
+		cluster: cluster,
+	}
+}
+
+func (job *SyncNewCreatedPartitionJob) PreExecute() error {
+	// skip if the collection is not loaded, or it was loaded with LoadType_LoadPartition
+	collection := job.meta.GetCollection(job.req.GetCollectionID())
+	if collection == nil || collection.GetLoadType() == querypb.LoadType_LoadPartition {
+		return ErrPartitionNotInTarget
+	}
+
+	// skip if the partition already exists
+	if partition := job.meta.GetPartition(job.req.GetPartitionID()); partition != nil {
+		return ErrPartitionNotInTarget
+	}
+	return nil
+}
+
+func (job *SyncNewCreatedPartitionJob) Execute() error {
+	req := job.req
+	log := log.Ctx(job.ctx).With(
+		zap.Int64("collectionID", req.GetCollectionID()),
+		zap.Int64("partitionID", req.GetPartitionID()),
+	)
+
+	err := loadPartitions(job.ctx, job.meta, job.cluster, false, req.GetCollectionID(), req.GetPartitionID())
+	if err != nil {
+		return err
+	}
+
+	partition := &meta.Partition{
+		PartitionLoadInfo: &querypb.PartitionLoadInfo{
+			CollectionID: req.GetCollectionID(),
+			PartitionID:  req.GetPartitionID(),
+			Status:       querypb.LoadStatus_Loaded,
+		},
+		LoadPercentage: 100,
+		CreatedAt:      time.Now(),
+	}
+	err = job.meta.CollectionManager.PutPartition(partition)
+	if err != nil {
+		msg := "failed to store partition"
+		log.Error(msg, zap.Error(err))
+		return utils.WrapError(msg, err)
+	}
+
+	return nil
+}
+
+func (job *SyncNewCreatedPartitionJob) PostExecute() {
+	if job.Error() != nil && !errors.Is(job.Error(), ErrPartitionNotInTarget) {
+		releasePartitions(job.ctx, job.meta, job.cluster, true, job.req.GetCollectionID(), 
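+			// ignoreErr=true: rollback is best-effort, release failures are swallowed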
job.req.GetPartitionID()) + } +} diff --git a/internal/querycoordv2/job/job_test.go b/internal/querycoordv2/job/job_test.go index 794d903e68..097eddc7f9 100644 --- a/internal/querycoordv2/job/job_test.go +++ b/internal/querycoordv2/job/job_test.go @@ -21,10 +21,11 @@ import ( "testing" "github.com/cockroachdb/errors" - + "github.com/samber/lo" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" + "github.com/milvus-io/milvus-proto/go-api/commonpb" "github.com/milvus-io/milvus/internal/kv" etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" "github.com/milvus-io/milvus/internal/proto/datapb" @@ -33,6 +34,7 @@ import ( "github.com/milvus-io/milvus/internal/querycoordv2/observers" . "github.com/milvus-io/milvus/internal/querycoordv2/params" "github.com/milvus-io/milvus/internal/querycoordv2/session" + "github.com/milvus-io/milvus/internal/querycoordv2/utils" "github.com/milvus-io/milvus/internal/util/etcd" ) @@ -56,6 +58,7 @@ type JobSuite struct { store meta.Store dist *meta.DistributionManager meta *meta.Meta + cluster *session.MockCluster targetMgr *meta.TargetManager targetObserver *observers.TargetObserver broker *meta.MockBroker @@ -70,8 +73,8 @@ func (suite *JobSuite) SetupSuite() { suite.collections = []int64{1000, 1001} suite.partitions = map[int64][]int64{ - 1000: {100, 101}, - 1001: {102, 103}, + 1000: {100, 101, 102}, + 1001: {103, 104, 105}, } suite.channels = map[int64][]string{ 1000: {"1000-dmc0", "1000-dmc1"}, @@ -81,10 +84,12 @@ func (suite *JobSuite) SetupSuite() { 1000: { 100: {1, 2}, 101: {3, 4}, + 102: {5, 6}, }, 1001: { - 102: {5, 6}, 103: {7, 8}, + 104: {9, 10}, + 105: {11, 12}, }, } suite.loadTypes = map[int64]querypb.LoadType{ @@ -115,6 +120,14 @@ func (suite *JobSuite) SetupSuite() { Return(vChannels, segmentBinlogs, nil) } } + + suite.cluster = session.NewMockCluster(suite.T()) + suite.cluster.EXPECT(). + LoadPartitions(mock.Anything, mock.Anything, mock.Anything). + Return(utils.WrapStatus(commonpb.ErrorCode_Success, ""), nil) + suite.cluster.EXPECT(). + ReleasePartitions(mock.Anything, mock.Anything, mock.Anything). + Return(utils.WrapStatus(commonpb.ErrorCode_Success, ""), nil) } func (suite *JobSuite) SetupTest() { @@ -140,6 +153,7 @@ func (suite *JobSuite) SetupTest() { suite.dist, suite.broker, ) + suite.targetObserver.Start(context.Background()) suite.scheduler = NewScheduler() suite.scheduler.Start(context.Background()) @@ -160,19 +174,14 @@ func (suite *JobSuite) SetupTest() { func (suite *JobSuite) TearDownTest() { suite.kv.Close() suite.scheduler.Stop() + suite.targetObserver.Stop() } func (suite *JobSuite) BeforeTest(suiteName, testName string) { - switch testName { - case "TestLoadCollection": - for collection, partitions := range suite.partitions { - if suite.loadTypes[collection] != querypb.LoadType_LoadCollection { - continue - } - suite.broker.EXPECT(). - GetPartitions(mock.Anything, collection). - Return(partitions, nil) - } + for collection, partitions := range suite.partitions { + suite.broker.EXPECT(). + GetPartitions(mock.Anything, collection). 
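+			// every load job now resolves the collection's partitions through
+			// the broker, so GetPartitions is stubbed for all test collections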
+ Return(partitions, nil) } } @@ -195,7 +204,9 @@ func (suite *JobSuite) TestLoadCollection() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -204,7 +215,7 @@ func (suite *JobSuite) TestLoadCollection() { suite.NoError(err) suite.EqualValues(1, suite.meta.GetReplicaNumber(collection)) suite.targetMgr.UpdateCollectionCurrentTarget(collection) - suite.assertLoaded(collection) + suite.assertCollectionLoaded(collection) } // Test load again @@ -220,7 +231,9 @@ func (suite *JobSuite) TestLoadCollection() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -243,7 +256,9 @@ func (suite *JobSuite) TestLoadCollection() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -268,13 +283,15 @@ func (suite *JobSuite) TestLoadCollection() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) suite.scheduler.Add(job) err := job.Wait() - suite.ErrorIs(err, ErrLoadParameterMismatched) + suite.ErrorIs(err, ErrCollectionLoaded) } suite.meta.ResourceManager.AddResourceGroup("rg1") @@ -292,7 +309,9 @@ func (suite *JobSuite) TestLoadCollection() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -302,7 +321,7 @@ func (suite *JobSuite) TestLoadCollection() { // Load with 3 replica on 3 rg req = &querypb.LoadCollectionRequest{ - CollectionID: 1002, + CollectionID: 1001, ReplicaNumber: 3, ResourceGroups: []string{"rg1", "rg2", "rg3"}, } @@ -311,7 +330,9 @@ func (suite *JobSuite) TestLoadCollection() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -338,7 +359,9 @@ func (suite *JobSuite) TestLoadCollectionWithReplicas() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -368,7 +391,9 @@ func (suite *JobSuite) TestLoadCollectionWithDiffIndex() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -377,7 +402,7 @@ func (suite *JobSuite) TestLoadCollectionWithDiffIndex() { suite.NoError(err) suite.EqualValues(1, suite.meta.GetReplicaNumber(collection)) suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...) - suite.assertLoaded(collection) + suite.assertCollectionLoaded(collection) } // Test load with different index @@ -396,7 +421,9 @@ func (suite *JobSuite) TestLoadCollectionWithDiffIndex() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -425,7 +452,9 @@ func (suite *JobSuite) TestLoadPartition() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -434,7 +463,7 @@ func (suite *JobSuite) TestLoadPartition() { suite.NoError(err) suite.EqualValues(1, suite.meta.GetReplicaNumber(collection)) suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...) 
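+	// load jobs only populate the next target; tests promote it to the
+	// current target before asserting on loaded state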
- suite.assertLoaded(collection) + suite.assertCollectionLoaded(collection) } // Test load partition again @@ -453,7 +482,9 @@ func (suite *JobSuite) TestLoadPartition() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -478,7 +509,9 @@ func (suite *JobSuite) TestLoadPartition() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -496,20 +529,22 @@ func (suite *JobSuite) TestLoadPartition() { req := &querypb.LoadPartitionsRequest{ CollectionID: collection, PartitionIDs: append(suite.partitions[collection], 200), - ReplicaNumber: 3, + ReplicaNumber: 1, } job := NewLoadPartitionJob( ctx, req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) suite.scheduler.Add(job) err := job.Wait() - suite.ErrorIs(err, ErrLoadParameterMismatched) + suite.NoError(err) } // Test load collection while partitions exists @@ -527,13 +562,15 @@ func (suite *JobSuite) TestLoadPartition() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) suite.scheduler.Add(job) err := job.Wait() - suite.ErrorIs(err, ErrLoadParameterMismatched) + suite.ErrorIs(err, ErrCollectionLoaded) } suite.meta.ResourceManager.AddResourceGroup("rg1") @@ -541,9 +578,11 @@ func (suite *JobSuite) TestLoadPartition() { suite.meta.ResourceManager.AddResourceGroup("rg3") // test load 3 replica in 1 rg, should pass rg check + suite.broker.EXPECT().GetPartitions(mock.Anything, int64(999)).Return([]int64{888}, nil) + suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(999), int64(888)).Return(nil, nil, nil) req := &querypb.LoadPartitionsRequest{ - CollectionID: 100, - PartitionIDs: []int64{1001}, + CollectionID: 999, + PartitionIDs: []int64{888}, ReplicaNumber: 3, ResourceGroups: []string{"rg1"}, } @@ -552,7 +591,9 @@ func (suite *JobSuite) TestLoadPartition() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -561,9 +602,11 @@ func (suite *JobSuite) TestLoadPartition() { suite.Contains(err.Error(), meta.ErrNodeNotEnough.Error()) // test load 3 replica in 3 rg, should pass rg check + suite.broker.EXPECT().GetPartitions(mock.Anything, int64(999)).Return([]int64{888}, nil) + suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(999), int64(888)).Return(nil, nil, nil) req = &querypb.LoadPartitionsRequest{ - CollectionID: 102, - PartitionIDs: []int64{1001}, + CollectionID: 999, + PartitionIDs: []int64{888}, ReplicaNumber: 3, ResourceGroups: []string{"rg1", "rg2", "rg3"}, } @@ -572,7 +615,9 @@ func (suite *JobSuite) TestLoadPartition() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -581,6 +626,120 @@ func (suite *JobSuite) TestLoadPartition() { suite.Contains(err.Error(), meta.ErrNodeNotEnough.Error()) } +func (suite *JobSuite) TestDynamicLoad() { + ctx := context.Background() + + collection := suite.collections[0] + p0, p1, p2 := suite.partitions[collection][0], suite.partitions[collection][1], suite.partitions[collection][2] + newLoadPartJob := func(partitions ...int64) *LoadPartitionJob { + req := &querypb.LoadPartitionsRequest{ + CollectionID: collection, + PartitionIDs: partitions, + ReplicaNumber: 1, + } + job := NewLoadPartitionJob( + ctx, + req, + suite.dist, + suite.meta, + suite.cluster, + suite.targetMgr, + 
suite.targetObserver, + suite.broker, + suite.nodeMgr, + ) + return job + } + newLoadColJob := func() *LoadCollectionJob { + req := &querypb.LoadCollectionRequest{ + CollectionID: collection, + ReplicaNumber: 1, + } + job := NewLoadCollectionJob( + ctx, + req, + suite.dist, + suite.meta, + suite.cluster, + suite.targetMgr, + suite.targetObserver, + suite.broker, + suite.nodeMgr, + ) + return job + } + + // loaded: none + // action: load p0, p1, p2 + // expect: p0, p1, p2 loaded + job := newLoadPartJob(p0, p1, p2) + suite.scheduler.Add(job) + err := job.Wait() + suite.NoError(err) + suite.targetMgr.UpdateCollectionCurrentTarget(collection) + suite.assertPartitionLoaded(collection, p0, p1, p2) + + // loaded: p0, p1, p2 + // action: load p0, p1, p2 + // expect: do nothing, p0, p1, p2 loaded + job = newLoadPartJob(p0, p1, p2) + suite.scheduler.Add(job) + err = job.Wait() + suite.ErrorIs(err, ErrCollectionLoaded) + suite.assertPartitionLoaded(collection) + + // loaded: p0, p1 + // action: load p2 + // expect: p0, p1, p2 loaded + suite.releaseAll() + job = newLoadPartJob(p0, p1) + suite.scheduler.Add(job) + err = job.Wait() + suite.NoError(err) + suite.targetMgr.UpdateCollectionCurrentTarget(collection) + suite.assertPartitionLoaded(collection, p0, p1) + job = newLoadPartJob(p2) + suite.scheduler.Add(job) + err = job.Wait() + suite.NoError(err) + suite.targetMgr.UpdateCollectionCurrentTarget(collection) + suite.assertPartitionLoaded(collection, p2) + + // loaded: p0, p1 + // action: load p1, p2 + // expect: p0, p1, p2 loaded + suite.releaseAll() + job = newLoadPartJob(p0, p1) + suite.scheduler.Add(job) + err = job.Wait() + suite.NoError(err) + suite.targetMgr.UpdateCollectionCurrentTarget(collection) + suite.assertPartitionLoaded(collection, p0, p1) + job = newLoadPartJob(p1, p2) + suite.scheduler.Add(job) + err = job.Wait() + suite.NoError(err) + suite.targetMgr.UpdateCollectionCurrentTarget(collection) + suite.assertPartitionLoaded(collection, p2) + + // loaded: p0, p1 + // action: load col + // expect: col loaded + suite.releaseAll() + job = newLoadPartJob(p0, p1) + suite.scheduler.Add(job) + err = job.Wait() + suite.NoError(err) + suite.targetMgr.UpdateCollectionCurrentTarget(collection) + suite.assertPartitionLoaded(collection, p0, p1) + colJob := newLoadColJob() + suite.scheduler.Add(colJob) + err = colJob.Wait() + suite.NoError(err) + suite.targetMgr.UpdateCollectionCurrentTarget(collection) + suite.assertPartitionLoaded(collection, p2) +} + func (suite *JobSuite) TestLoadPartitionWithReplicas() { ctx := context.Background() @@ -600,7 +759,9 @@ func (suite *JobSuite) TestLoadPartitionWithReplicas() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -631,7 +792,9 @@ func (suite *JobSuite) TestLoadPartitionWithDiffIndex() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -640,7 +803,7 @@ func (suite *JobSuite) TestLoadPartitionWithDiffIndex() { suite.NoError(err) suite.EqualValues(1, suite.meta.GetReplicaNumber(collection)) suite.targetMgr.UpdateCollectionCurrentTarget(collection, suite.partitions[collection]...) 
- suite.assertLoaded(collection) + suite.assertCollectionLoaded(collection) } // Test load partition with different index @@ -661,7 +824,9 @@ func (suite *JobSuite) TestLoadPartitionWithDiffIndex() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -692,7 +857,7 @@ func (suite *JobSuite) TestReleaseCollection() { suite.scheduler.Add(job) err := job.Wait() suite.NoError(err) - suite.assertReleased(collection) + suite.assertCollectionReleased(collection) } // Test release again @@ -711,7 +876,7 @@ func (suite *JobSuite) TestReleaseCollection() { suite.scheduler.Add(job) err := job.Wait() suite.NoError(err) - suite.assertReleased(collection) + suite.assertCollectionReleased(collection) } } @@ -731,18 +896,14 @@ func (suite *JobSuite) TestReleasePartition() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, suite.targetObserver, ) suite.scheduler.Add(job) err := job.Wait() - if suite.loadTypes[collection] == querypb.LoadType_LoadCollection { - suite.ErrorIs(err, ErrLoadParameterMismatched) - suite.assertLoaded(collection) - } else { - suite.NoError(err) - suite.assertReleased(collection) - } + suite.NoError(err) + suite.assertPartitionReleased(collection, suite.partitions[collection]...) } // Test release again @@ -756,18 +917,14 @@ func (suite *JobSuite) TestReleasePartition() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, suite.targetObserver, ) suite.scheduler.Add(job) err := job.Wait() - if suite.loadTypes[collection] == querypb.LoadType_LoadCollection { - suite.ErrorIs(err, ErrLoadParameterMismatched) - suite.assertLoaded(collection) - } else { - suite.NoError(err) - suite.assertReleased(collection) - } + suite.NoError(err) + suite.assertPartitionReleased(collection, suite.partitions[collection]...) } // Test release partial partitions @@ -783,24 +940,114 @@ func (suite *JobSuite) TestReleasePartition() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, suite.targetObserver, ) suite.scheduler.Add(job) err := job.Wait() - if suite.loadTypes[collection] == querypb.LoadType_LoadCollection { - suite.ErrorIs(err, ErrLoadParameterMismatched) - suite.assertLoaded(collection) - } else { - suite.NoError(err) - suite.True(suite.meta.Exist(collection)) - partitions := suite.meta.GetPartitionsByCollection(collection) - suite.Len(partitions, 1) - suite.Equal(suite.partitions[collection][0], partitions[0].GetPartitionID()) - } + suite.NoError(err) + suite.True(suite.meta.Exist(collection)) + partitions := suite.meta.GetPartitionsByCollection(collection) + suite.Len(partitions, 1) + suite.Equal(suite.partitions[collection][0], partitions[0].GetPartitionID()) + suite.assertPartitionReleased(collection, suite.partitions[collection][1:]...) 
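+		// releasing a strict subset keeps the collection meta and the
+		// unreleased partitions loaded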
} } +func (suite *JobSuite) TestDynamicRelease() { + ctx := context.Background() + + col0, col1 := suite.collections[0], suite.collections[1] + p0, p1, p2 := suite.partitions[col0][0], suite.partitions[col0][1], suite.partitions[col0][2] + p3, p4, p5 := suite.partitions[col1][0], suite.partitions[col1][1], suite.partitions[col1][2] + newReleasePartJob := func(col int64, partitions ...int64) *ReleasePartitionJob { + req := &querypb.ReleasePartitionsRequest{ + CollectionID: col, + PartitionIDs: partitions, + } + job := NewReleasePartitionJob( + ctx, + req, + suite.dist, + suite.meta, + suite.cluster, + suite.targetMgr, + suite.targetObserver, + ) + return job + } + newReleaseColJob := func(col int64) *ReleaseCollectionJob { + req := &querypb.ReleaseCollectionRequest{ + CollectionID: col, + } + job := NewReleaseCollectionJob( + ctx, + req, + suite.dist, + suite.meta, + suite.targetMgr, + suite.targetObserver, + ) + return job + } + + // loaded: p0, p1, p2 + // action: release p0 + // expect: p0 released, p1, p2 loaded + suite.loadAll() + job := newReleasePartJob(col0, p0) + suite.scheduler.Add(job) + err := job.Wait() + suite.NoError(err) + suite.assertPartitionReleased(col0, p0) + suite.assertPartitionLoaded(col0, p1, p2) + + // loaded: p1, p2 + // action: release p0, p1 + // expect: p1 released, p2 loaded + job = newReleasePartJob(col0, p0, p1) + suite.scheduler.Add(job) + err = job.Wait() + suite.NoError(err) + suite.assertPartitionReleased(col0, p0, p1) + suite.assertPartitionLoaded(col0, p2) + + // loaded: p2 + // action: release p2 + // expect: loadType=col: col loaded, p2 released + job = newReleasePartJob(col0, p2) + suite.scheduler.Add(job) + err = job.Wait() + suite.NoError(err) + suite.assertPartitionReleased(col0, p0, p1, p2) + suite.True(suite.meta.Exist(col0)) + + // loaded: p0, p1, p2 + // action: release col + // expect: col released + suite.releaseAll() + suite.loadAll() + releaseColJob := newReleaseColJob(col0) + suite.scheduler.Add(releaseColJob) + err = releaseColJob.Wait() + suite.NoError(err) + suite.assertCollectionReleased(col0) + suite.assertPartitionReleased(col0, p0, p1, p2) + + // loaded: p3, p4, p5 + // action: release p3, p4, p5 + // expect: loadType=partition: col released + suite.releaseAll() + suite.loadAll() + job = newReleasePartJob(col1, p3, p4, p5) + suite.scheduler.Add(job) + err = job.Wait() + suite.NoError(err) + suite.assertCollectionReleased(col1) + suite.assertPartitionReleased(col1, p3, p4, p5) +} + func (suite *JobSuite) TestLoadCollectionStoreFailed() { // Store collection failed store := meta.NewMockStore(suite.T()) @@ -818,14 +1065,10 @@ func (suite *JobSuite) TestLoadCollectionStoreFailed() { if suite.loadTypes[collection] != querypb.LoadType_LoadCollection { continue } - + suite.broker.EXPECT().GetPartitions(mock.Anything, collection).Return(suite.partitions[collection], nil) err := errors.New("failed to store collection") store.EXPECT().SaveReplica(mock.Anything).Return(nil) - store.EXPECT().SaveCollection(&querypb.CollectionLoadInfo{ - CollectionID: collection, - ReplicaNumber: 1, - Status: querypb.LoadStatus_Loading, - }).Return(err) + store.EXPECT().SaveCollection(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(err) store.EXPECT().ReleaseReplicas(collection).Return(nil) req := &querypb.LoadCollectionRequest{ @@ -836,7 +1079,9 @@ func (suite *JobSuite) TestLoadCollectionStoreFailed() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -866,7 +1111,7 @@ 
func (suite *JobSuite) TestLoadPartitionStoreFailed() { } store.EXPECT().SaveReplica(mock.Anything).Return(nil) - store.EXPECT().SavePartition(mock.Anything, mock.Anything).Return(err) + store.EXPECT().SaveCollection(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(err) store.EXPECT().ReleaseReplicas(collection).Return(nil) req := &querypb.LoadPartitionsRequest{ @@ -878,7 +1123,9 @@ func (suite *JobSuite) TestLoadPartitionStoreFailed() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -892,6 +1139,9 @@ func (suite *JobSuite) TestLoadCreateReplicaFailed() { // Store replica failed suite.meta = meta.NewMeta(ErrorIDAllocator(), suite.store, session.NewNodeManager()) for _, collection := range suite.collections { + suite.broker.EXPECT(). + GetPartitions(mock.Anything, collection). + Return(suite.partitions[collection], nil) req := &querypb.LoadCollectionRequest{ CollectionID: collection, } @@ -900,7 +1150,9 @@ func (suite *JobSuite) TestLoadCreateReplicaFailed() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -910,6 +1162,59 @@ func (suite *JobSuite) TestLoadCreateReplicaFailed() { } } +func (suite *JobSuite) TestSyncNewCreatedPartition() { + newPartition := int64(999) + + // test sync new created partition + suite.loadAll() + req := &querypb.SyncNewCreatedPartitionRequest{ + CollectionID: suite.collections[0], + PartitionID: newPartition, + } + job := NewSyncNewCreatedPartitionJob( + context.Background(), + req, + suite.meta, + suite.cluster, + ) + suite.scheduler.Add(job) + err := job.Wait() + suite.NoError(err) + partition := suite.meta.CollectionManager.GetPartition(newPartition) + suite.NotNil(partition) + suite.Equal(querypb.LoadStatus_Loaded, partition.GetStatus()) + + // test collection not loaded + req = &querypb.SyncNewCreatedPartitionRequest{ + CollectionID: int64(888), + PartitionID: newPartition, + } + job = NewSyncNewCreatedPartitionJob( + context.Background(), + req, + suite.meta, + suite.cluster, + ) + suite.scheduler.Add(job) + err = job.Wait() + suite.ErrorIs(err, ErrPartitionNotInTarget) + + // test collection loaded, but its loadType is loadPartition + req = &querypb.SyncNewCreatedPartitionRequest{ + CollectionID: suite.collections[1], + PartitionID: newPartition, + } + job = NewSyncNewCreatedPartitionJob( + context.Background(), + req, + suite.meta, + suite.cluster, + ) + suite.scheduler.Add(job) + err = job.Wait() + suite.ErrorIs(err, ErrPartitionNotInTarget) +} + func (suite *JobSuite) loadAll() { ctx := context.Background() for _, collection := range suite.collections { @@ -922,7 +1227,9 @@ func (suite *JobSuite) loadAll() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -932,6 +1239,7 @@ func (suite *JobSuite) loadAll() { suite.EqualValues(1, suite.meta.GetReplicaNumber(collection)) suite.True(suite.meta.Exist(collection)) suite.NotNil(suite.meta.GetCollection(collection)) + suite.NotNil(suite.meta.GetPartitionsByCollection(collection)) suite.targetMgr.UpdateCollectionCurrentTarget(collection) } else { req := &querypb.LoadPartitionsRequest{ @@ -943,7 +1251,9 @@ func (suite *JobSuite) loadAll() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -952,6 +1262,7 @@ func (suite *JobSuite) loadAll() { suite.NoError(err) suite.EqualValues(1, 
suite.meta.GetReplicaNumber(collection)) suite.True(suite.meta.Exist(collection)) + suite.NotNil(suite.meta.GetCollection(collection)) suite.NotNil(suite.meta.GetPartitionsByCollection(collection)) suite.targetMgr.UpdateCollectionCurrentTarget(collection) } @@ -975,24 +1286,43 @@ func (suite *JobSuite) releaseAll() { suite.scheduler.Add(job) err := job.Wait() suite.NoError(err) - suite.assertReleased(collection) + suite.assertCollectionReleased(collection) } } -func (suite *JobSuite) assertLoaded(collection int64) { +func (suite *JobSuite) assertCollectionLoaded(collection int64) { suite.True(suite.meta.Exist(collection)) + suite.NotEqual(0, len(suite.meta.ReplicaManager.GetByCollection(collection))) for _, channel := range suite.channels[collection] { suite.NotNil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget)) } - for _, partitions := range suite.segments[collection] { - for _, segment := range partitions { + for _, segments := range suite.segments[collection] { + for _, segment := range segments { suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget)) } } } -func (suite *JobSuite) assertReleased(collection int64) { +func (suite *JobSuite) assertPartitionLoaded(collection int64, partitionIDs ...int64) { + suite.True(suite.meta.Exist(collection)) + suite.NotEqual(0, len(suite.meta.ReplicaManager.GetByCollection(collection))) + for _, channel := range suite.channels[collection] { + suite.NotNil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget)) + } + for partitionID, segments := range suite.segments[collection] { + if !lo.Contains(partitionIDs, partitionID) { + continue + } + suite.NotNil(suite.meta.GetPartition(partitionID)) + for _, segment := range segments { + suite.NotNil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget)) + } + } +} + +func (suite *JobSuite) assertCollectionReleased(collection int64) { suite.False(suite.meta.Exist(collection)) + suite.Equal(0, len(suite.meta.ReplicaManager.GetByCollection(collection))) for _, channel := range suite.channels[collection] { suite.Nil(suite.targetMgr.GetDmChannel(collection, channel, meta.CurrentTarget)) } @@ -1003,6 +1333,16 @@ func (suite *JobSuite) assertReleased(collection int64) { } } +func (suite *JobSuite) assertPartitionReleased(collection int64, partitionIDs ...int64) { + for _, partition := range partitionIDs { + suite.Nil(suite.meta.GetPartition(partition)) + segments := suite.segments[collection][partition] + for _, segment := range segments { + suite.Nil(suite.targetMgr.GetHistoricalSegment(collection, segment, meta.CurrentTarget)) + } + } +} + func TestJob(t *testing.T) { suite.Run(t, new(JobSuite)) } diff --git a/internal/querycoordv2/job/undo.go b/internal/querycoordv2/job/undo.go new file mode 100644 index 0000000000..ae456f865a --- /dev/null +++ b/internal/querycoordv2/job/undo.go @@ -0,0 +1,68 @@ +// Licensed to the LF AI & Data foundation under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package job
+
+import (
+	"context"
+
+	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
+	"github.com/milvus-io/milvus/internal/querycoordv2/observers"
+	"github.com/milvus-io/milvus/internal/querycoordv2/session"
+)
+
+type UndoList struct {
+	PartitionsLoaded  bool // whether partitions were loaded on QueryNodes during loading
+	TargetUpdated     bool // whether the next target was updated during loading
+	NewReplicaCreated bool // whether new replicas were created during loading
+
+	CollectionID   int64
+	LackPartitions []int64
+
+	ctx            context.Context
+	meta           *meta.Meta
+	cluster        session.Cluster
+	targetMgr      *meta.TargetManager
+	targetObserver *observers.TargetObserver
+}
+
+func NewUndoList(ctx context.Context, meta *meta.Meta,
+	cluster session.Cluster, targetMgr *meta.TargetManager, targetObserver *observers.TargetObserver) *UndoList {
+	return &UndoList{
+		ctx:            ctx,
+		meta:           meta,
+		cluster:        cluster,
+		targetMgr:      targetMgr,
+		targetObserver: targetObserver,
+	}
+}
+
+func (u *UndoList) RollBack() {
+	if u.PartitionsLoaded {
+		releasePartitions(u.ctx, u.meta, u.cluster, true, u.CollectionID, u.LackPartitions...)
+	}
+	if u.TargetUpdated {
+		if !u.meta.CollectionManager.Exist(u.CollectionID) {
+			u.targetMgr.RemoveCollection(u.CollectionID)
+			u.targetObserver.ReleaseCollection(u.CollectionID)
+		} else {
+			u.targetMgr.RemovePartition(u.CollectionID, u.LackPartitions...)
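+			// the collection meta still exists on this branch, so only the
+			// partition-level targets added by this job are rolled back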
+ } + } + if u.NewReplicaCreated { + u.meta.ReplicaManager.RemoveCollection(u.CollectionID) + } +} diff --git a/internal/querycoordv2/job/utils.go b/internal/querycoordv2/job/utils.go index 9f80a01dd4..b3cfc17455 100644 --- a/internal/querycoordv2/job/utils.go +++ b/internal/querycoordv2/job/utils.go @@ -17,11 +17,17 @@ package job import ( + "context" + "fmt" "time" - "github.com/milvus-io/milvus/internal/querycoordv2/meta" - "github.com/milvus-io/milvus/internal/util/typeutil" "github.com/samber/lo" + + "github.com/milvus-io/milvus-proto/go-api/commonpb" + "github.com/milvus-io/milvus/internal/proto/querypb" + "github.com/milvus-io/milvus/internal/querycoordv2/meta" + "github.com/milvus-io/milvus/internal/querycoordv2/session" + "github.com/milvus-io/milvus/internal/util/typeutil" ) // waitCollectionReleased blocks until @@ -49,3 +55,57 @@ func waitCollectionReleased(dist *meta.DistributionManager, collection int64, pa time.Sleep(200 * time.Millisecond) } } + +func loadPartitions(ctx context.Context, meta *meta.Meta, cluster session.Cluster, + ignoreErr bool, collection int64, partitions ...int64) error { + replicas := meta.ReplicaManager.GetByCollection(collection) + loadReq := &querypb.LoadPartitionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_LoadPartitions, + }, + CollectionID: collection, + PartitionIDs: partitions, + } + for _, replica := range replicas { + for _, node := range replica.GetNodes() { + status, err := cluster.LoadPartitions(ctx, node, loadReq) + if ignoreErr { + continue + } + if err != nil { + return err + } + if status.GetErrorCode() != commonpb.ErrorCode_Success { + return fmt.Errorf("QueryNode failed to loadPartitions, nodeID=%d, err=%s", node, status.GetReason()) + } + } + } + return nil +} + +func releasePartitions(ctx context.Context, meta *meta.Meta, cluster session.Cluster, + ignoreErr bool, collection int64, partitions ...int64) error { + replicas := meta.ReplicaManager.GetByCollection(collection) + releaseReq := &querypb.ReleasePartitionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_ReleasePartitions, + }, + CollectionID: collection, + PartitionIDs: partitions, + } + for _, replica := range replicas { + for _, node := range replica.GetNodes() { + status, err := cluster.ReleasePartitions(ctx, node, releaseReq) + if ignoreErr { + continue + } + if err != nil { + return err + } + if status.GetErrorCode() != commonpb.ErrorCode_Success { + return fmt.Errorf("QueryNode failed to releasePartitions, nodeID=%d, err=%s", node, status.GetReason()) + } + } + } + return nil +} diff --git a/internal/querycoordv2/meta/collection_manager.go b/internal/querycoordv2/meta/collection_manager.go index 3b96ff6b13..44e9a9fa5e 100644 --- a/internal/querycoordv2/meta/collection_manager.go +++ b/internal/querycoordv2/meta/collection_manager.go @@ -17,15 +17,19 @@ package meta import ( + "context" "sync" "time" "github.com/golang/protobuf/proto" + "github.com/samber/lo" + "go.uber.org/zap" + + "github.com/milvus-io/milvus/internal/log" "github.com/milvus-io/milvus/internal/proto/querypb" "github.com/milvus-io/milvus/internal/util/merr" "github.com/milvus-io/milvus/internal/util/typeutil" . "github.com/milvus-io/milvus/internal/util/typeutil" - "github.com/samber/lo" ) type Collection struct { @@ -72,7 +76,7 @@ func NewCollectionManager(store Store) *CollectionManager { // Recover recovers collections from kv store, // panics if failed -func (m *CollectionManager) Recover() error { +func (m *CollectionManager) Recover(broker Broker) error { collections, err := m.store.GetCollections() if err != nil { return err @@ -88,7 +92,6 @@ func (m *CollectionManager) Recover() error { m.store.ReleaseCollection(collection.GetCollectionID()) continue } - m.collections[collection.CollectionID] = &Collection{ CollectionLoadInfo: collection, } @@ -104,94 +107,171 @@ func (m *CollectionManager) Recover() error { m.store.ReleasePartition(collection, partitionIDs...) break } - m.partitions[partition.PartitionID] = &Partition{ PartitionLoadInfo: partition, } } } + err = m.upgradeRecover(broker) + if err != nil { + log.Error("upgrade recover failed", zap.Error(err)) + return err + } return nil } -func (m *CollectionManager) GetCollection(id UniqueID) *Collection { - m.rwmutex.RLock() - defer m.rwmutex.RUnlock() - - return m.collections[id] -} - -func (m *CollectionManager) GetPartition(id UniqueID) *Partition { - m.rwmutex.RLock() - defer m.rwmutex.RUnlock() - - return m.partitions[id] -} - -func (m *CollectionManager) GetLoadType(id UniqueID) querypb.LoadType { - m.rwmutex.RLock() - defer m.rwmutex.RUnlock() - - _, ok := m.collections[id] - if ok { - return querypb.LoadType_LoadCollection +// upgradeRecover recovers meta written by old versions (<= 2.2.x) for compatibility. +func (m *CollectionManager) upgradeRecover(broker Broker) error { + for _, collection := range m.GetAllCollections() { + // Checking loadType is a workaround to detect CollectionLoadInfo written by old versions, + // which had no loadType field; an explicit version marker would be a cleaner check. + if collection.GetLoadType() == querypb.LoadType_UnKnownType { + partitionIDs, err := broker.GetPartitions(context.Background(), collection.GetCollectionID()) + if err != nil { + return err + } + partitions := lo.Map(partitionIDs, func(partitionID int64, _ int) *Partition { + return &Partition{ + PartitionLoadInfo: &querypb.PartitionLoadInfo{ + CollectionID: collection.GetCollectionID(), + PartitionID: partitionID, + Status: querypb.LoadStatus_Loaded, + }, + LoadPercentage: 100, + } + }) + err = m.putPartition(partitions, true) + if err != nil { + return err + } + } } - if len(m.getPartitionsByCollection(id)) > 0 { - return querypb.LoadType_LoadPartition + for _, partition := range m.GetAllPartitions() { + // In old versions, the collection itself was NOT stored when its partitions were loaded. + // Rebuild the missing CollectionLoadInfo from one of its partitions and mark it as LoadType_LoadPartition.
+ if _, ok := m.collections[partition.GetCollectionID()]; !ok { + col := &Collection{ + CollectionLoadInfo: &querypb.CollectionLoadInfo{ + CollectionID: partition.GetCollectionID(), + ReplicaNumber: partition.GetReplicaNumber(), + Status: partition.GetStatus(), + FieldIndexID: partition.GetFieldIndexID(), + LoadType: querypb.LoadType_LoadPartition, + }, + LoadPercentage: 100, + } + err := m.PutCollection(col) + if err != nil { + return err + } + } + } + return nil +} + +func (m *CollectionManager) GetCollection(collectionID UniqueID) *Collection { + m.rwmutex.RLock() + defer m.rwmutex.RUnlock() + + return m.collections[collectionID] +} + +func (m *CollectionManager) GetPartition(partitionID UniqueID) *Partition { + m.rwmutex.RLock() + defer m.rwmutex.RUnlock() + + return m.partitions[partitionID] +} + +func (m *CollectionManager) GetLoadType(collectionID UniqueID) querypb.LoadType { + m.rwmutex.RLock() + defer m.rwmutex.RUnlock() + + collection, ok := m.collections[collectionID] + if ok { + return collection.GetLoadType() } return querypb.LoadType_UnKnownType } -func (m *CollectionManager) GetReplicaNumber(id UniqueID) int32 { +func (m *CollectionManager) GetReplicaNumber(collectionID UniqueID) int32 { m.rwmutex.RLock() defer m.rwmutex.RUnlock() - collection, ok := m.collections[id] + collection, ok := m.collections[collectionID] if ok { return collection.GetReplicaNumber() } - partitions := m.getPartitionsByCollection(id) - if len(partitions) > 0 { - return partitions[0].GetReplicaNumber() + return -1 +} + +// GetCurrentLoadPercentage returns the current load percentage of the collection, averaged over its partitions. +func (m *CollectionManager) GetCurrentLoadPercentage(collectionID UniqueID) int32 { + m.rwmutex.RLock() + defer m.rwmutex.RUnlock() + + collection, ok := m.collections[collectionID] + if ok { + partitions := m.getPartitionsByCollection(collectionID) + if len(partitions) > 0 { + return lo.SumBy(partitions, func(partition *Partition) int32 { + return partition.LoadPercentage + }) / int32(len(partitions)) + } + if collection.GetLoadType() == querypb.LoadType_LoadCollection { + // no partition exists + return 100 + } } return -1 } -func (m *CollectionManager) GetLoadPercentage(id UniqueID) int32 { +// GetCollectionLoadPercentage returns the collection's recorded load percentage. +// Note: collection.LoadPercentage == 100 only means the collection was once fully loaded and is queryable; +// to check whether it is fully loaded right now, use GetCurrentLoadPercentage instead.
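+// For example, while a newly added partition is still loading at 0%, the collection's
+// LoadPercentage may still be 100, whereas GetCurrentLoadPercentage averages over the
+// partitions: (0+100+100)/3 = 66.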
+func (m *CollectionManager) GetCollectionLoadPercentage(collectionID UniqueID) int32 { m.rwmutex.RLock() defer m.rwmutex.RUnlock() - collection, ok := m.collections[id] + collection, ok := m.collections[collectionID] if ok { return collection.LoadPercentage } - partitions := m.getPartitionsByCollection(id) - if len(partitions) > 0 { - return lo.SumBy(partitions, func(partition *Partition) int32 { - return partition.LoadPercentage - }) / int32(len(partitions)) + return -1 +} + +func (m *CollectionManager) GetPartitionLoadPercentage(partitionID UniqueID) int32 { + m.rwmutex.RLock() + defer m.rwmutex.RUnlock() + + partition, ok := m.partitions[partitionID] + if ok { + return partition.LoadPercentage } return -1 } -func (m *CollectionManager) GetStatus(id UniqueID) querypb.LoadStatus { +func (m *CollectionManager) GetStatus(collectionID UniqueID) querypb.LoadStatus { m.rwmutex.RLock() defer m.rwmutex.RUnlock() - collection, ok := m.collections[id] - if ok { - return collection.GetStatus() - } - partitions := m.getPartitionsByCollection(id) - if len(partitions) == 0 { + collection, ok := m.collections[collectionID] + if !ok { return querypb.LoadStatus_Invalid } + partitions := m.getPartitionsByCollection(collectionID) for _, partition := range partitions { if partition.GetStatus() == querypb.LoadStatus_Loading { return querypb.LoadStatus_Loading } } - return querypb.LoadStatus_Loaded + if len(partitions) > 0 { + return querypb.LoadStatus_Loaded + } + if collection.GetLoadType() == querypb.LoadType_LoadCollection { + return querypb.LoadStatus_Loaded + } + return querypb.LoadStatus_Invalid } func (m *CollectionManager) GetFieldIndex(collectionID UniqueID) map[int64]int64 { @@ -202,11 +282,7 @@ func (m *CollectionManager) GetFieldIndex(collectionID UniqueID) map[int64]int64 if ok { return collection.GetFieldIndexID() } - partitions := m.getPartitionsByCollection(collectionID) - if len(partitions) == 0 { - return nil - } - return partitions[0].GetFieldIndexID() + return nil } // ContainAnyIndex returns true if the loaded collection contains one of the given indexes, @@ -228,31 +304,18 @@ func (m *CollectionManager) containIndex(collectionID, indexID int64) bool { if ok { return lo.Contains(lo.Values(collection.GetFieldIndexID()), indexID) } - partitions := m.getPartitionsByCollection(collectionID) - if len(partitions) == 0 { - return false - } - for _, partition := range partitions { - if lo.Contains(lo.Values(partition.GetFieldIndexID()), indexID) { - return true - } - } return false } -func (m *CollectionManager) Exist(id UniqueID) bool { +func (m *CollectionManager) Exist(collectionID UniqueID) bool { m.rwmutex.RLock() defer m.rwmutex.RUnlock() - _, ok := m.collections[id] - if ok { - return true - } - partitions := m.getPartitionsByCollection(id) - return len(partitions) > 0 + _, ok := m.collections[collectionID] + return ok } -// GetAll returns the collection ID of all loaded collections and partitions +// GetAll returns the collection IDs of all loaded collections func (m *CollectionManager) GetAll() []int64 { m.rwmutex.RLock() defer m.rwmutex.RUnlock() @@ -261,9 +324,6 @@ func (m *CollectionManager) GetAll() []int64 { for _, collection := range m.collections { ids.Insert(collection.GetCollectionID()) } - for _, partition := range m.partitions { - ids.Insert(partition.GetCollectionID()) - } return ids.Collect() } @@ -298,11 +358,11 @@ func (m *CollectionManager) getPartitionsByCollection(collectionID UniqueID) []* return partitions } -func (m *CollectionManager) PutCollection(collection 
*Collection) error { +func (m *CollectionManager) PutCollection(collection *Collection, partitions ...*Partition) error { m.rwmutex.Lock() defer m.rwmutex.Unlock() - return m.putCollection(collection, true) + return m.putCollection(true, collection, partitions...) } func (m *CollectionManager) UpdateCollection(collection *Collection) error { @@ -314,7 +374,7 @@ func (m *CollectionManager) UpdateCollection(collection *Collection) error { return merr.WrapErrCollectionNotFound(collection.GetCollectionID()) } - return m.putCollection(collection, true) + return m.putCollection(true, collection) } func (m *CollectionManager) UpdateCollectionInMemory(collection *Collection) bool { @@ -326,17 +386,24 @@ func (m *CollectionManager) UpdateCollectionInMemory(collection *Collection) boo return false } - m.putCollection(collection, false) + m.putCollection(false, collection) return true } -func (m *CollectionManager) putCollection(collection *Collection, withSave bool) error { +func (m *CollectionManager) putCollection(withSave bool, collection *Collection, partitions ...*Partition) error { if withSave { - err := m.store.SaveCollection(collection.CollectionLoadInfo) + partitionInfos := lo.Map(partitions, func(partition *Partition, _ int) *querypb.PartitionLoadInfo { + return partition.PartitionLoadInfo + }) + err := m.store.SaveCollection(collection.CollectionLoadInfo, partitionInfos...) if err != nil { return err } } + for _, partition := range partitions { + partition.UpdatedAt = time.Now() + m.partitions[partition.GetPartitionID()] = partition + } collection.UpdatedAt = time.Now() m.collections[collection.CollectionID] = collection @@ -399,25 +466,25 @@ func (m *CollectionManager) putPartition(partitions []*Partition, withSave bool) return nil } -func (m *CollectionManager) RemoveCollection(id UniqueID) error { +// RemoveCollection removes the collection and its partitions. +func (m *CollectionManager) RemoveCollection(collectionID UniqueID) error { m.rwmutex.Lock() defer m.rwmutex.Unlock() - _, ok := m.collections[id] + _, ok := m.collections[collectionID] if ok { - err := m.store.ReleaseCollection(id) + err := m.store.ReleaseCollection(collectionID) if err != nil { return err } - delete(m.collections, id) - return nil + delete(m.collections, collectionID) + for partID, partition := range m.partitions { + if partition.CollectionID == collectionID { + delete(m.partitions, partID) + } + } } - - partitions := lo.Map(m.getPartitionsByCollection(id), - func(partition *Partition, _ int) int64 { - return partition.GetPartitionID() - }) - return m.removePartition(partitions...) + return nil } func (m *CollectionManager) RemovePartition(ids ...UniqueID) error { diff --git a/internal/querycoordv2/meta/collection_manager_test.go b/internal/querycoordv2/meta/collection_manager_test.go index 540cc4f0fe..5a206c57f9 100644 --- a/internal/querycoordv2/meta/collection_manager_test.go +++ b/internal/querycoordv2/meta/collection_manager_test.go @@ -21,12 +21,15 @@ import ( "testing" "time" + "github.com/samber/lo" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + "github.com/milvus-io/milvus/internal/kv" etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" "github.com/milvus-io/milvus/internal/proto/querypb" . 
"github.com/milvus-io/milvus/internal/querycoordv2/params" "github.com/milvus-io/milvus/internal/util/etcd" - "github.com/stretchr/testify/suite" ) type CollectionManagerSuite struct { @@ -37,11 +40,13 @@ type CollectionManagerSuite struct { partitions map[int64][]int64 // CollectionID -> PartitionIDs loadTypes []querypb.LoadType replicaNumber []int32 - loadPercentage []int32 + colLoadPercent []int32 + parLoadPercent map[int64][]int32 // Mocks - kv kv.MetaKv - store Store + kv kv.MetaKv + store Store + broker *MockBroker // Test object mgr *CollectionManager @@ -50,19 +55,27 @@ type CollectionManagerSuite struct { func (suite *CollectionManagerSuite) SetupSuite() { Params.Init() - suite.collections = []int64{100, 101, 102} + suite.collections = []int64{100, 101, 102, 103} suite.partitions = map[int64][]int64{ 100: {10}, 101: {11, 12}, 102: {13, 14, 15}, + 103: {}, // no partitions in this collection } suite.loadTypes = []querypb.LoadType{ querypb.LoadType_LoadCollection, querypb.LoadType_LoadPartition, querypb.LoadType_LoadCollection, + querypb.LoadType_LoadCollection, + } + suite.replicaNumber = []int32{1, 2, 3, 1} + suite.colLoadPercent = []int32{0, 50, 100, 100} + suite.parLoadPercent = map[int64][]int32{ + 100: {0}, + 101: {0, 100}, + 102: {100, 100, 100}, + 103: {}, } - suite.replicaNumber = []int32{1, 2, 3} - suite.loadPercentage = []int32{0, 50, 100} } func (suite *CollectionManagerSuite) SetupTest() { @@ -79,6 +92,7 @@ func (suite *CollectionManagerSuite) SetupTest() { suite.Require().NoError(err) suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath.GetValue()) suite.store = NewMetaStore(suite.kv) + suite.broker = NewMockBroker(suite.T()) suite.mgr = NewCollectionManager(suite.store) suite.loadAll() @@ -94,18 +108,18 @@ func (suite *CollectionManagerSuite) TestGetProperty() { for i, collection := range suite.collections { loadType := mgr.GetLoadType(collection) replicaNumber := mgr.GetReplicaNumber(collection) - percentage := mgr.GetLoadPercentage(collection) + percentage := mgr.GetCurrentLoadPercentage(collection) exist := mgr.Exist(collection) suite.Equal(suite.loadTypes[i], loadType) suite.Equal(suite.replicaNumber[i], replicaNumber) - suite.Equal(suite.loadPercentage[i], percentage) + suite.Equal(suite.colLoadPercent[i], percentage) suite.True(exist) } invalidCollection := -1 loadType := mgr.GetLoadType(int64(invalidCollection)) replicaNumber := mgr.GetReplicaNumber(int64(invalidCollection)) - percentage := mgr.GetLoadPercentage(int64(invalidCollection)) + percentage := mgr.GetCurrentLoadPercentage(int64(invalidCollection)) exist := mgr.Exist(int64(invalidCollection)) suite.Equal(querypb.LoadType_UnKnownType, loadType) suite.EqualValues(-1, replicaNumber) @@ -113,33 +127,45 @@ func (suite *CollectionManagerSuite) TestGetProperty() { suite.False(exist) } -func (suite *CollectionManagerSuite) TestGet() { - mgr := suite.mgr - - allCollections := mgr.GetAllCollections() - allPartitions := mgr.GetAllPartitions() - for i, collectionID := range suite.collections { - if suite.loadTypes[i] == querypb.LoadType_LoadCollection { - collection := mgr.GetCollection(collectionID) - suite.Equal(collectionID, collection.GetCollectionID()) - suite.Contains(allCollections, collection) - } else { - partitions := mgr.GetPartitionsByCollection(collectionID) - suite.Len(partitions, len(suite.partitions[collectionID])) - - for _, partitionID := range suite.partitions[collectionID] { - partition := mgr.GetPartition(partitionID) - suite.Equal(collectionID, partition.GetCollectionID()) - suite.Equal(partitionID, 
partition.GetPartitionID()) - suite.Contains(partitions, partition) - suite.Contains(allPartitions, partition) - } +func (suite *CollectionManagerSuite) TestPut() { + suite.releaseAll() + // test putting a collection together with its partitions + for i, collection := range suite.collections { + status := querypb.LoadStatus_Loaded + if suite.colLoadPercent[i] < 100 { + status = querypb.LoadStatus_Loading } - } - all := mgr.GetAll() - sort.Slice(all, func(i, j int) bool { return all[i] < all[j] }) - suite.Equal(suite.collections, all) + col := &Collection{ + CollectionLoadInfo: &querypb.CollectionLoadInfo{ + CollectionID: collection, + ReplicaNumber: suite.replicaNumber[i], + Status: status, + LoadType: suite.loadTypes[i], + }, + LoadPercentage: suite.colLoadPercent[i], + CreatedAt: time.Now(), + } + partitions := lo.Map(suite.partitions[collection], func(partition int64, j int) *Partition { + return &Partition{ + PartitionLoadInfo: &querypb.PartitionLoadInfo{ + CollectionID: collection, + PartitionID: partition, + ReplicaNumber: suite.replicaNumber[i], + Status: status, + }, + LoadPercentage: suite.parLoadPercent[collection][j], + CreatedAt: time.Now(), + } + }) + err := suite.mgr.PutCollection(col, partitions...) + suite.NoError(err) + } + suite.checkLoadResult() +} + +func (suite *CollectionManagerSuite) TestGet() { + suite.checkLoadResult() } func (suite *CollectionManagerSuite) TestUpdate() { @@ -177,7 +203,7 @@ func (suite *CollectionManagerSuite) TestUpdate() { } suite.clearMemory() - err := mgr.Recover() + err := mgr.Recover(suite.broker) suite.NoError(err) collections = mgr.GetAllCollections() partitions = mgr.GetAllPartitions() @@ -215,7 +241,7 @@ func (suite *CollectionManagerSuite) TestRemove() { } // Make sure the removes applied to meta store - err := mgr.Recover() + err := mgr.Recover(suite.broker) suite.NoError(err) for i, collectionID := range suite.collections { if suite.loadTypes[i] == querypb.LoadType_LoadCollection { @@ -237,37 +263,50 @@ func (suite *CollectionManagerSuite) TestRemove() { suite.Empty(partitions) } } + + // removing a collection should also release its partitions + suite.releaseAll() + suite.loadAll() + for _, collectionID := range suite.collections { + err := mgr.RemoveCollection(collectionID) + suite.NoError(err) + err = mgr.Recover(suite.broker) + suite.NoError(err) + collection := mgr.GetCollection(collectionID) + suite.Nil(collection) + partitions := mgr.GetPartitionsByCollection(collectionID) + suite.Empty(partitions) + } } func (suite *CollectionManagerSuite) TestRecover() { mgr := suite.mgr suite.clearMemory() - err := mgr.Recover() + err := mgr.Recover(suite.broker) suite.NoError(err) for i, collection := range suite.collections { - exist := suite.loadPercentage[i] == 100 + exist := suite.colLoadPercent[i] == 100 suite.Equal(exist, mgr.Exist(collection)) } } -func (suite *CollectionManagerSuite) loadAll() { +func (suite *CollectionManagerSuite) TestUpgradeRecover() { + suite.releaseAll() mgr := suite.mgr + // put old version of collections and partitions for i, collection := range suite.collections { status := querypb.LoadStatus_Loaded - if suite.loadPercentage[i] < 100 { - status = querypb.LoadStatus_Loading - } - if suite.loadTypes[i] == querypb.LoadType_LoadCollection { mgr.PutCollection(&Collection{ CollectionLoadInfo: &querypb.CollectionLoadInfo{ CollectionID: collection, ReplicaNumber: suite.replicaNumber[i], Status: status, + LoadType: querypb.LoadType_UnKnownType, // collections from old versions didn't set loadType }, - LoadPercentage: suite.loadPercentage[i], + 
LoadPercentage: suite.colLoadPercent[i], CreatedAt: time.Now(), }) } else { @@ -279,12 +318,92 @@ func (suite *CollectionManagerSuite) loadAll() { ReplicaNumber: suite.replicaNumber[i], Status: status, }, - LoadPercentage: suite.loadPercentage[i], + LoadPercentage: suite.colLoadPercent[i], CreatedAt: time.Now(), }) } } } + + // set expectations + for i, collection := range suite.collections { + if suite.loadTypes[i] == querypb.LoadType_LoadCollection { + suite.broker.EXPECT().GetPartitions(mock.Anything, collection).Return(suite.partitions[collection], nil) + } + } + + // do recovery + suite.clearMemory() + err := mgr.Recover(suite.broker) + suite.NoError(err) + suite.checkLoadResult() +} + +func (suite *CollectionManagerSuite) loadAll() { + mgr := suite.mgr + + for i, collection := range suite.collections { + status := querypb.LoadStatus_Loaded + if suite.colLoadPercent[i] < 100 { + status = querypb.LoadStatus_Loading + } + + mgr.PutCollection(&Collection{ + CollectionLoadInfo: &querypb.CollectionLoadInfo{ + CollectionID: collection, + ReplicaNumber: suite.replicaNumber[i], + Status: status, + LoadType: suite.loadTypes[i], + }, + LoadPercentage: suite.colLoadPercent[i], + CreatedAt: time.Now(), + }) + + for j, partition := range suite.partitions[collection] { + mgr.PutPartition(&Partition{ + PartitionLoadInfo: &querypb.PartitionLoadInfo{ + CollectionID: collection, + PartitionID: partition, + Status: status, + }, + LoadPercentage: suite.parLoadPercent[collection][j], + CreatedAt: time.Now(), + }) + } + } +} + +func (suite *CollectionManagerSuite) checkLoadResult() { + mgr := suite.mgr + + allCollections := mgr.GetAllCollections() + allPartitions := mgr.GetAllPartitions() + for _, collectionID := range suite.collections { + collection := mgr.GetCollection(collectionID) + suite.Equal(collectionID, collection.GetCollectionID()) + suite.Contains(allCollections, collection) + + partitions := mgr.GetPartitionsByCollection(collectionID) + suite.Len(partitions, len(suite.partitions[collectionID])) + for _, partitionID := range suite.partitions[collectionID] { + partition := mgr.GetPartition(partitionID) + suite.Equal(collectionID, partition.GetCollectionID()) + suite.Equal(partitionID, partition.GetPartitionID()) + suite.Contains(partitions, partition) + suite.Contains(allPartitions, partition) + } + } + + all := mgr.GetAll() + sort.Slice(all, func(i, j int) bool { return all[i] < all[j] }) + suite.Equal(suite.collections, all) +} + +func (suite *CollectionManagerSuite) releaseAll() { + for _, collection := range suite.collections { + err := suite.mgr.RemoveCollection(collection) + suite.NoError(err) + } } func (suite *CollectionManagerSuite) clearMemory() { diff --git a/internal/querycoordv2/meta/mock_store.go b/internal/querycoordv2/meta/mock_store.go index 7e6bc55390..31f2ed1c04 100644 --- a/internal/querycoordv2/meta/mock_store.go +++ b/internal/querycoordv2/meta/mock_store.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.16.0. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. 
package meta @@ -200,13 +200,13 @@ func (_c *MockStore_GetResourceGroups_Call) Return(_a0 []*querypb.ResourceGroup, return _c } -// ReleaseCollection provides a mock function with given fields: id -func (_m *MockStore) ReleaseCollection(id int64) error { - ret := _m.Called(id) +// ReleaseCollection provides a mock function with given fields: collection +func (_m *MockStore) ReleaseCollection(collection int64) error { + ret := _m.Called(collection) var r0 error if rf, ok := ret.Get(0).(func(int64) error); ok { - r0 = rf(id) + r0 = rf(collection) } else { r0 = ret.Error(0) } @@ -220,12 +220,12 @@ type MockStore_ReleaseCollection_Call struct { } // ReleaseCollection is a helper method to define mock.On call -// - id int64 -func (_e *MockStore_Expecter) ReleaseCollection(id interface{}) *MockStore_ReleaseCollection_Call { - return &MockStore_ReleaseCollection_Call{Call: _e.mock.On("ReleaseCollection", id)} +// - collection int64 +func (_e *MockStore_Expecter) ReleaseCollection(collection interface{}) *MockStore_ReleaseCollection_Call { + return &MockStore_ReleaseCollection_Call{Call: _e.mock.On("ReleaseCollection", collection)} } -func (_c *MockStore_ReleaseCollection_Call) Run(run func(id int64)) *MockStore_ReleaseCollection_Call { +func (_c *MockStore_ReleaseCollection_Call) Run(run func(collection int64)) *MockStore_ReleaseCollection_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(int64)) }) @@ -264,8 +264,8 @@ type MockStore_ReleasePartition_Call struct { } // ReleasePartition is a helper method to define mock.On call -// - collection int64 -// - partitions ...int64 +// - collection int64 +// - partitions ...int64 func (_e *MockStore_Expecter) ReleasePartition(collection interface{}, partitions ...interface{}) *MockStore_ReleasePartition_Call { return &MockStore_ReleasePartition_Call{Call: _e.mock.On("ReleasePartition", append([]interface{}{collection}, partitions...)...)} @@ -309,8 +309,8 @@ type MockStore_ReleaseReplica_Call struct { } // ReleaseReplica is a helper method to define mock.On call -// - collection int64 -// - replica int64 +// - collection int64 +// - replica int64 func (_e *MockStore_Expecter) ReleaseReplica(collection interface{}, replica interface{}) *MockStore_ReleaseReplica_Call { return &MockStore_ReleaseReplica_Call{Call: _e.mock.On("ReleaseReplica", collection, replica)} } @@ -347,7 +347,7 @@ type MockStore_ReleaseReplicas_Call struct { } // ReleaseReplicas is a helper method to define mock.On call -// - collectionID int64 +// - collectionID int64 func (_e *MockStore_Expecter) ReleaseReplicas(collectionID interface{}) *MockStore_ReleaseReplicas_Call { return &MockStore_ReleaseReplicas_Call{Call: _e.mock.On("ReleaseReplicas", collectionID)} } @@ -384,7 +384,7 @@ type MockStore_RemoveResourceGroup_Call struct { } // RemoveResourceGroup is a helper method to define mock.On call -// - rgName string +// - rgName string func (_e *MockStore_Expecter) RemoveResourceGroup(rgName interface{}) *MockStore_RemoveResourceGroup_Call { return &MockStore_RemoveResourceGroup_Call{Call: _e.mock.On("RemoveResourceGroup", rgName)} } @@ -401,13 +401,20 @@ func (_c *MockStore_RemoveResourceGroup_Call) Return(_a0 error) *MockStore_Remov return _c } -// SaveCollection provides a mock function with given fields: info -func (_m *MockStore) SaveCollection(info *querypb.CollectionLoadInfo) error { - ret := _m.Called(info) +// SaveCollection provides a mock function with given fields: collection, partitions +func (_m *MockStore) SaveCollection(collection *querypb.CollectionLoadInfo, 
partitions ...*querypb.PartitionLoadInfo) error { + _va := make([]interface{}, len(partitions)) + for _i := range partitions { + _va[_i] = partitions[_i] + } + var _ca []interface{} + _ca = append(_ca, collection) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) var r0 error - if rf, ok := ret.Get(0).(func(*querypb.CollectionLoadInfo) error); ok { - r0 = rf(info) + if rf, ok := ret.Get(0).(func(*querypb.CollectionLoadInfo, ...*querypb.PartitionLoadInfo) error); ok { + r0 = rf(collection, partitions...) } else { r0 = ret.Error(0) } @@ -421,14 +428,22 @@ type MockStore_SaveCollection_Call struct { } // SaveCollection is a helper method to define mock.On call -// - info *querypb.CollectionLoadInfo -func (_e *MockStore_Expecter) SaveCollection(info interface{}) *MockStore_SaveCollection_Call { - return &MockStore_SaveCollection_Call{Call: _e.mock.On("SaveCollection", info)} +// - collection *querypb.CollectionLoadInfo +// - partitions ...*querypb.PartitionLoadInfo +func (_e *MockStore_Expecter) SaveCollection(collection interface{}, partitions ...interface{}) *MockStore_SaveCollection_Call { + return &MockStore_SaveCollection_Call{Call: _e.mock.On("SaveCollection", + append([]interface{}{collection}, partitions...)...)} } -func (_c *MockStore_SaveCollection_Call) Run(run func(info *querypb.CollectionLoadInfo)) *MockStore_SaveCollection_Call { +func (_c *MockStore_SaveCollection_Call) Run(run func(collection *querypb.CollectionLoadInfo, partitions ...*querypb.PartitionLoadInfo)) *MockStore_SaveCollection_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*querypb.CollectionLoadInfo)) + variadicArgs := make([]*querypb.PartitionLoadInfo, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(*querypb.PartitionLoadInfo) + } + } + run(args[0].(*querypb.CollectionLoadInfo), variadicArgs...) 
}) return _c } @@ -464,7 +479,7 @@ type MockStore_SavePartition_Call struct { } // SavePartition is a helper method to define mock.On call -// - info ...*querypb.PartitionLoadInfo +// - info ...*querypb.PartitionLoadInfo func (_e *MockStore_Expecter) SavePartition(info ...interface{}) *MockStore_SavePartition_Call { return &MockStore_SavePartition_Call{Call: _e.mock.On("SavePartition", append([]interface{}{}, info...)...)} @@ -508,7 +523,7 @@ type MockStore_SaveReplica_Call struct { } // SaveReplica is a helper method to define mock.On call -// - replica *querypb.Replica +// - replica *querypb.Replica func (_e *MockStore_Expecter) SaveReplica(replica interface{}) *MockStore_SaveReplica_Call { return &MockStore_SaveReplica_Call{Call: _e.mock.On("SaveReplica", replica)} } @@ -551,7 +566,7 @@ type MockStore_SaveResourceGroup_Call struct { } // SaveResourceGroup is a helper method to define mock.On call -// - rgs ...*querypb.ResourceGroup +// - rgs ...*querypb.ResourceGroup func (_e *MockStore_Expecter) SaveResourceGroup(rgs ...interface{}) *MockStore_SaveResourceGroup_Call { return &MockStore_SaveResourceGroup_Call{Call: _e.mock.On("SaveResourceGroup", append([]interface{}{}, rgs...)...)} diff --git a/internal/querycoordv2/meta/store.go b/internal/querycoordv2/meta/store.go index 274fe3508d..776b2ab293 100644 --- a/internal/querycoordv2/meta/store.go +++ b/internal/querycoordv2/meta/store.go @@ -61,13 +61,23 @@ func NewMetaStore(cli kv.MetaKv) metaStore { } } -func (s metaStore) SaveCollection(info *querypb.CollectionLoadInfo) error { - k := encodeCollectionLoadInfoKey(info.GetCollectionID()) - v, err := proto.Marshal(info) +func (s metaStore) SaveCollection(collection *querypb.CollectionLoadInfo, partitions ...*querypb.PartitionLoadInfo) error { + k := encodeCollectionLoadInfoKey(collection.GetCollectionID()) + v, err := proto.Marshal(collection) if err != nil { return err } - return s.cli.Save(k, string(v)) + kvs := make(map[string]string) + for _, partition := range partitions { + key := encodePartitionLoadInfoKey(partition.GetCollectionID(), partition.GetPartitionID()) + value, err := proto.Marshal(partition) + if err != nil { + return err + } + kvs[key] = string(value) + } + kvs[k] = string(v) + return s.cli.MultiSave(kvs) } func (s metaStore) SavePartition(info ...*querypb.PartitionLoadInfo) error { @@ -211,9 +221,27 @@ func (s metaStore) GetResourceGroups() ([]*querypb.ResourceGroup, error) { return ret, nil } -func (s metaStore) ReleaseCollection(id int64) error { - k := encodeCollectionLoadInfoKey(id) - return s.cli.Remove(k) +func (s metaStore) ReleaseCollection(collection int64) error { + // obtain partitions of this collection + _, values, err := s.cli.LoadWithPrefix(fmt.Sprintf("%s/%d", PartitionLoadInfoPrefix, collection)) + if err != nil { + return err + } + partitions := make([]*querypb.PartitionLoadInfo, 0) + for _, v := range values { + info := querypb.PartitionLoadInfo{} + if err = proto.Unmarshal([]byte(v), &info); err != nil { + return err + } + partitions = append(partitions, &info) + } + // remove collection and obtained partitions + keys := lo.Map(partitions, func(partition *querypb.PartitionLoadInfo, _ int) string { + return encodePartitionLoadInfoKey(collection, partition.GetPartitionID()) + }) + k := encodeCollectionLoadInfoKey(collection) + keys = append(keys, k) + return s.cli.MultiRemove(keys) } func (s metaStore) ReleasePartition(collection int64, partitions ...int64) error { diff --git a/internal/querycoordv2/meta/store_test.go 
b/internal/querycoordv2/meta/store_test.go index bfcb547a2c..c509687e03 100644 --- a/internal/querycoordv2/meta/store_test.go +++ b/internal/querycoordv2/meta/store_test.go @@ -81,6 +81,39 @@ func (suite *StoreTestSuite) TestCollection() { suite.Len(collections, 1) } +func (suite *StoreTestSuite) TestCollectionWithPartition() { + suite.store.SaveCollection(&querypb.CollectionLoadInfo{ + CollectionID: 1, + }) + + suite.store.SaveCollection(&querypb.CollectionLoadInfo{ + CollectionID: 2, + }, &querypb.PartitionLoadInfo{ + CollectionID: 2, + PartitionID: 102, + }) + + suite.store.SaveCollection(&querypb.CollectionLoadInfo{ + CollectionID: 3, + }, &querypb.PartitionLoadInfo{ + CollectionID: 3, + PartitionID: 103, + }) + + suite.store.ReleaseCollection(1) + suite.store.ReleaseCollection(2) + + collections, err := suite.store.GetCollections() + suite.NoError(err) + suite.Len(collections, 1) + suite.Equal(int64(3), collections[0].GetCollectionID()) + partitions, err := suite.store.GetPartitions() + suite.NoError(err) + suite.Len(partitions, 1) + suite.Len(partitions[int64(3)], 1) + suite.Equal(int64(103), partitions[int64(3)][0].GetPartitionID()) +} + func (suite *StoreTestSuite) TestPartition() { suite.store.SavePartition(&querypb.PartitionLoadInfo{ PartitionID: 1, diff --git a/internal/querycoordv2/meta/target_manager.go b/internal/querycoordv2/meta/target_manager.go index e78ed0852a..874603b3b0 100644 --- a/internal/querycoordv2/meta/target_manager.go +++ b/internal/querycoordv2/meta/target_manager.go @@ -106,22 +106,10 @@ func (mgr *TargetManager) UpdateCollectionNextTarget(collectionID int64) error { mgr.rwMutex.Lock() defer mgr.rwMutex.Unlock() - partitionIDs := make([]int64, 0) - collection := mgr.meta.GetCollection(collectionID) - if collection != nil { - var err error - partitionIDs, err = mgr.broker.GetPartitions(context.Background(), collectionID) - if err != nil { - return err - } - } else { - partitions := mgr.meta.GetPartitionsByCollection(collectionID) - if partitions != nil { - partitionIDs = lo.Map(partitions, func(partition *Partition, i int) int64 { - return partition.PartitionID - }) - } - } + partitions := mgr.meta.GetPartitionsByCollection(collectionID) + partitionIDs := lo.Map(partitions, func(partition *Partition, i int) int64 { + return partition.PartitionID + }) return mgr.updateCollectionNextTarget(collectionID, partitionIDs...) } @@ -146,14 +134,27 @@ func (mgr *TargetManager) updateCollectionNextTarget(collectionID int64, partiti return nil } -func (mgr *TargetManager) PullNextTarget(broker Broker, collectionID int64, partitionIDs ...int64) (*CollectionTarget, error) { +func (mgr *TargetManager) PullNextTarget(broker Broker, collectionID int64, chosenPartitionIDs ...int64) (*CollectionTarget, error) { log.Info("start to pull next targets for partition", zap.Int64("collectionID", collectionID), - zap.Int64s("partitionIDs", partitionIDs)) + zap.Int64s("chosenPartitionIDs", chosenPartitionIDs)) channelInfos := make(map[string][]*datapb.VchannelInfo) segments := make(map[int64]*datapb.SegmentInfo, 0) - for _, partitionID := range partitionIDs { + dmChannels := make(map[string]*DmChannel) + + if len(chosenPartitionIDs) == 0 { + return NewCollectionTarget(segments, dmChannels), nil + } + + fullPartitions, err := broker.GetPartitions(context.Background(), collectionID) + if err != nil { + return nil, err + } + + // we should pull `channel targets` from all partitions because QueryNodes need to load + // the complete growing segments. 
And we should pull `segments targets` only from the chosen partitions. + for _, partitionID := range fullPartitions { log.Debug("get recovery info...", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID)) @@ -161,7 +162,12 @@ func (mgr *TargetManager) PullNextTarget(broker Broker, collectionID int64, part if err != nil { return nil, err } - + for _, info := range vChannelInfos { + channelInfos[info.GetChannelName()] = append(channelInfos[info.GetChannelName()], info) + } + if !lo.Contains(chosenPartitionIDs, partitionID) { + continue + } for _, binlog := range binlogs { segments[binlog.GetSegmentID()] = &datapb.SegmentInfo{ ID: binlog.GetSegmentID(), @@ -174,18 +180,12 @@ func (mgr *TargetManager) PullNextTarget(broker Broker, collectionID int64, part Deltalogs: binlog.GetDeltalogs(), } } - - for _, info := range vChannelInfos { - channelInfos[info.GetChannelName()] = append(channelInfos[info.GetChannelName()], info) - } } - dmChannels := make(map[string]*DmChannel) for _, infos := range channelInfos { merged := mgr.mergeDmChannelInfo(infos) dmChannels[merged.GetChannelName()] = merged } - return NewCollectionTarget(segments, dmChannels), nil } diff --git a/internal/querycoordv2/meta/target_manager_test.go b/internal/querycoordv2/meta/target_manager_test.go index ff13d6f5d3..a5f03c14d9 100644 --- a/internal/querycoordv2/meta/target_manager_test.go +++ b/internal/querycoordv2/meta/target_manager_test.go @@ -126,7 +126,7 @@ func (suite *TargetManagerSuite) SetupTest() { } suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collection, partition).Return(dmChannels, allSegments, nil) } - + suite.broker.EXPECT().GetPartitions(mock.Anything, collection).Return(suite.partitions[collection], nil) suite.mgr.UpdateCollectionNextTargetWithPartitions(collection, suite.partitions[collection]...) } } @@ -192,6 +192,7 @@ func (suite *TargetManagerSuite) TestUpdateNextTarget() { }, } + suite.broker.EXPECT().GetPartitions(mock.Anything, collectionID).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collectionID, int64(1)).Return(nextTargetChannels, nextTargetSegments, nil) suite.mgr.UpdateCollectionNextTargetWithPartitions(collectionID, int64(1)) suite.assertSegments([]int64{11, 12}, suite.mgr.GetHistoricalSegmentsByCollection(collectionID, NextTarget)) diff --git a/internal/querycoordv2/mocks/mock_querynode.go b/internal/querycoordv2/mocks/mock_querynode.go index 567ea03171..1139810331 100644 --- a/internal/querycoordv2/mocks/mock_querynode.go +++ b/internal/querycoordv2/mocks/mock_querynode.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.16.0. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. 
package mocks @@ -58,8 +58,8 @@ type MockQueryNodeServer_GetComponentStates_Call struct { } // GetComponentStates is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *milvuspb.GetComponentStatesRequest +// - _a0 context.Context +// - _a1 *milvuspb.GetComponentStatesRequest func (_e *MockQueryNodeServer_Expecter) GetComponentStates(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_GetComponentStates_Call { return &MockQueryNodeServer_GetComponentStates_Call{Call: _e.mock.On("GetComponentStates", _a0, _a1)} } @@ -105,8 +105,8 @@ type MockQueryNodeServer_GetDataDistribution_Call struct { } // GetDataDistribution is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.GetDataDistributionRequest +// - _a0 context.Context +// - _a1 *querypb.GetDataDistributionRequest func (_e *MockQueryNodeServer_Expecter) GetDataDistribution(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_GetDataDistribution_Call { return &MockQueryNodeServer_GetDataDistribution_Call{Call: _e.mock.On("GetDataDistribution", _a0, _a1)} } @@ -152,8 +152,8 @@ type MockQueryNodeServer_GetMetrics_Call struct { } // GetMetrics is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *milvuspb.GetMetricsRequest +// - _a0 context.Context +// - _a1 *milvuspb.GetMetricsRequest func (_e *MockQueryNodeServer_Expecter) GetMetrics(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_GetMetrics_Call { return &MockQueryNodeServer_GetMetrics_Call{Call: _e.mock.On("GetMetrics", _a0, _a1)} } @@ -199,8 +199,8 @@ type MockQueryNodeServer_GetSegmentInfo_Call struct { } // GetSegmentInfo is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.GetSegmentInfoRequest +// - _a0 context.Context +// - _a1 *querypb.GetSegmentInfoRequest func (_e *MockQueryNodeServer_Expecter) GetSegmentInfo(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_GetSegmentInfo_Call { return &MockQueryNodeServer_GetSegmentInfo_Call{Call: _e.mock.On("GetSegmentInfo", _a0, _a1)} } @@ -246,8 +246,8 @@ type MockQueryNodeServer_GetStatistics_Call struct { } // GetStatistics is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.GetStatisticsRequest +// - _a0 context.Context +// - _a1 *querypb.GetStatisticsRequest func (_e *MockQueryNodeServer_Expecter) GetStatistics(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_GetStatistics_Call { return &MockQueryNodeServer_GetStatistics_Call{Call: _e.mock.On("GetStatistics", _a0, _a1)} } @@ -293,8 +293,8 @@ type MockQueryNodeServer_GetStatisticsChannel_Call struct { } // GetStatisticsChannel is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *internalpb.GetStatisticsChannelRequest +// - _a0 context.Context +// - _a1 *internalpb.GetStatisticsChannelRequest func (_e *MockQueryNodeServer_Expecter) GetStatisticsChannel(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_GetStatisticsChannel_Call { return &MockQueryNodeServer_GetStatisticsChannel_Call{Call: _e.mock.On("GetStatisticsChannel", _a0, _a1)} } @@ -340,8 +340,8 @@ type MockQueryNodeServer_GetTimeTickChannel_Call struct { } // GetTimeTickChannel is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *internalpb.GetTimeTickChannelRequest +// - _a0 context.Context +// - _a1 *internalpb.GetTimeTickChannelRequest func (_e *MockQueryNodeServer_Expecter) GetTimeTickChannel(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_GetTimeTickChannel_Call { return 
&MockQueryNodeServer_GetTimeTickChannel_Call{Call: _e.mock.On("GetTimeTickChannel", _a0, _a1)} } @@ -358,6 +358,53 @@ func (_c *MockQueryNodeServer_GetTimeTickChannel_Call) Return(_a0 *milvuspb.Stri return _c } +// LoadPartitions provides a mock function with given fields: _a0, _a1 +func (_m *MockQueryNodeServer) LoadPartitions(_a0 context.Context, _a1 *querypb.LoadPartitionsRequest) (*commonpb.Status, error) { + ret := _m.Called(_a0, _a1) + + var r0 *commonpb.Status + if rf, ok := ret.Get(0).(func(context.Context, *querypb.LoadPartitionsRequest) *commonpb.Status); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*commonpb.Status) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *querypb.LoadPartitionsRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQueryNodeServer_LoadPartitions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadPartitions' +type MockQueryNodeServer_LoadPartitions_Call struct { + *mock.Call +} + +// LoadPartitions is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *querypb.LoadPartitionsRequest +func (_e *MockQueryNodeServer_Expecter) LoadPartitions(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_LoadPartitions_Call { + return &MockQueryNodeServer_LoadPartitions_Call{Call: _e.mock.On("LoadPartitions", _a0, _a1)} +} + +func (_c *MockQueryNodeServer_LoadPartitions_Call) Run(run func(_a0 context.Context, _a1 *querypb.LoadPartitionsRequest)) *MockQueryNodeServer_LoadPartitions_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*querypb.LoadPartitionsRequest)) + }) + return _c +} + +func (_c *MockQueryNodeServer_LoadPartitions_Call) Return(_a0 *commonpb.Status, _a1 error) *MockQueryNodeServer_LoadPartitions_Call { + _c.Call.Return(_a0, _a1) + return _c +} + // LoadSegments provides a mock function with given fields: _a0, _a1 func (_m *MockQueryNodeServer) LoadSegments(_a0 context.Context, _a1 *querypb.LoadSegmentsRequest) (*commonpb.Status, error) { ret := _m.Called(_a0, _a1) @@ -387,8 +434,8 @@ type MockQueryNodeServer_LoadSegments_Call struct { } // LoadSegments is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.LoadSegmentsRequest +// - _a0 context.Context +// - _a1 *querypb.LoadSegmentsRequest func (_e *MockQueryNodeServer_Expecter) LoadSegments(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_LoadSegments_Call { return &MockQueryNodeServer_LoadSegments_Call{Call: _e.mock.On("LoadSegments", _a0, _a1)} } @@ -434,8 +481,8 @@ type MockQueryNodeServer_Query_Call struct { } // Query is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.QueryRequest +// - _a0 context.Context +// - _a1 *querypb.QueryRequest func (_e *MockQueryNodeServer_Expecter) Query(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_Query_Call { return &MockQueryNodeServer_Query_Call{Call: _e.mock.On("Query", _a0, _a1)} } @@ -481,8 +528,8 @@ type MockQueryNodeServer_ReleaseCollection_Call struct { } // ReleaseCollection is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.ReleaseCollectionRequest +// - _a0 context.Context +// - _a1 *querypb.ReleaseCollectionRequest func (_e *MockQueryNodeServer_Expecter) ReleaseCollection(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_ReleaseCollection_Call { return &MockQueryNodeServer_ReleaseCollection_Call{Call: 
_e.mock.On("ReleaseCollection", _a0, _a1)} } @@ -528,8 +575,8 @@ type MockQueryNodeServer_ReleasePartitions_Call struct { } // ReleasePartitions is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.ReleasePartitionsRequest +// - _a0 context.Context +// - _a1 *querypb.ReleasePartitionsRequest func (_e *MockQueryNodeServer_Expecter) ReleasePartitions(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_ReleasePartitions_Call { return &MockQueryNodeServer_ReleasePartitions_Call{Call: _e.mock.On("ReleasePartitions", _a0, _a1)} } @@ -575,8 +622,8 @@ type MockQueryNodeServer_ReleaseSegments_Call struct { } // ReleaseSegments is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.ReleaseSegmentsRequest +// - _a0 context.Context +// - _a1 *querypb.ReleaseSegmentsRequest func (_e *MockQueryNodeServer_Expecter) ReleaseSegments(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_ReleaseSegments_Call { return &MockQueryNodeServer_ReleaseSegments_Call{Call: _e.mock.On("ReleaseSegments", _a0, _a1)} } @@ -622,8 +669,8 @@ type MockQueryNodeServer_Search_Call struct { } // Search is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.SearchRequest +// - _a0 context.Context +// - _a1 *querypb.SearchRequest func (_e *MockQueryNodeServer_Expecter) Search(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_Search_Call { return &MockQueryNodeServer_Search_Call{Call: _e.mock.On("Search", _a0, _a1)} } @@ -669,8 +716,8 @@ type MockQueryNodeServer_ShowConfigurations_Call struct { } // ShowConfigurations is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *internalpb.ShowConfigurationsRequest +// - _a0 context.Context +// - _a1 *internalpb.ShowConfigurationsRequest func (_e *MockQueryNodeServer_Expecter) ShowConfigurations(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_ShowConfigurations_Call { return &MockQueryNodeServer_ShowConfigurations_Call{Call: _e.mock.On("ShowConfigurations", _a0, _a1)} } @@ -716,8 +763,8 @@ type MockQueryNodeServer_SyncDistribution_Call struct { } // SyncDistribution is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.SyncDistributionRequest +// - _a0 context.Context +// - _a1 *querypb.SyncDistributionRequest func (_e *MockQueryNodeServer_Expecter) SyncDistribution(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_SyncDistribution_Call { return &MockQueryNodeServer_SyncDistribution_Call{Call: _e.mock.On("SyncDistribution", _a0, _a1)} } @@ -763,8 +810,8 @@ type MockQueryNodeServer_SyncReplicaSegments_Call struct { } // SyncReplicaSegments is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.SyncReplicaSegmentsRequest +// - _a0 context.Context +// - _a1 *querypb.SyncReplicaSegmentsRequest func (_e *MockQueryNodeServer_Expecter) SyncReplicaSegments(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_SyncReplicaSegments_Call { return &MockQueryNodeServer_SyncReplicaSegments_Call{Call: _e.mock.On("SyncReplicaSegments", _a0, _a1)} } @@ -810,8 +857,8 @@ type MockQueryNodeServer_UnsubDmChannel_Call struct { } // UnsubDmChannel is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.UnsubDmChannelRequest +// - _a0 context.Context +// - _a1 *querypb.UnsubDmChannelRequest func (_e *MockQueryNodeServer_Expecter) UnsubDmChannel(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_UnsubDmChannel_Call { return &MockQueryNodeServer_UnsubDmChannel_Call{Call: 
_e.mock.On("UnsubDmChannel", _a0, _a1)} } @@ -857,8 +904,8 @@ type MockQueryNodeServer_WatchDmChannels_Call struct { } // WatchDmChannels is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *querypb.WatchDmChannelsRequest +// - _a0 context.Context +// - _a1 *querypb.WatchDmChannelsRequest func (_e *MockQueryNodeServer_Expecter) WatchDmChannels(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_WatchDmChannels_Call { return &MockQueryNodeServer_WatchDmChannels_Call{Call: _e.mock.On("WatchDmChannels", _a0, _a1)} } diff --git a/internal/querycoordv2/observers/collection_observer.go b/internal/querycoordv2/observers/collection_observer.go index 6569700ecf..4945799c54 100644 --- a/internal/querycoordv2/observers/collection_observer.go +++ b/internal/querycoordv2/observers/collection_observer.go @@ -33,12 +33,11 @@ import ( type CollectionObserver struct { stopCh chan struct{} - dist *meta.DistributionManager - meta *meta.Meta - targetMgr *meta.TargetManager - targetObserver *TargetObserver - collectionLoadedCount map[int64]int - partitionLoadedCount map[int64]int + dist *meta.DistributionManager + meta *meta.Meta + targetMgr *meta.TargetManager + targetObserver *TargetObserver + partitionLoadedCount map[int64]int stopOnce sync.Once } @@ -50,13 +49,12 @@ func NewCollectionObserver( targetObserver *TargetObserver, ) *CollectionObserver { return &CollectionObserver{ - stopCh: make(chan struct{}), - dist: dist, - meta: meta, - targetMgr: targetMgr, - targetObserver: targetObserver, - collectionLoadedCount: make(map[int64]int), - partitionLoadedCount: make(map[int64]int), + stopCh: make(chan struct{}), + dist: dist, + meta: meta, + targetMgr: targetMgr, + targetObserver: targetObserver, + partitionLoadedCount: make(map[int64]int), } } @@ -115,36 +113,24 @@ func (ob *CollectionObserver) observeTimeout() { log.Info("observes partitions timeout", zap.Int("partitionNum", len(partitions))) } for collection, partitions := range partitions { - log := log.With( - zap.Int64("collectionID", collection), - ) for _, partition := range partitions { if partition.GetStatus() != querypb.LoadStatus_Loading || time.Now().Before(partition.UpdatedAt.Add(Params.QueryCoordCfg.LoadTimeoutSeconds.GetAsDuration(time.Second))) { continue } - log.Info("load partition timeout, cancel all partitions", + log.Info("load partition timeout, cancel it", + zap.Int64("collectionID", collection), zap.Int64("partitionID", partition.GetPartitionID()), zap.Duration("loadTime", time.Since(partition.CreatedAt))) - // TODO(yah01): Now, releasing part of partitions is not allowed - ob.meta.CollectionManager.RemoveCollection(partition.GetCollectionID()) - ob.meta.ReplicaManager.RemoveCollection(partition.GetCollectionID()) - ob.targetMgr.RemoveCollection(partition.GetCollectionID()) + ob.meta.CollectionManager.RemovePartition(partition.GetPartitionID()) + ob.targetMgr.RemovePartition(partition.GetCollectionID(), partition.GetPartitionID()) break } } } func (ob *CollectionObserver) observeLoadStatus() { - collections := ob.meta.CollectionManager.GetAllCollections() - for _, collection := range collections { - if collection.LoadPercentage == 100 { - continue - } - ob.observeCollectionLoadStatus(collection) - } - partitions := ob.meta.CollectionManager.GetAllPartitions() if len(partitions) > 0 { log.Info("observe partitions status", zap.Int("partitionNum", len(partitions))) @@ -153,61 +139,30 @@ func (ob *CollectionObserver) observeLoadStatus() { if partition.LoadPercentage == 100 { continue } - 
ob.observePartitionLoadStatus(partition) + replicaNum := ob.meta.GetReplicaNumber(partition.GetCollectionID()) + ob.observePartitionLoadStatus(partition, replicaNum) + } + + collections := ob.meta.CollectionManager.GetAllCollections() + for _, collection := range collections { + if collection.LoadPercentage == 100 { + continue + } + ob.observeCollectionLoadStatus(collection) } } func (ob *CollectionObserver) observeCollectionLoadStatus(collection *meta.Collection) { log := log.With(zap.Int64("collectionID", collection.GetCollectionID())) - segmentTargets := ob.targetMgr.GetHistoricalSegmentsByCollection(collection.GetCollectionID(), meta.NextTarget) - channelTargets := ob.targetMgr.GetDmChannelsByCollection(collection.GetCollectionID(), meta.NextTarget) - targetNum := len(segmentTargets) + len(channelTargets) - log.Info("collection targets", - zap.Int("segmentTargetNum", len(segmentTargets)), - zap.Int("channelTargetNum", len(channelTargets)), - zap.Int("totalTargetNum", targetNum), - zap.Int32("replicaNum", collection.GetReplicaNumber()), - ) - updated := collection.Clone() - loadedCount := 0 - if targetNum == 0 { - log.Info("No segment/channel in target need to be loaded!") - updated.LoadPercentage = 100 - } else { - for _, channel := range channelTargets { - group := utils.GroupNodesByReplica(ob.meta.ReplicaManager, - collection.GetCollectionID(), - ob.dist.LeaderViewManager.GetChannelDist(channel.GetChannelName())) - loadedCount += len(group) - } - subChannelCount := loadedCount - for _, segment := range segmentTargets { - group := utils.GroupNodesByReplica(ob.meta.ReplicaManager, - collection.GetCollectionID(), - ob.dist.LeaderViewManager.GetSealedSegmentDist(segment.GetID())) - loadedCount += len(group) - } - if loadedCount > 0 { - log.Info("collection load progress", - zap.Int("subChannelCount", subChannelCount), - zap.Int("loadSegmentCount", loadedCount-subChannelCount), - ) - } - - updated.LoadPercentage = int32(loadedCount * 100 / (targetNum * int(collection.GetReplicaNumber()))) - } - - if loadedCount <= ob.collectionLoadedCount[collection.GetCollectionID()] && - updated.LoadPercentage != 100 { - ob.collectionLoadedCount[collection.GetCollectionID()] = loadedCount + percentage := ob.meta.CollectionManager.GetCurrentLoadPercentage(collection.GetCollectionID()) + if percentage <= updated.LoadPercentage { return } - ob.collectionLoadedCount[collection.GetCollectionID()] = loadedCount + updated.LoadPercentage = percentage if updated.LoadPercentage == 100 && ob.targetObserver.Check(updated.GetCollectionID()) { - delete(ob.collectionLoadedCount, collection.GetCollectionID()) updated.Status = querypb.LoadStatus_Loaded ob.meta.CollectionManager.UpdateCollection(updated) @@ -221,7 +176,7 @@ func (ob *CollectionObserver) observeCollectionLoadStatus(collection *meta.Colle zap.Int32("collectionStatus", int32(updated.GetStatus()))) } -func (ob *CollectionObserver) observePartitionLoadStatus(partition *meta.Partition) { +func (ob *CollectionObserver) observePartitionLoadStatus(partition *meta.Partition, replicaNum int32) { log := log.With( zap.Int64("collectionID", partition.GetCollectionID()), zap.Int64("partitionID", partition.GetPartitionID()), @@ -234,7 +189,7 @@ func (ob *CollectionObserver) observePartitionLoadStatus(partition *meta.Partiti zap.Int("segmentTargetNum", len(segmentTargets)), zap.Int("channelTargetNum", len(channelTargets)), zap.Int("totalTargetNum", targetNum), - zap.Int32("replicaNum", partition.GetReplicaNumber()), + zap.Int32("replicaNum", replicaNum), ) loadedCount := 
0 @@ -261,7 +216,7 @@ func (ob *CollectionObserver) observePartitionLoadStatus(partition *meta.Partiti zap.Int("subChannelCount", subChannelCount), zap.Int("loadSegmentCount", loadedCount-subChannelCount)) } - updated.LoadPercentage = int32(loadedCount * 100 / (targetNum * int(partition.GetReplicaNumber()))) + updated.LoadPercentage = int32(loadedCount * 100 / (targetNum * int(replicaNum))) } if loadedCount <= ob.partitionLoadedCount[partition.GetPartitionID()] && diff --git a/internal/querycoordv2/observers/collection_observer_test.go b/internal/querycoordv2/observers/collection_observer_test.go index fec8699ec1..c323cccd85 100644 --- a/internal/querycoordv2/observers/collection_observer_test.go +++ b/internal/querycoordv2/observers/collection_observer_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/samber/lo" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" clientv3 "go.etcd.io/etcd/client/v3" @@ -200,7 +201,7 @@ func (suite *CollectionObserverSuite) SetupTest() { suite.broker.EXPECT().GetPartitions(mock.Anything, collection).Return(suite.partitions[collection], nil).Maybe() } suite.targetObserver.Start(context.Background()) - + suite.ob.Start(context.Background()) suite.loadAll() } @@ -212,22 +213,12 @@ func (suite *CollectionObserverSuite) TearDownTest() { func (suite *CollectionObserverSuite) TestObserve() { const ( - timeout = 2 * time.Second + timeout = 3 * time.Second ) // time before load time := suite.meta.GetCollection(suite.collections[2]).UpdatedAt // Not timeout - paramtable.Get().Save(Params.QueryCoordCfg.LoadTimeoutSeconds.Key, "2") - - segments := []*datapb.SegmentBinlogs{} - for _, segment := range suite.segments[100] { - segments = append(segments, &datapb.SegmentBinlogs{ - SegmentID: segment.GetID(), - InsertChannel: segment.GetInsertChannel(), - }) - } - - suite.ob.Start(context.Background()) + paramtable.Get().Save(Params.QueryCoordCfg.LoadTimeoutSeconds.Key, "3") // Collection 100 loaded before timeout, // collection 101 timeout @@ -282,9 +273,45 @@ func (suite *CollectionObserverSuite) TestObserve() { }, timeout*2, timeout/10) } +func (suite *CollectionObserverSuite) TestObservePartition() { + const ( + timeout = 3 * time.Second + ) + paramtable.Get().Save(Params.QueryCoordCfg.LoadTimeoutSeconds.Key, "3") + + // Partition 10 loaded + suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{ + ID: 1, + CollectionID: 100, + Channel: "100-dmc0", + Segments: map[int64]*querypb.SegmentDist{1: {NodeID: 1, Version: 0}}, + }) + suite.dist.LeaderViewManager.Update(2, &meta.LeaderView{ + ID: 2, + CollectionID: 100, + Channel: "100-dmc1", + Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2, Version: 0}}, + }) + // Partition 11 timeout + suite.dist.LeaderViewManager.Update(1, &meta.LeaderView{ + ID: 1, + CollectionID: 101, + Channel: "", + Segments: map[int64]*querypb.SegmentDist{}, + }) + + suite.Eventually(func() bool { + return suite.isPartitionLoaded(suite.partitions[100][0]) + }, timeout*2, timeout/10) + + suite.Eventually(func() bool { + return suite.isPartitionTimeout(suite.collections[1], suite.partitions[101][0]) + }, timeout*2, timeout/10) +} + func (suite *CollectionObserverSuite) isCollectionLoaded(collection int64) bool { exist := suite.meta.Exist(collection) - percentage := suite.meta.GetLoadPercentage(collection) + percentage := suite.meta.GetCurrentLoadPercentage(collection) status := suite.meta.GetStatus(collection) replicas := suite.meta.ReplicaManager.GetByCollection(collection) channels := 
suite.targetMgr.GetDmChannelsByCollection(collection, meta.CurrentTarget) @@ -298,6 +325,25 @@ func (suite *CollectionObserverSuite) isCollectionLoaded(collection int64) bool len(segments) == len(suite.segments[collection]) } +func (suite *CollectionObserverSuite) isPartitionLoaded(partitionID int64) bool { + partition := suite.meta.GetPartition(partitionID) + if partition == nil { + return false + } + collection := partition.GetCollectionID() + percentage := suite.meta.GetPartitionLoadPercentage(partitionID) + status := partition.GetStatus() + channels := suite.targetMgr.GetDmChannelsByCollection(collection, meta.CurrentTarget) + segments := suite.targetMgr.GetHistoricalSegmentsByPartition(collection, partitionID, meta.CurrentTarget) + expectedSegments := lo.Filter(suite.segments[collection], func(seg *datapb.SegmentInfo, _ int) bool { + return seg.PartitionID == partitionID + }) + return percentage == 100 && + status == querypb.LoadStatus_Loaded && + len(channels) == len(suite.channels[collection]) && + len(segments) == len(expectedSegments) +} + func (suite *CollectionObserverSuite) isCollectionTimeout(collection int64) bool { exist := suite.meta.Exist(collection) replicas := suite.meta.ReplicaManager.GetByCollection(collection) @@ -309,9 +355,14 @@ func (suite *CollectionObserverSuite) isCollectionTimeout(collection int64) bool len(segments) > 0) } +func (suite *CollectionObserverSuite) isPartitionTimeout(collection int64, partitionID int64) bool { + partition := suite.meta.GetPartition(partitionID) + segments := suite.targetMgr.GetHistoricalSegmentsByPartition(collection, partitionID, meta.CurrentTarget) + return partition == nil && len(segments) == 0 +} + func (suite *CollectionObserverSuite) isCollectionLoadedContinue(collection int64, beforeTime time.Time) bool { return suite.meta.GetCollection(collection).UpdatedAt.After(beforeTime) - } func (suite *CollectionObserverSuite) loadAll() { @@ -332,32 +383,31 @@ func (suite *CollectionObserverSuite) load(collection int64) { err = suite.meta.ReplicaManager.Put(replicas...) 
suite.NoError(err) - if suite.loadTypes[collection] == querypb.LoadType_LoadCollection { - suite.meta.PutCollection(&meta.Collection{ - CollectionLoadInfo: &querypb.CollectionLoadInfo{ + suite.meta.PutCollection(&meta.Collection{ + CollectionLoadInfo: &querypb.CollectionLoadInfo{ + CollectionID: collection, + ReplicaNumber: suite.replicaNumber[collection], + Status: querypb.LoadStatus_Loading, + LoadType: suite.loadTypes[collection], + }, + LoadPercentage: 0, + CreatedAt: time.Now(), + }) + + for _, partition := range suite.partitions[collection] { + suite.meta.PutPartition(&meta.Partition{ + PartitionLoadInfo: &querypb.PartitionLoadInfo{ CollectionID: collection, + PartitionID: partition, ReplicaNumber: suite.replicaNumber[collection], Status: querypb.LoadStatus_Loading, }, LoadPercentage: 0, CreatedAt: time.Now(), }) - } else { - for _, partition := range suite.partitions[collection] { - suite.meta.PutPartition(&meta.Partition{ - PartitionLoadInfo: &querypb.PartitionLoadInfo{ - CollectionID: collection, - PartitionID: partition, - ReplicaNumber: suite.replicaNumber[collection], - Status: querypb.LoadStatus_Loading, - }, - LoadPercentage: 0, - CreatedAt: time.Now(), - }) - } } - allSegments := make([]*datapb.SegmentBinlogs, 0) + allSegments := make(map[int64][]*datapb.SegmentBinlogs, 0) // partitionID -> segments dmChannels := make([]*datapb.VchannelInfo, 0) for _, channel := range suite.channels[collection] { dmChannels = append(dmChannels, &datapb.VchannelInfo{ @@ -367,16 +417,15 @@ func (suite *CollectionObserverSuite) load(collection int64) { } for _, segment := range suite.segments[collection] { - allSegments = append(allSegments, &datapb.SegmentBinlogs{ + allSegments[segment.PartitionID] = append(allSegments[segment.PartitionID], &datapb.SegmentBinlogs{ SegmentID: segment.GetID(), InsertChannel: segment.GetInsertChannel(), }) - } partitions := suite.partitions[collection] for _, partition := range partitions { - suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collection, partition).Return(dmChannels, allSegments, nil) + suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collection, partition).Return(dmChannels, allSegments[partition], nil) } suite.targetMgr.UpdateCollectionNextTargetWithPartitions(collection, partitions...) 
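+ // load() now registers collection and partition meta for every load type, and stubs recovery info per partition (allSegments is keyed by partition ID), matching the per-partition target pull introduced by this patch.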
} diff --git a/internal/querycoordv2/observers/leader_observer_test.go b/internal/querycoordv2/observers/leader_observer_test.go index eaf49b7b3a..6e7e702c0c 100644 --- a/internal/querycoordv2/observers/leader_observer_test.go +++ b/internal/querycoordv2/observers/leader_observer_test.go @@ -97,6 +97,7 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegments() { ChannelName: "test-insert-channel", }, } + suite.broker.EXPECT().GetPartitions(mock.Anything, int64(1)).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return( channels, segments, nil) observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1)) @@ -152,6 +153,7 @@ func (suite *LeaderObserverTestSuite) TestIgnoreSyncLoadedSegments() { ChannelName: "test-insert-channel", }, } + suite.broker.EXPECT().GetPartitions(mock.Anything, int64(1)).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return( channels, segments, nil) observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1)) @@ -209,6 +211,8 @@ func (suite *LeaderObserverTestSuite) TestIgnoreBalancedSegment() { ChannelName: "test-insert-channel", }, } + + suite.broker.EXPECT().GetPartitions(mock.Anything, int64(1)).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return( channels, segments, nil) observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1)) @@ -247,6 +251,7 @@ func (suite *LeaderObserverTestSuite) TestSyncLoadedSegmentsWithReplicas() { ChannelName: "test-insert-channel", }, } + suite.broker.EXPECT().GetPartitions(mock.Anything, int64(1)).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return( channels, segments, nil) observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1)) @@ -340,6 +345,7 @@ func (suite *LeaderObserverTestSuite) TestIgnoreSyncRemovedSegments() { ChannelName: "test-insert-channel", }, } + suite.broker.EXPECT().GetPartitions(mock.Anything, int64(1)).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return( channels, segments, nil) observer.target.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1)) diff --git a/internal/querycoordv2/observers/target_observer.go b/internal/querycoordv2/observers/target_observer.go index f3bdd0c4b7..61e0f8bf74 100644 --- a/internal/querycoordv2/observers/target_observer.go +++ b/internal/querycoordv2/observers/target_observer.go @@ -37,6 +37,7 @@ type checkRequest struct { type targetUpdateRequest struct { CollectionID int64 + PartitionIDs []int64 Notifier chan error ReadyNotifier chan struct{} } @@ -108,7 +109,7 @@ func (ob *TargetObserver) schedule(ctx context.Context) { req.Notifier <- ob.targetMgr.IsCurrentTargetExist(req.CollectionID) case req := <-ob.updateChan: - err := ob.updateNextTarget(req.CollectionID) + err := ob.updateNextTarget(req.CollectionID, req.PartitionIDs...) 
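+ // An empty req.PartitionIDs keeps the old behavior and refreshes the whole collection. A minimal caller-side sketch: + // ready, err := ob.UpdateNextTarget(collectionID, partitionIDs...) + // if err == nil { <-ready } // closed once the next target is ready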
if err != nil { close(req.ReadyNotifier) } else { @@ -148,13 +149,14 @@ func (ob *TargetObserver) check(collectionID int64) { // UpdateNextTarget updates the next target, // returns a channel which will be closed when the next target is ready, // or returns error if failed to pull target -func (ob *TargetObserver) UpdateNextTarget(collectionID int64) (chan struct{}, error) { +func (ob *TargetObserver) UpdateNextTarget(collectionID int64, partitionIDs ...int64) (chan struct{}, error) { notifier := make(chan error) readyCh := make(chan struct{}) defer close(notifier) ob.updateChan <- targetUpdateRequest{ CollectionID: collectionID, + PartitionIDs: partitionIDs, Notifier: notifier, ReadyNotifier: readyCh, } @@ -208,11 +210,16 @@ func (ob *TargetObserver) isNextTargetExpired(collectionID int64) bool { return time.Since(ob.nextTargetLastUpdate[collectionID]) > params.Params.QueryCoordCfg.NextTargetSurviveTime.GetAsDuration(time.Second) } -func (ob *TargetObserver) updateNextTarget(collectionID int64) error { - log := log.With(zap.Int64("collectionID", collectionID)) +func (ob *TargetObserver) updateNextTarget(collectionID int64, partitionIDs ...int64) error { + log := log.With(zap.Int64("collectionID", collectionID), zap.Int64s("partIDs", partitionIDs)) log.Info("observer trigger update next target") - err := ob.targetMgr.UpdateCollectionNextTarget(collectionID) + var err error + if len(partitionIDs) == 0 { + err = ob.targetMgr.UpdateCollectionNextTarget(collectionID) + } else { + err = ob.targetMgr.UpdateCollectionNextTargetWithPartitions(collectionID, partitionIDs...) + } if err != nil { log.Error("failed to update next target for collection", zap.Error(err)) diff --git a/internal/querycoordv2/observers/target_observer_test.go b/internal/querycoordv2/observers/target_observer_test.go index 838e4e2242..78aee1fb9a 100644 --- a/internal/querycoordv2/observers/target_observer_test.go +++ b/internal/querycoordv2/observers/target_observer_test.go @@ -87,6 +87,8 @@ func (suite *TargetObserverSuite) SetupTest() { err = suite.meta.CollectionManager.PutCollection(utils.CreateTestCollection(suite.collectionID, 1)) suite.NoError(err) + err = suite.meta.CollectionManager.PutPartition(utils.CreateTestPartition(suite.collectionID, suite.partitionID)) + suite.NoError(err) replicas, err := suite.meta.ReplicaManager.Spawn(suite.collectionID, 1, meta.DefaultResourceGroupName) suite.NoError(err) replicas[0].AddNode(2) @@ -115,8 +117,8 @@ func (suite *TargetObserverSuite) SetupTest() { }, } - suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, mock.Anything, mock.Anything).Return(suite.nextTargetChannels, suite.nextTargetSegments, nil) suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partitionID}, nil) + suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, mock.Anything, mock.Anything).Return(suite.nextTargetChannels, suite.nextTargetSegments, nil) } func (suite *TargetObserverSuite) TestTriggerUpdateTarget() { @@ -158,12 +160,10 @@ func (suite *TargetObserverSuite) TestTriggerUpdateTarget() { suite.targetMgr.UpdateCollectionCurrentTarget(suite.collectionID) // Pull next again + suite.broker.EXPECT().GetPartitions(mock.Anything, mock.Anything).Return([]int64{suite.partitionID}, nil) suite.broker.EXPECT(). GetRecoveryInfo(mock.Anything, mock.Anything, mock.Anything). Return(suite.nextTargetChannels, suite.nextTargetSegments, nil) - suite.broker.EXPECT(). - GetPartitions(mock.Anything, mock.Anything). 
- Return([]int64{suite.partitionID}, nil) suite.Eventually(func() bool { return len(suite.targetMgr.GetHistoricalSegmentsByCollection(suite.collectionID, meta.NextTarget)) == 3 && len(suite.targetMgr.GetDmChannelsByCollection(suite.collectionID, meta.NextTarget)) == 2 diff --git a/internal/querycoordv2/server.go b/internal/querycoordv2/server.go index 1008aaf3e9..95c786446d 100644 --- a/internal/querycoordv2/server.go +++ b/internal/querycoordv2/server.go @@ -286,8 +286,13 @@ func (s *Server) initMeta() error { s.store = meta.NewMetaStore(s.kv) s.meta = meta.NewMeta(s.idAllocator, s.store, s.nodeMgr) + s.broker = meta.NewCoordinatorBroker( + s.dataCoord, + s.rootCoord, + ) + log.Info("recover meta...") - err := s.meta.CollectionManager.Recover() + err := s.meta.CollectionManager.Recover(s.broker) if err != nil { log.Error("failed to recover collections") return err @@ -295,6 +300,7 @@ func (s *Server) initMeta() error { collections := s.meta.GetAll() log.Info("recovering collections...", zap.Int64s("collections", collections)) metrics.QueryCoordNumCollections.WithLabelValues().Set(float64(len(collections))) + metrics.QueryCoordNumPartitions.WithLabelValues().Set(float64(len(s.meta.GetAllPartitions()))) err = s.meta.ReplicaManager.Recover(collections) if err != nil { @@ -313,10 +319,6 @@ func (s *Server) initMeta() error { ChannelDistManager: meta.NewChannelDistManager(), LeaderViewManager: meta.NewLeaderViewManager(), } - s.broker = meta.NewCoordinatorBroker( - s.dataCoord, - s.rootCoord, - ) s.targetMgr = meta.NewTargetManager(s.broker, s.meta) record.Record("Server initMeta") diff --git a/internal/querycoordv2/server_test.go b/internal/querycoordv2/server_test.go index d458a1421d..f10ce0f2a9 100644 --- a/internal/querycoordv2/server_test.go +++ b/internal/querycoordv2/server_test.go @@ -23,7 +23,6 @@ import ( "time" "github.com/cockroachdb/errors" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" @@ -42,6 +41,7 @@ import ( "github.com/milvus-io/milvus/internal/querycoordv2/params" "github.com/milvus-io/milvus/internal/querycoordv2/session" "github.com/milvus-io/milvus/internal/querycoordv2/task" + "github.com/milvus-io/milvus/internal/querycoordv2/utils" "github.com/milvus-io/milvus/internal/util/commonpbutil" "github.com/milvus-io/milvus/internal/util/etcd" "github.com/milvus-io/milvus/internal/util/merr" @@ -116,6 +116,7 @@ func (suite *ServerSuite) SetupTest() { ok := suite.waitNodeUp(suite.nodes[i], 5*time.Second) suite.Require().True(ok) suite.server.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, suite.nodes[i].ID) + suite.expectLoadAndReleasePartitions(suite.nodes[i]) } suite.loadAll() @@ -158,14 +159,15 @@ func (suite *ServerSuite) TestRecoverFailed() { suite.NoError(err) broker := meta.NewMockBroker(suite.T()) - broker.EXPECT().GetPartitions(context.TODO(), int64(1000)).Return(nil, errors.New("CollectionNotExist")) - broker.EXPECT().GetRecoveryInfo(context.TODO(), int64(1001), mock.Anything).Return(nil, nil, errors.New("CollectionNotExist")) + for _, collection := range suite.collections { + broker.EXPECT().GetPartitions(mock.Anything, collection).Return([]int64{1}, nil) + broker.EXPECT().GetRecoveryInfo(context.TODO(), collection, mock.Anything).Return(nil, nil, errors.New("CollectionNotExist")) + } suite.server.targetMgr = meta.NewTargetManager(broker, suite.server.meta) err = suite.server.Start() suite.NoError(err) for _, collection := range suite.collections { - suite.False(suite.server.meta.Exist(collection)) 
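+ // The collection entry is now expected to survive a failed target recovery, so the test only asserts that the next target stays empty.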
suite.Nil(suite.server.targetMgr.GetDmChannelsByCollection(collection, meta.NextTarget)) } } @@ -259,20 +261,17 @@ func (suite *ServerSuite) TestEnableActiveStandby() { Schema: &schemapb.CollectionSchema{}, }, nil).Maybe() for _, collection := range suite.collections { - if suite.loadTypes[collection] == querypb.LoadType_LoadCollection { - req := &milvuspb.ShowPartitionsRequest{ - Base: commonpbutil.NewMsgBase( - commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions), - ), - CollectionID: collection, - } - mockRootCoord.EXPECT().ShowPartitionsInternal(mock.Anything, req).Return(&milvuspb.ShowPartitionsResponse{ - Status: merr.Status(nil), - PartitionIDs: suite.partitions[collection], - }, nil).Maybe() + req := &milvuspb.ShowPartitionsRequest{ + Base: commonpbutil.NewMsgBase( + commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions), + ), + CollectionID: collection, } + mockRootCoord.EXPECT().ShowPartitionsInternal(mock.Anything, req).Return(&milvuspb.ShowPartitionsResponse{ + Status: merr.Status(nil), + PartitionIDs: suite.partitions[collection], + }, nil).Maybe() suite.expectGetRecoverInfoByMockDataCoord(collection, mockDataCoord) - } err = suite.server.SetRootCoord(mockRootCoord) suite.NoError(err) @@ -385,6 +384,11 @@ func (suite *ServerSuite) expectGetRecoverInfo(collection int64) { } } +func (suite *ServerSuite) expectLoadAndReleasePartitions(querynode *mocks.MockQueryNode) { + querynode.EXPECT().LoadPartitions(mock.Anything, mock.Anything).Return(utils.WrapStatus(commonpb.ErrorCode_Success, ""), nil).Maybe() + querynode.EXPECT().ReleasePartitions(mock.Anything, mock.Anything).Return(utils.WrapStatus(commonpb.ErrorCode_Success, ""), nil).Maybe() +} + func (suite *ServerSuite) expectGetRecoverInfoByMockDataCoord(collection int64, dataCoord *coordMocks.DataCoord) { var ( vChannels []*datapb.VchannelInfo @@ -432,7 +436,7 @@ func (suite *ServerSuite) updateCollectionStatus(collectionID int64, status quer } collection.CollectionLoadInfo.Status = status suite.server.meta.UpdateCollection(collection) - } else { + partitions := suite.server.meta.GetPartitionsByCollection(collectionID) for _, partition := range partitions { partition := partition.Clone() @@ -488,9 +492,7 @@ func (suite *ServerSuite) hackServer() { suite.broker.EXPECT().GetCollectionSchema(mock.Anything, mock.Anything).Return(&schemapb.CollectionSchema{}, nil).Maybe() for _, collection := range suite.collections { - if suite.loadTypes[collection] == querypb.LoadType_LoadCollection { - suite.broker.EXPECT().GetPartitions(mock.Anything, collection).Return(suite.partitions[collection], nil).Maybe() - } + suite.broker.EXPECT().GetPartitions(mock.Anything, collection).Return(suite.partitions[collection], nil).Maybe() suite.expectGetRecoverInfo(collection) } log.Debug("server hacked") diff --git a/internal/querycoordv2/services.go b/internal/querycoordv2/services.go index fedcb8d2be..5c86548a58 100644 --- a/internal/querycoordv2/services.go +++ b/internal/querycoordv2/services.go @@ -76,9 +76,6 @@ func (s *Server) ShowCollections(ctx context.Context, req *querypb.ShowCollectio for _, collection := range s.meta.GetAllCollections() { collectionSet.Insert(collection.GetCollectionID()) } - for _, partition := range s.meta.GetAllPartitions() { - collectionSet.Insert(partition.GetCollectionID()) - } isGetAll = true } collections := collectionSet.Collect() @@ -92,7 +89,7 @@ func (s *Server) ShowCollections(ctx context.Context, req *querypb.ShowCollectio for _, collectionID := range collections { log := 
log.With(zap.Int64("collectionID", collectionID)) - percentage := s.meta.CollectionManager.GetLoadPercentage(collectionID) + percentage := s.meta.CollectionManager.GetCollectionLoadPercentage(collectionID) if percentage < 0 { if isGetAll { // The collection is released during this, @@ -139,67 +136,33 @@ func (s *Server) ShowPartitions(ctx context.Context, req *querypb.ShowPartitions } defer meta.GlobalFailedLoadCache.TryExpire() - // TODO(yah01): now, for load collection, the percentage of partition is equal to the percentage of collection, - // we can calculates the real percentage of partitions partitions := req.GetPartitionIDs() percentages := make([]int64, 0) - isReleased := false - switch s.meta.GetLoadType(req.GetCollectionID()) { - case querypb.LoadType_LoadCollection: - percentage := s.meta.GetLoadPercentage(req.GetCollectionID()) - if percentage < 0 { - isReleased = true - break - } - if len(partitions) == 0 { - var err error - partitions, err = s.broker.GetPartitions(ctx, req.GetCollectionID()) + if len(partitions) == 0 { + partitions = lo.Map(s.meta.GetPartitionsByCollection(req.GetCollectionID()), func(partition *meta.Partition, _ int) int64 { + return partition.GetPartitionID() + }) + } + for _, partitionID := range partitions { + percentage := s.meta.GetPartitionLoadPercentage(partitionID) + if percentage < 0 { + err := meta.GlobalFailedLoadCache.Get(req.GetCollectionID()) if err != nil { - msg := "failed to show partitions" - log.Warn(msg, zap.Error(err)) + status := merr.Status(err) + status.ErrorCode = commonpb.ErrorCode_InsufficientMemoryToLoad + log.Warn("show partition failed", zap.Error(err)) return &querypb.ShowPartitionsResponse{ - Status: utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, msg, err), + Status: status, }, nil } - } - for range partitions { - percentages = append(percentages, int64(percentage)) - } - - case querypb.LoadType_LoadPartition: - if len(partitions) == 0 { - partitions = lo.Map(s.meta.GetPartitionsByCollection(req.GetCollectionID()), func(partition *meta.Partition, _ int) int64 { - return partition.GetPartitionID() - }) - } - for _, partitionID := range partitions { - partition := s.meta.GetPartition(partitionID) - if partition == nil { - isReleased = true - break - } - percentages = append(percentages, int64(partition.LoadPercentage)) - } - - default: - isReleased = true - } - - if isReleased { - err := meta.GlobalFailedLoadCache.Get(req.GetCollectionID()) - if err != nil { - status := merr.Status(err) - status.ErrorCode = commonpb.ErrorCode_InsufficientMemoryToLoad + msg := fmt.Sprintf("partition %d has not been loaded to memory or load failed", partitionID) + log.Warn(msg) return &querypb.ShowPartitionsResponse{ - Status: status, + Status: utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, msg), }, nil } - msg := fmt.Sprintf("collection %v has not been loaded into QueryNode", req.GetCollectionID()) - log.Warn(msg) - return &querypb.ShowPartitionsResponse{ - Status: utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, msg), - }, nil + percentages = append(percentages, int64(percentage)) } return &querypb.ShowPartitionsResponse{ @@ -246,7 +209,9 @@ func (s *Server) LoadCollection(ctx context.Context, req *querypb.LoadCollection req, s.dist, s.meta, + s.cluster, s.targetMgr, + s.targetObserver, s.broker, s.nodeMgr, ) @@ -340,7 +305,9 @@ func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitions req, s.dist, s.meta, + s.cluster, s.targetMgr, + s.targetObserver, s.broker, s.nodeMgr, ) @@ -401,6 +368,7 @@ func (s 
*Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart req, s.dist, s.meta, + s.cluster, s.targetMgr, s.targetObserver, ) @@ -528,6 +496,31 @@ func (s *Server) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfo }, nil } +func (s *Server) SyncNewCreatedPartition(ctx context.Context, req *querypb.SyncNewCreatedPartitionRequest) (*commonpb.Status, error) { + log := log.Ctx(ctx).With( + zap.Int64("collectionID", req.GetCollectionID()), + zap.Int64("partitionID", req.GetPartitionID()), + ) + + log.Info("received request to sync newly created partition") + + failedMsg := "failed to sync newly created partition" + if s.status.Load() != commonpb.StateCode_Healthy { + log.Warn(failedMsg, zap.Error(ErrNotHealthy)) + return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, failedMsg, ErrNotHealthy), nil + } + + syncJob := job.NewSyncNewCreatedPartitionJob(ctx, req, s.meta, s.cluster) + s.jobScheduler.Add(syncJob) + err := syncJob.Wait() + if err != nil && !errors.Is(err, job.ErrPartitionNotInTarget) { + log.Warn(failedMsg, zap.Error(err)) + return utils.WrapStatus(errCode(err), failedMsg, err), nil + } + + return merr.Status(nil), nil +} + // refreshCollection must be called after loading a collection. It looks for new segments that are not loaded yet and // tries to load them up. It returns when all segments of the given collection are loaded, or when error happens. // Note that a collection's loading progress always stays at 100% after a successful load and will not get updated @@ -547,7 +540,7 @@ func (s *Server) refreshCollection(ctx context.Context, collID int64) (*commonpb } // Check that collection is fully loaded. - if s.meta.CollectionManager.GetLoadPercentage(collID) != 100 { + if s.meta.CollectionManager.GetCurrentLoadPercentage(collID) != 100 { errMsg := "a collection must be fully loaded before refreshing" log.Warn(errMsg) return &commonpb.Status{ @@ -601,7 +594,7 @@ func (s *Server) refreshPartitions(ctx context.Context, collID int64, partIDs [] } // Check that all partitions are fully loaded.
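+ // GetCurrentLoadPercentage presumably derives collection progress from its partitions' load state, so partially loaded collections fail this check as well.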
- if s.meta.CollectionManager.GetLoadPercentage(collID) != 100 { + if s.meta.CollectionManager.GetCurrentLoadPercentage(collID) != 100 { errMsg := "partitions must be fully loaded before refreshing" log.Warn(errMsg) return &commonpb.Status{ @@ -671,7 +664,7 @@ func (s *Server) LoadBalance(ctx context.Context, req *querypb.LoadBalanceReques log.Warn(msg, zap.Int("source-nodes-num", len(req.GetSourceNodeIDs()))) return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, msg), nil } - if s.meta.CollectionManager.GetLoadPercentage(req.GetCollectionID()) < 100 { + if s.meta.CollectionManager.GetCurrentLoadPercentage(req.GetCollectionID()) < 100 { msg := "can't balance segments of not fully loaded collection" log.Warn(msg) return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, msg), nil @@ -845,7 +838,7 @@ func (s *Server) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeade Status: merr.Status(nil), } - if s.meta.CollectionManager.GetLoadPercentage(req.GetCollectionID()) < 100 { + if s.meta.CollectionManager.GetCurrentLoadPercentage(req.GetCollectionID()) < 100 { msg := fmt.Sprintf("collection %v is not fully loaded", req.GetCollectionID()) log.Warn(msg) resp.Status = utils.WrapStatus(commonpb.ErrorCode_NoReplicaAvailable, msg) diff --git a/internal/querycoordv2/services_test.go b/internal/querycoordv2/services_test.go index b76b05d830..4d03ce8395 100644 --- a/internal/querycoordv2/services_test.go +++ b/internal/querycoordv2/services_test.go @@ -142,6 +142,7 @@ func (suite *ServiceSuite) SetupTest() { suite.dist, suite.broker, ) + suite.targetObserver.Start(context.Background()) for _, node := range suite.nodes { suite.nodeMgr.Add(session.NewNodeInfo(node, "localhost")) err := suite.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, node) @@ -311,7 +312,6 @@ func (suite *ServiceSuite) TestLoadCollection() { // Test load all collections for _, collection := range suite.collections { - suite.broker.EXPECT().GetPartitions(mock.Anything, collection).Return(suite.partitions[collection], nil) suite.expectGetRecoverInfo(collection) req := &querypb.LoadCollectionRequest{ @@ -776,6 +776,10 @@ func (suite *ServiceSuite) TestLoadPartition() { // Test load all partitions for _, collection := range suite.collections { + suite.broker.EXPECT().GetPartitions(mock.Anything, collection). + Return(append(suite.partitions[collection], 999), nil) + suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, collection, int64(999)). + Return(nil, nil, nil) suite.expectGetRecoverInfo(collection) req := &querypb.LoadPartitionsRequest{ @@ -808,6 +812,36 @@ func (suite *ServiceSuite) TestLoadPartition() { suite.NoError(err) suite.Equal(commonpb.ErrorCode_IllegalArgument, resp.ErrorCode) + // Test load with collection loaded + for _, collection := range suite.collections { + if suite.loadTypes[collection] != querypb.LoadType_LoadCollection { + continue + } + req := &querypb.LoadPartitionsRequest{ + CollectionID: collection, + PartitionIDs: suite.partitions[collection], + } + resp, err := server.LoadPartitions(ctx, req) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode) + } + + // Test load with more partitions + suite.cluster.EXPECT().LoadPartitions(mock.Anything, mock.Anything, mock.Anything). 
+ Return(utils.WrapStatus(commonpb.ErrorCode_Success, ""), nil) + for _, collection := range suite.collections { + if suite.loadTypes[collection] != querypb.LoadType_LoadPartition { + continue + } + req := &querypb.LoadPartitionsRequest{ + CollectionID: collection, + PartitionIDs: append(suite.partitions[collection], 999), + } + resp, err := server.LoadPartitions(ctx, req) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode) + } + // Test when server is not healthy server.UpdateStateCode(commonpb.StateCode_Initializing) req = &querypb.LoadPartitionsRequest{ @@ -836,36 +870,6 @@ func (suite *ServiceSuite) TestLoadPartitionFailed() { suite.Equal(commonpb.ErrorCode_IllegalArgument, resp.ErrorCode) suite.Contains(resp.Reason, job.ErrLoadParameterMismatched.Error()) } - - // Test load with collection loaded - for _, collection := range suite.collections { - if suite.loadTypes[collection] != querypb.LoadType_LoadCollection { - continue - } - req := &querypb.LoadPartitionsRequest{ - CollectionID: collection, - PartitionIDs: suite.partitions[collection], - } - resp, err := server.LoadPartitions(ctx, req) - suite.NoError(err) - suite.Equal(commonpb.ErrorCode_IllegalArgument, resp.ErrorCode) - suite.Contains(resp.Reason, job.ErrLoadParameterMismatched.Error()) - } - - // Test load with more partitions - for _, collection := range suite.collections { - if suite.loadTypes[collection] != querypb.LoadType_LoadPartition { - continue - } - req := &querypb.LoadPartitionsRequest{ - CollectionID: collection, - PartitionIDs: append(suite.partitions[collection], 999), - } - resp, err := server.LoadPartitions(ctx, req) - suite.NoError(err) - suite.Equal(commonpb.ErrorCode_IllegalArgument, resp.ErrorCode) - suite.Contains(resp.Reason, job.ErrLoadParameterMismatched.Error()) - } } func (suite *ServiceSuite) TestReleaseCollection() { @@ -910,6 +914,8 @@ func (suite *ServiceSuite) TestReleasePartition() { server := suite.server // Test release all partitions + suite.cluster.EXPECT().ReleasePartitions(mock.Anything, mock.Anything, mock.Anything). + Return(utils.WrapStatus(commonpb.ErrorCode_Success, ""), nil) for _, collection := range suite.collections { req := &querypb.ReleasePartitionsRequest{ CollectionID: collection, @@ -917,11 +923,7 @@ func (suite *ServiceSuite) TestReleasePartition() { } resp, err := server.ReleasePartitions(ctx, req) suite.NoError(err) - if suite.loadTypes[collection] == querypb.LoadType_LoadCollection { - suite.Equal(commonpb.ErrorCode_UnexpectedError, resp.ErrorCode) - } else { - suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode) - } + suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode) suite.assertPartitionLoaded(collection, suite.partitions[collection][1:]...) } @@ -933,11 +935,7 @@ func (suite *ServiceSuite) TestReleasePartition() { } resp, err := server.ReleasePartitions(ctx, req) suite.NoError(err) - if suite.loadTypes[collection] == querypb.LoadType_LoadCollection { - suite.Equal(commonpb.ErrorCode_UnexpectedError, resp.ErrorCode) - } else { - suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode) - } + suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode) suite.assertPartitionLoaded(collection, suite.partitions[collection][1:]...) } @@ -957,7 +955,6 @@ func (suite *ServiceSuite) TestRefreshCollection() { defer cancel() server := suite.server - suite.targetObserver.Start(context.Background()) suite.server.collectionObserver.Start(context.Background()) // Test refresh all collections. 
@@ -970,7 +967,6 @@ func (suite *ServiceSuite) TestRefreshCollection() { // Test load all collections for _, collection := range suite.collections { - suite.broker.EXPECT().GetPartitions(mock.Anything, collection).Return(suite.partitions[collection], nil) suite.expectGetRecoverInfo(collection) req := &querypb.LoadCollectionRequest{ @@ -1023,7 +1019,6 @@ func (suite *ServiceSuite) TestRefreshPartitions() { defer cancel() server := suite.server - suite.targetObserver.Start(context.Background()) suite.server.collectionObserver.Start(context.Background()) // Test refresh all partitions. @@ -1636,8 +1631,6 @@ func (suite *ServiceSuite) loadAll() { for _, collection := range suite.collections { suite.expectGetRecoverInfo(collection) if suite.loadTypes[collection] == querypb.LoadType_LoadCollection { - suite.broker.EXPECT().GetPartitions(mock.Anything, collection).Return(suite.partitions[collection], nil) - req := &querypb.LoadCollectionRequest{ CollectionID: collection, ReplicaNumber: suite.replicaNumber[collection], @@ -1647,7 +1640,9 @@ func (suite *ServiceSuite) loadAll() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -1669,7 +1664,9 @@ func (suite *ServiceSuite) loadAll() { req, suite.dist, suite.meta, + suite.cluster, suite.targetMgr, + suite.targetObserver, suite.broker, suite.nodeMgr, ) @@ -1741,6 +1738,7 @@ func (suite *ServiceSuite) assertSegments(collection int64, segments []*querypb. } func (suite *ServiceSuite) expectGetRecoverInfo(collection int64) { + suite.broker.EXPECT().GetPartitions(mock.Anything, collection).Return(suite.partitions[collection], nil) vChannels := []*datapb.VchannelInfo{} for _, channel := range suite.channels[collection] { vChannels = append(vChannels, &datapb.VchannelInfo{ @@ -1848,7 +1846,7 @@ func (suite *ServiceSuite) updateCollectionStatus(collectionID int64, status que } collection.CollectionLoadInfo.Status = status suite.meta.UpdateCollection(collection) - } else { + partitions := suite.meta.GetPartitionsByCollection(collectionID) for _, partition := range partitions { partition := partition.Clone() @@ -1869,6 +1867,10 @@ func (suite *ServiceSuite) fetchHeartbeats(time time.Time) { } } +func (suite *ServiceSuite) TearDownTest() { + suite.targetObserver.Stop() +} + func TestService(t *testing.T) { suite.Run(t, new(ServiceSuite)) } diff --git a/internal/querycoordv2/session/cluster.go b/internal/querycoordv2/session/cluster.go index cc430a3b84..cc2ba043e7 100644 --- a/internal/querycoordv2/session/cluster.go +++ b/internal/querycoordv2/session/cluster.go @@ -53,6 +53,8 @@ type Cluster interface { UnsubDmChannel(ctx context.Context, nodeID int64, req *querypb.UnsubDmChannelRequest) (*commonpb.Status, error) LoadSegments(ctx context.Context, nodeID int64, req *querypb.LoadSegmentsRequest) (*commonpb.Status, error) ReleaseSegments(ctx context.Context, nodeID int64, req *querypb.ReleaseSegmentsRequest) (*commonpb.Status, error) + LoadPartitions(ctx context.Context, nodeID int64, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) + ReleasePartitions(ctx context.Context, nodeID int64, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) GetDataDistribution(ctx context.Context, nodeID int64, req *querypb.GetDataDistributionRequest) (*querypb.GetDataDistributionResponse, error) GetMetrics(ctx context.Context, nodeID int64, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) SyncDistribution(ctx context.Context, nodeID int64, req 
*querypb.SyncDistributionRequest) (*commonpb.Status, error) @@ -174,6 +176,34 @@ func (c *QueryCluster) ReleaseSegments(ctx context.Context, nodeID int64, req *q return status, err } +func (c *QueryCluster) LoadPartitions(ctx context.Context, nodeID int64, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) { + var status *commonpb.Status + var err error + err1 := c.send(ctx, nodeID, func(cli types.QueryNode) { + req := proto.Clone(req).(*querypb.LoadPartitionsRequest) + req.Base.TargetID = nodeID + status, err = cli.LoadPartitions(ctx, req) + }) + if err1 != nil { + return nil, err1 + } + return status, err +} + +func (c *QueryCluster) ReleasePartitions(ctx context.Context, nodeID int64, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) { + var status *commonpb.Status + var err error + err1 := c.send(ctx, nodeID, func(cli types.QueryNode) { + req := proto.Clone(req).(*querypb.ReleasePartitionsRequest) + req.Base.TargetID = nodeID + status, err = cli.ReleasePartitions(ctx, req) + }) + if err1 != nil { + return nil, err1 + } + return status, err +} + func (c *QueryCluster) GetDataDistribution(ctx context.Context, nodeID int64, req *querypb.GetDataDistributionRequest) (*querypb.GetDataDistributionResponse, error) { var resp *querypb.GetDataDistributionResponse var err error diff --git a/internal/querycoordv2/session/cluster_test.go b/internal/querycoordv2/session/cluster_test.go index bbc06d374c..35245f72fa 100644 --- a/internal/querycoordv2/session/cluster_test.go +++ b/internal/querycoordv2/session/cluster_test.go @@ -124,6 +124,14 @@ func (suite *ClusterTestSuite) createDefaultMockServer() querypb.QueryNodeServer mock.Anything, mock.AnythingOfType("*querypb.ReleaseSegmentsRequest"), ).Maybe().Return(succStatus, nil) + svr.EXPECT().LoadPartitions( + mock.Anything, + mock.AnythingOfType("*querypb.LoadPartitionsRequest"), + ).Maybe().Return(succStatus, nil) + svr.EXPECT().ReleasePartitions( + mock.Anything, + mock.AnythingOfType("*querypb.ReleasePartitionsRequest"), + ).Maybe().Return(succStatus, nil) svr.EXPECT().GetDataDistribution( mock.Anything, mock.AnythingOfType("*querypb.GetDataDistributionRequest"), @@ -169,6 +177,14 @@ func (suite *ClusterTestSuite) createFailedMockServer() querypb.QueryNodeServer mock.Anything, mock.AnythingOfType("*querypb.ReleaseSegmentsRequest"), ).Maybe().Return(failStatus, nil) + svr.EXPECT().LoadPartitions( + mock.Anything, + mock.AnythingOfType("*querypb.LoadPartitionsRequest"), + ).Maybe().Return(failStatus, nil) + svr.EXPECT().ReleasePartitions( + mock.Anything, + mock.AnythingOfType("*querypb.ReleasePartitionsRequest"), + ).Maybe().Return(failStatus, nil) svr.EXPECT().GetDataDistribution( mock.Anything, mock.AnythingOfType("*querypb.GetDataDistributionRequest"), @@ -284,6 +300,45 @@ func (suite *ClusterTestSuite) TestReleaseSegments() { }, status) } +func (suite *ClusterTestSuite) TestLoadAndReleasePartitions() { + ctx := context.TODO() + status, err := suite.cluster.LoadPartitions(ctx, 0, &querypb.LoadPartitionsRequest{ + Base: &commonpb.MsgBase{}, + }) + suite.NoError(err) + suite.Equal(&commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + Reason: "", + }, status) + + status, err = suite.cluster.LoadPartitions(ctx, 1, &querypb.LoadPartitionsRequest{ + Base: &commonpb.MsgBase{}, + }) + suite.NoError(err) + suite.Equal(&commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: "unexpected error", + }, status) + + status, err = suite.cluster.ReleasePartitions(ctx, 0, &querypb.ReleasePartitionsRequest{ + 
Base: &commonpb.MsgBase{}, + }) + suite.NoError(err) + suite.Equal(&commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + Reason: "", + }, status) + + status, err = suite.cluster.ReleasePartitions(ctx, 1, &querypb.ReleasePartitionsRequest{ + Base: &commonpb.MsgBase{}, + }) + suite.NoError(err) + suite.Equal(&commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: "unexpected error", + }, status) +} + func (suite *ClusterTestSuite) TestGetDataDistribution() { ctx := context.TODO() resp, err := suite.cluster.GetDataDistribution(ctx, 0, &querypb.GetDataDistributionRequest{ diff --git a/internal/querycoordv2/session/mock_cluster.go b/internal/querycoordv2/session/mock_cluster.go index d67a6b0292..b541b188c9 100644 --- a/internal/querycoordv2/session/mock_cluster.go +++ b/internal/querycoordv2/session/mock_cluster.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.16.0. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package session @@ -56,8 +56,8 @@ type MockCluster_GetComponentStates_Call struct { } // GetComponentStates is a helper method to define mock.On call -// - ctx context.Context -// - nodeID int64 +// - ctx context.Context +// - nodeID int64 func (_e *MockCluster_Expecter) GetComponentStates(ctx interface{}, nodeID interface{}) *MockCluster_GetComponentStates_Call { return &MockCluster_GetComponentStates_Call{Call: _e.mock.On("GetComponentStates", ctx, nodeID)} } @@ -103,9 +103,9 @@ type MockCluster_GetDataDistribution_Call struct { } // GetDataDistribution is a helper method to define mock.On call -// - ctx context.Context -// - nodeID int64 -// - req *querypb.GetDataDistributionRequest +// - ctx context.Context +// - nodeID int64 +// - req *querypb.GetDataDistributionRequest func (_e *MockCluster_Expecter) GetDataDistribution(ctx interface{}, nodeID interface{}, req interface{}) *MockCluster_GetDataDistribution_Call { return &MockCluster_GetDataDistribution_Call{Call: _e.mock.On("GetDataDistribution", ctx, nodeID, req)} } @@ -151,9 +151,9 @@ type MockCluster_GetMetrics_Call struct { } // GetMetrics is a helper method to define mock.On call -// - ctx context.Context -// - nodeID int64 -// - req *milvuspb.GetMetricsRequest +// - ctx context.Context +// - nodeID int64 +// - req *milvuspb.GetMetricsRequest func (_e *MockCluster_Expecter) GetMetrics(ctx interface{}, nodeID interface{}, req interface{}) *MockCluster_GetMetrics_Call { return &MockCluster_GetMetrics_Call{Call: _e.mock.On("GetMetrics", ctx, nodeID, req)} } @@ -170,6 +170,54 @@ func (_c *MockCluster_GetMetrics_Call) Return(_a0 *milvuspb.GetMetricsResponse, return _c } +// LoadPartitions provides a mock function with given fields: ctx, nodeID, req +func (_m *MockCluster) LoadPartitions(ctx context.Context, nodeID int64, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) { + ret := _m.Called(ctx, nodeID, req) + + var r0 *commonpb.Status + if rf, ok := ret.Get(0).(func(context.Context, int64, *querypb.LoadPartitionsRequest) *commonpb.Status); ok { + r0 = rf(ctx, nodeID, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*commonpb.Status) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int64, *querypb.LoadPartitionsRequest) error); ok { + r1 = rf(ctx, nodeID, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockCluster_LoadPartitions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadPartitions' +type MockCluster_LoadPartitions_Call struct { + *mock.Call +} + +// LoadPartitions 
is a helper method to define mock.On call +// - ctx context.Context +// - nodeID int64 +// - req *querypb.LoadPartitionsRequest +func (_e *MockCluster_Expecter) LoadPartitions(ctx interface{}, nodeID interface{}, req interface{}) *MockCluster_LoadPartitions_Call { + return &MockCluster_LoadPartitions_Call{Call: _e.mock.On("LoadPartitions", ctx, nodeID, req)} +} + +func (_c *MockCluster_LoadPartitions_Call) Run(run func(ctx context.Context, nodeID int64, req *querypb.LoadPartitionsRequest)) *MockCluster_LoadPartitions_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int64), args[2].(*querypb.LoadPartitionsRequest)) + }) + return _c +} + +func (_c *MockCluster_LoadPartitions_Call) Return(_a0 *commonpb.Status, _a1 error) *MockCluster_LoadPartitions_Call { + _c.Call.Return(_a0, _a1) + return _c +} + // LoadSegments provides a mock function with given fields: ctx, nodeID, req func (_m *MockCluster) LoadSegments(ctx context.Context, nodeID int64, req *querypb.LoadSegmentsRequest) (*commonpb.Status, error) { ret := _m.Called(ctx, nodeID, req) @@ -199,9 +247,9 @@ type MockCluster_LoadSegments_Call struct { } // LoadSegments is a helper method to define mock.On call -// - ctx context.Context -// - nodeID int64 -// - req *querypb.LoadSegmentsRequest +// - ctx context.Context +// - nodeID int64 +// - req *querypb.LoadSegmentsRequest func (_e *MockCluster_Expecter) LoadSegments(ctx interface{}, nodeID interface{}, req interface{}) *MockCluster_LoadSegments_Call { return &MockCluster_LoadSegments_Call{Call: _e.mock.On("LoadSegments", ctx, nodeID, req)} } @@ -218,6 +266,54 @@ func (_c *MockCluster_LoadSegments_Call) Return(_a0 *commonpb.Status, _a1 error) return _c } +// ReleasePartitions provides a mock function with given fields: ctx, nodeID, req +func (_m *MockCluster) ReleasePartitions(ctx context.Context, nodeID int64, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) { + ret := _m.Called(ctx, nodeID, req) + + var r0 *commonpb.Status + if rf, ok := ret.Get(0).(func(context.Context, int64, *querypb.ReleasePartitionsRequest) *commonpb.Status); ok { + r0 = rf(ctx, nodeID, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*commonpb.Status) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int64, *querypb.ReleasePartitionsRequest) error); ok { + r1 = rf(ctx, nodeID, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockCluster_ReleasePartitions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReleasePartitions' +type MockCluster_ReleasePartitions_Call struct { + *mock.Call +} + +// ReleasePartitions is a helper method to define mock.On call +// - ctx context.Context +// - nodeID int64 +// - req *querypb.ReleasePartitionsRequest +func (_e *MockCluster_Expecter) ReleasePartitions(ctx interface{}, nodeID interface{}, req interface{}) *MockCluster_ReleasePartitions_Call { + return &MockCluster_ReleasePartitions_Call{Call: _e.mock.On("ReleasePartitions", ctx, nodeID, req)} +} + +func (_c *MockCluster_ReleasePartitions_Call) Run(run func(ctx context.Context, nodeID int64, req *querypb.ReleasePartitionsRequest)) *MockCluster_ReleasePartitions_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int64), args[2].(*querypb.ReleasePartitionsRequest)) + }) + return _c +} + +func (_c *MockCluster_ReleasePartitions_Call) Return(_a0 *commonpb.Status, _a1 error) *MockCluster_ReleasePartitions_Call { + _c.Call.Return(_a0, _a1) + 
return _c +} + // ReleaseSegments provides a mock function with given fields: ctx, nodeID, req func (_m *MockCluster) ReleaseSegments(ctx context.Context, nodeID int64, req *querypb.ReleaseSegmentsRequest) (*commonpb.Status, error) { ret := _m.Called(ctx, nodeID, req) @@ -247,9 +343,9 @@ type MockCluster_ReleaseSegments_Call struct { } // ReleaseSegments is a helper method to define mock.On call -// - ctx context.Context -// - nodeID int64 -// - req *querypb.ReleaseSegmentsRequest +// - ctx context.Context +// - nodeID int64 +// - req *querypb.ReleaseSegmentsRequest func (_e *MockCluster_Expecter) ReleaseSegments(ctx interface{}, nodeID interface{}, req interface{}) *MockCluster_ReleaseSegments_Call { return &MockCluster_ReleaseSegments_Call{Call: _e.mock.On("ReleaseSegments", ctx, nodeID, req)} } @@ -277,7 +373,7 @@ type MockCluster_Start_Call struct { } // Start is a helper method to define mock.On call -// - ctx context.Context +// - ctx context.Context func (_e *MockCluster_Expecter) Start(ctx interface{}) *MockCluster_Start_Call { return &MockCluster_Start_Call{Call: _e.mock.On("Start", ctx)} } @@ -350,9 +446,9 @@ type MockCluster_SyncDistribution_Call struct { } // SyncDistribution is a helper method to define mock.On call -// - ctx context.Context -// - nodeID int64 -// - req *querypb.SyncDistributionRequest +// - ctx context.Context +// - nodeID int64 +// - req *querypb.SyncDistributionRequest func (_e *MockCluster_Expecter) SyncDistribution(ctx interface{}, nodeID interface{}, req interface{}) *MockCluster_SyncDistribution_Call { return &MockCluster_SyncDistribution_Call{Call: _e.mock.On("SyncDistribution", ctx, nodeID, req)} } @@ -398,9 +494,9 @@ type MockCluster_UnsubDmChannel_Call struct { } // UnsubDmChannel is a helper method to define mock.On call -// - ctx context.Context -// - nodeID int64 -// - req *querypb.UnsubDmChannelRequest +// - ctx context.Context +// - nodeID int64 +// - req *querypb.UnsubDmChannelRequest func (_e *MockCluster_Expecter) UnsubDmChannel(ctx interface{}, nodeID interface{}, req interface{}) *MockCluster_UnsubDmChannel_Call { return &MockCluster_UnsubDmChannel_Call{Call: _e.mock.On("UnsubDmChannel", ctx, nodeID, req)} } @@ -446,9 +542,9 @@ type MockCluster_WatchDmChannels_Call struct { } // WatchDmChannels is a helper method to define mock.On call -// - ctx context.Context -// - nodeID int64 -// - req *querypb.WatchDmChannelsRequest +// - ctx context.Context +// - nodeID int64 +// - req *querypb.WatchDmChannelsRequest func (_e *MockCluster_Expecter) WatchDmChannels(ctx interface{}, nodeID interface{}, req interface{}) *MockCluster_WatchDmChannels_Call { return &MockCluster_WatchDmChannels_Call{Call: _e.mock.On("WatchDmChannels", ctx, nodeID, req)} } diff --git a/internal/querycoordv2/task/executor.go b/internal/querycoordv2/task/executor.go index b5204f6bca..1386472d98 100644 --- a/internal/querycoordv2/task/executor.go +++ b/internal/querycoordv2/task/executor.go @@ -247,7 +247,7 @@ func (ex *Executor) loadSegment(task *SegmentTask, step int) error { log.Warn("failed to get schema of collection", zap.Error(err)) return err } - partitions, err := utils.GetPartitions(ex.meta.CollectionManager, ex.broker, task.CollectionID()) + partitions, err := utils.GetPartitions(ex.meta.CollectionManager, task.CollectionID()) if err != nil { log.Warn("failed to get partitions of collection", zap.Error(err)) return err @@ -388,7 +388,7 @@ func (ex *Executor) subDmChannel(task *ChannelTask, step int) error { log.Warn("failed to get schema of collection") return err } 
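+ // GetPartitions now resolves partition IDs from the local CollectionManager instead of a broker RPC; see the utils/meta.go change below.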
- partitions, err := utils.GetPartitions(ex.meta.CollectionManager, ex.broker, task.CollectionID()) + partitions, err := utils.GetPartitions(ex.meta.CollectionManager, task.CollectionID()) if err != nil { log.Warn("failed to get partitions of collection") return err diff --git a/internal/querycoordv2/task/task_test.go b/internal/querycoordv2/task/task_test.go index 2d00eb5224..d752ad55ae 100644 --- a/internal/querycoordv2/task/task_test.go +++ b/internal/querycoordv2/task/task_test.go @@ -185,8 +185,6 @@ func (suite *TaskSuite) TestSubscribeChannelTask() { Return(&schemapb.CollectionSchema{ Name: "TestSubscribeChannelTask", }, nil) - suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection). - Return([]int64{100, 101}, nil) channels := make([]*datapb.VchannelInfo, 0, len(suite.subChannels)) for _, channel := range suite.subChannels { channels = append(channels, &datapb.VchannelInfo{ @@ -234,6 +232,7 @@ func (suite *TaskSuite) TestSubscribeChannelTask() { err = suite.scheduler.Add(task) suite.NoError(err) } + suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(dmChannels, nil, nil) suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1)) suite.AssertTaskNum(0, len(suite.subChannels), len(suite.subChannels), 0) @@ -293,6 +292,7 @@ func (suite *TaskSuite) TestUnsubscribeChannelTask() { err = suite.scheduler.Add(task) suite.NoError(err) } + suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(dmChannels, nil, nil) suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1)) @@ -333,7 +333,6 @@ func (suite *TaskSuite) TestLoadSegmentTask() { suite.broker.EXPECT().GetCollectionSchema(mock.Anything, suite.collection).Return(&schemapb.CollectionSchema{ Name: "TestLoadSegmentTask", }, nil) - suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{100, 101}, nil) for _, segment := range suite.loadSegments { suite.broker.EXPECT().GetSegmentInfo(mock.Anything, segment).Return(&datapb.GetSegmentInfoResponse{Infos: []*datapb.SegmentInfo{ { @@ -374,6 +373,7 @@ func (suite *TaskSuite) TestLoadSegmentTask() { err = suite.scheduler.Add(task) suite.NoError(err) } + suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segments, nil) suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1)) segmentsNum := len(suite.loadSegments) @@ -417,7 +417,6 @@ func (suite *TaskSuite) TestLoadSegmentTaskFailed() { suite.broker.EXPECT().GetCollectionSchema(mock.Anything, suite.collection).Return(&schemapb.CollectionSchema{ Name: "TestLoadSegmentTask", }, nil) - suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{100, 101}, nil) for _, segment := range suite.loadSegments { suite.broker.EXPECT().GetSegmentInfo(mock.Anything, segment).Return(&datapb.GetSegmentInfoResponse{Infos: []*datapb.SegmentInfo{ { @@ -455,6 +454,7 @@ func (suite *TaskSuite) TestLoadSegmentTaskFailed() { err = suite.scheduler.Add(task) suite.NoError(err) } + suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, 
int64(1)).Return(nil, segmentInfos, nil) suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1)) segmentsNum := len(suite.loadSegments) @@ -610,7 +610,6 @@ func (suite *TaskSuite) TestMoveSegmentTask() { suite.broker.EXPECT().GetCollectionSchema(mock.Anything, suite.collection).Return(&schemapb.CollectionSchema{ Name: "TestMoveSegmentTask", }, nil) - suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{100, 101}, nil) for _, segment := range suite.moveSegments { suite.broker.EXPECT().GetSegmentInfo(mock.Anything, segment).Return(&datapb.GetSegmentInfoResponse{Infos: []*datapb.SegmentInfo{ { @@ -665,6 +664,7 @@ func (suite *TaskSuite) TestMoveSegmentTask() { err = suite.scheduler.Add(task) suite.NoError(err) } + suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return([]*datapb.VchannelInfo{vchannel}, segmentInfos, nil) suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1)) suite.target.UpdateCollectionCurrentTarget(suite.collection, int64(1)) @@ -709,7 +709,6 @@ func (suite *TaskSuite) TestTaskCanceled() { suite.broker.EXPECT().GetCollectionSchema(mock.Anything, suite.collection).Return(&schemapb.CollectionSchema{ Name: "TestSubscribeChannelTask", }, nil) - suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{100, 101}, nil) for _, segment := range suite.loadSegments { suite.broker.EXPECT().GetSegmentInfo(mock.Anything, segment).Return(&datapb.GetSegmentInfoResponse{Infos: []*datapb.SegmentInfo{ { @@ -752,6 +751,7 @@ func (suite *TaskSuite) TestTaskCanceled() { } segmentsNum := len(suite.loadSegments) suite.AssertTaskNum(0, segmentsNum, 0, segmentsNum) + suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{partition}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, partition).Return(nil, segmentInfos, nil) suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, partition) @@ -787,7 +787,6 @@ func (suite *TaskSuite) TestSegmentTaskStale() { suite.broker.EXPECT().GetCollectionSchema(mock.Anything, suite.collection).Return(&schemapb.CollectionSchema{ Name: "TestSegmentTaskStale", }, nil) - suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{100, 101}, nil) for _, segment := range suite.loadSegments { suite.broker.EXPECT().GetSegmentInfo(mock.Anything, segment).Return(&datapb.GetSegmentInfoResponse{Infos: []*datapb.SegmentInfo{ { @@ -829,6 +828,7 @@ func (suite *TaskSuite) TestSegmentTaskStale() { err = suite.scheduler.Add(task) suite.NoError(err) } + suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segmentInfos, nil) suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1)) segmentsNum := len(suite.loadSegments) @@ -856,6 +856,10 @@ func (suite *TaskSuite) TestSegmentTaskStale() { InsertChannel: channel.GetChannelName(), }) } + bakExpectations := suite.broker.ExpectedCalls + suite.broker.AssertExpectations(suite.T()) + suite.broker.ExpectedCalls = suite.broker.ExpectedCalls[:0] + suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{2}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(2)).Return(nil, segmentInfos, nil) 
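+ // The earlier expectations were asserted and cleared just above; partition 2 is the only live target for the stale phase, and bakExpectations restores the originals at the end of the test.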
suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(2)) suite.dispatchAndWait(targetNode) @@ -870,6 +874,7 @@ func (suite *TaskSuite) TestSegmentTaskStale() { suite.NoError(task.Err()) } } + suite.broker.ExpectedCalls = bakExpectations } func (suite *TaskSuite) TestChannelTaskReplace() { @@ -1060,6 +1065,7 @@ func (suite *TaskSuite) TestNoExecutor() { err = suite.scheduler.Add(task) suite.NoError(err) } + suite.broker.EXPECT().GetPartitions(mock.Anything, suite.collection).Return([]int64{1}, nil) suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, suite.collection, int64(1)).Return(nil, segments, nil) suite.target.UpdateCollectionNextTargetWithPartitions(suite.collection, int64(1)) segmentsNum := len(suite.loadSegments) diff --git a/internal/querycoordv2/utils/meta.go b/internal/querycoordv2/utils/meta.go index ee087a4a56..2cd8e35781 100644 --- a/internal/querycoordv2/utils/meta.go +++ b/internal/querycoordv2/utils/meta.go @@ -17,7 +17,6 @@ package utils import ( - "context" "fmt" "math/rand" "sort" @@ -51,18 +50,15 @@ func GetReplicaNodesInfo(replicaMgr *meta.ReplicaManager, nodeMgr *session.NodeM return nodes } -func GetPartitions(collectionMgr *meta.CollectionManager, broker meta.Broker, collectionID int64) ([]int64, error) { +func GetPartitions(collectionMgr *meta.CollectionManager, collectionID int64) ([]int64, error) { collection := collectionMgr.GetCollection(collectionID) if collection != nil { - partitions, err := broker.GetPartitions(context.Background(), collectionID) - return partitions, err - } - - partitions := collectionMgr.GetPartitionsByCollection(collectionID) - if partitions != nil { - return lo.Map(partitions, func(partition *meta.Partition, i int) int64 { - return partition.PartitionID - }), nil + partitions := collectionMgr.GetPartitionsByCollection(collectionID) + if partitions != nil { + return lo.Map(partitions, func(partition *meta.Partition, i int) int64 { + return partition.PartitionID + }), nil + } } // todo(yah01): replace this error with a defined error diff --git a/internal/querynode/flow_graph_delete_node_test.go b/internal/querynode/flow_graph_delete_node_test.go index dcf6c19a1f..5ef00fa095 100644 --- a/internal/querynode/flow_graph_delete_node_test.go +++ b/internal/querynode/flow_graph_delete_node_test.go @@ -248,7 +248,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) { }, } msg := []flowgraph.Msg{&dMsg} - assert.Panics(t, func() { deleteNode.Operate(msg) }) + deleteNode.Operate(msg) }) t.Run("test partition not exist", func(t *testing.T) { diff --git a/internal/querynode/flow_graph_filter_delete_node.go b/internal/querynode/flow_graph_filter_delete_node.go index 8ce301ec6d..8a28df3259 100644 --- a/internal/querynode/flow_graph_filter_delete_node.go +++ b/internal/querynode/flow_graph_filter_delete_node.go @@ -139,12 +139,12 @@ func (fddNode *filterDeleteNode) filterInvalidDeleteMessage(msg *msgstream.Delet return nil, nil } - if loadType == loadTypePartition { - if !fddNode.metaReplica.hasPartition(msg.PartitionID) { - // filter out msg which not belongs to the loaded partitions - return nil, nil - } - } + //if loadType == loadTypePartition { + // if !fddNode.metaReplica.hasPartition(msg.PartitionID) { + // // filter out msg which not belongs to the loaded partitions + // return nil, nil + // } + //} return msg, nil } diff --git a/internal/querynode/flow_graph_filter_delete_node_test.go b/internal/querynode/flow_graph_filter_delete_node_test.go index 9f6cd88752..4c222aec38 100644 --- 
a/internal/querynode/flow_graph_filter_delete_node_test.go +++ b/internal/querynode/flow_graph_filter_delete_node_test.go @@ -89,7 +89,7 @@ func TestFlowGraphFilterDeleteNode_filterInvalidDeleteMessage(t *testing.T) { res, err := fg.filterInvalidDeleteMessage(msg, loadTypePartition) assert.NoError(t, err) - assert.Nil(t, res) + assert.NotNil(t, res) }) } diff --git a/internal/querynode/flow_graph_filter_dm_node.go b/internal/querynode/flow_graph_filter_dm_node.go index 90d9a5f5ae..ea8bfc6f63 100644 --- a/internal/querynode/flow_graph_filter_dm_node.go +++ b/internal/querynode/flow_graph_filter_dm_node.go @@ -162,12 +162,12 @@ func (fdmNode *filterDmNode) filterInvalidDeleteMessage(msg *msgstream.DeleteMsg return nil, nil } - if loadType == loadTypePartition { - if !fdmNode.metaReplica.hasPartition(msg.PartitionID) { - // filter out msg which not belongs to the loaded partitions - return nil, nil - } - } + //if loadType == loadTypePartition { + // if !fdmNode.metaReplica.hasPartition(msg.PartitionID) { + // // filter out msg which not belongs to the loaded partitions + // return nil, nil + // } + //} return msg, nil } @@ -198,12 +198,12 @@ func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg return nil, nil } - if loadType == loadTypePartition { - if !fdmNode.metaReplica.hasPartition(msg.PartitionID) { - // filter out msg which not belongs to the loaded partitions - return nil, nil - } - } + //if loadType == loadTypePartition { + // if !fdmNode.metaReplica.hasPartition(msg.PartitionID) { + // // filter out msg which not belongs to the loaded partitions + // return nil, nil + // } + //} // Check if the segment is in excluded segments, // messages after seekPosition may contain the redundant data from flushed slice of segment, diff --git a/internal/querynode/flow_graph_filter_dm_node_test.go b/internal/querynode/flow_graph_filter_dm_node_test.go index c87b7f01aa..66e4e6f6d6 100644 --- a/internal/querynode/flow_graph_filter_dm_node_test.go +++ b/internal/querynode/flow_graph_filter_dm_node_test.go @@ -71,18 +71,6 @@ func TestFlowGraphFilterDmNode_filterInvalidInsertMessage(t *testing.T) { fg.collectionID = defaultCollectionID }) - t.Run("test no partition", func(t *testing.T) { - msg, err := genSimpleInsertMsg(schema, defaultMsgLength) - assert.NoError(t, err) - msg.PartitionID = UniqueID(1000) - fg, err := getFilterDMNode() - assert.NoError(t, err) - - res, err := fg.filterInvalidInsertMessage(msg, loadTypePartition) - assert.NoError(t, err) - assert.Nil(t, res) - }) - t.Run("test not target collection", func(t *testing.T) { msg, err := genSimpleInsertMsg(schema, defaultMsgLength) assert.NoError(t, err) @@ -162,17 +150,6 @@ func TestFlowGraphFilterDmNode_filterInvalidDeleteMessage(t *testing.T) { assert.NotNil(t, res) }) - t.Run("test delete no partition", func(t *testing.T) { - msg := genDeleteMsg(defaultCollectionID, schemapb.DataType_Int64, defaultDelLength) - msg.PartitionID = UniqueID(1000) - fg, err := getFilterDMNode() - assert.NoError(t, err) - - res, err := fg.filterInvalidDeleteMessage(msg, loadTypePartition) - assert.NoError(t, err) - assert.Nil(t, res) - }) - t.Run("test delete not target collection", func(t *testing.T) { msg := genDeleteMsg(defaultCollectionID, schemapb.DataType_Int64, defaultDelLength) fg, err := getFilterDMNode() diff --git a/internal/querynode/flow_graph_insert_node.go b/internal/querynode/flow_graph_insert_node.go index 0a4b817a9b..de5804f2a7 100644 --- a/internal/querynode/flow_graph_insert_node.go +++ 
b/internal/querynode/flow_graph_insert_node.go @@ -314,15 +314,6 @@ func processDeleteMessages(replica ReplicaInterface, segType segmentType, msg *m var err error if msg.PartitionID != -1 { partitionIDs = []UniqueID{msg.GetPartitionID()} - } else { - partitionIDs, err = replica.getPartitionIDs(msg.GetCollectionID()) - if err != nil { - log.Warn("the collection has been released, ignore it", - zap.Int64("collectionID", msg.GetCollectionID()), - zap.Error(err), - ) - return err - } } var resultSegmentIDs []UniqueID resultSegmentIDs, err = replica.getSegmentIDsByVChannel(partitionIDs, vchannelName, segType) diff --git a/internal/querynode/flow_graph_insert_node_test.go b/internal/querynode/flow_graph_insert_node_test.go index 3627269796..874da7f2ec 100644 --- a/internal/querynode/flow_graph_insert_node_test.go +++ b/internal/querynode/flow_graph_insert_node_test.go @@ -288,7 +288,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) { }, } msg := []flowgraph.Msg{&iMsg} - assert.Panics(t, func() { insertNode.Operate(msg) }) + insertNode.Operate(msg) }) t.Run("test partition not exist", func(t *testing.T) { diff --git a/internal/querynode/impl.go b/internal/querynode/impl.go index 2d224e8579..e94ca22b31 100644 --- a/internal/querynode/impl.go +++ b/internal/querynode/impl.go @@ -566,10 +566,10 @@ func (node *QueryNode) ReleaseCollection(ctx context.Context, in *querypb.Releas return status, nil } -// ReleasePartitions clears all data related to this partition on the querynode -func (node *QueryNode) ReleasePartitions(ctx context.Context, in *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) { +func (node *QueryNode) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) { + nodeID := node.session.ServerID if !node.lifetime.Add(commonpbutil.IsHealthyOrStopping) { - err := fmt.Errorf("query node %d is not ready", node.GetSession().ServerID) + err := fmt.Errorf("query node %d is not ready", nodeID) status := &commonpb.Status{ ErrorCode: commonpb.ErrorCode_UnexpectedError, Reason: err.Error(), @@ -578,35 +578,58 @@ func (node *QueryNode) Releas } defer node.lifetime.Done() - dct := &releasePartitionsTask{ - baseTask: baseTask{ - ctx: ctx, - done: make(chan error), - }, - req: in, - node: node, + // check target matches + if req.GetBase().GetTargetID() != nodeID { + status := &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_NodeIDNotMatch, + Reason: common.WrapNodeIDNotMatchMsg(req.GetBase().GetTargetID(), nodeID), + } + return status, nil } - err := node.scheduler.queue.Enqueue(dct) - if err != nil { + log := log.Ctx(ctx).With(zap.Int64("colID", req.GetCollectionID()), zap.Int64s("partIDs", req.GetPartitionIDs())) + log.Info("loading partitions") + for _, part := range req.GetPartitionIDs() { + err := node.metaReplica.addPartition(req.GetCollectionID(), part) + if err != nil { + log.Warn(err.Error()) + } + } + log.Info("load partitions done") + status := &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + } + return status, nil +} + +// ReleasePartitions clears all data related to this partition on the querynode +func (node *QueryNode) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) { + nodeID := node.session.ServerID + if !node.lifetime.Add(commonpbutil.IsHealthyOrStopping) { + err := fmt.Errorf("query node %d is not ready", nodeID) status := &commonpb.Status{ ErrorCode: commonpb.ErrorCode_UnexpectedError, Reason: err.Error(), -
log.Warn(err.Error()) return status, nil } - log.Info("releasePartitionsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64s("partitionIDs", in.PartitionIDs)) + defer node.lifetime.Done() - func() { - err = dct.WaitToFinish() - if err != nil { - log.Warn(err.Error()) - return + // check target matches + if req.GetBase().GetTargetID() != nodeID { + status := &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_NodeIDNotMatch, + Reason: common.WrapNodeIDNotMatchMsg(req.GetBase().GetTargetID(), nodeID), } - log.Info("releasePartitionsTask WaitToFinish done", zap.Int64("collectionID", in.CollectionID), zap.Int64s("partitionIDs", in.PartitionIDs)) - }() + return status, nil + } + log := log.Ctx(ctx).With(zap.Int64("colID", req.GetCollectionID()), zap.Int64s("partIDs", req.GetPartitionIDs())) + log.Info("releasing partitions") + for _, part := range req.GetPartitionIDs() { + node.metaReplica.removePartition(part) + } + log.Info("release partitions done") status := &commonpb.Status{ ErrorCode: commonpb.ErrorCode_Success, } diff --git a/internal/querynode/impl_test.go b/internal/querynode/impl_test.go index db63c20e82..9ebd31f158 100644 --- a/internal/querynode/impl_test.go +++ b/internal/querynode/impl_test.go @@ -23,6 +23,10 @@ import ( "sync" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/milvus-io/milvus-proto/go-api/commonpb" "github.com/milvus-io/milvus-proto/go-api/milvuspb" "github.com/milvus-io/milvus/internal/common" @@ -33,10 +37,8 @@ import ( "github.com/milvus-io/milvus/internal/util/conc" "github.com/milvus-io/milvus/internal/util/etcd" "github.com/milvus-io/milvus/internal/util/metricsinfo" + "github.com/milvus-io/milvus/internal/util/paramtable" "github.com/milvus-io/milvus/internal/util/sessionutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" ) func TestImpl_GetComponentStates(t *testing.T) { @@ -360,6 +362,36 @@ func TestImpl_ReleaseCollection(t *testing.T) { assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode) } +func TestImpl_LoadPartitions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + node, err := genSimpleQueryNode(ctx) + assert.NoError(t, err) + + req := &queryPb.LoadPartitionsRequest{ + Base: &commonpb.MsgBase{ + TargetID: paramtable.GetNodeID(), + }, + CollectionID: defaultCollectionID, + PartitionIDs: []UniqueID{defaultPartitionID}, + } + + status, err := node.LoadPartitions(ctx, req) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) + + node.UpdateStateCode(commonpb.StateCode_Abnormal) + status, err = node.LoadPartitions(ctx, req) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode) + + node.UpdateStateCode(commonpb.StateCode_Healthy) + req.Base.TargetID = -1 + status, err = node.LoadPartitions(ctx, req) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_NodeIDNotMatch, status.ErrorCode) +} + func TestImpl_ReleasePartitions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -368,8 +400,9 @@ func TestImpl_ReleasePartitions(t *testing.T) { req := &queryPb.ReleasePartitionsRequest{ Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_WatchQueryChannels, - MsgID: rand.Int63(), + MsgType: commonpb.MsgType_WatchQueryChannels, + MsgID: rand.Int63(), + TargetID: paramtable.GetNodeID(), }, NodeID: 0,
CollectionID: defaultCollectionID, @@ -384,6 +417,12 @@ func TestImpl_ReleasePartitions(t *testing.T) { status, err = node.ReleasePartitions(ctx, req) assert.NoError(t, err) assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode) + + node.UpdateStateCode(commonpb.StateCode_Healthy) + req.Base.TargetID = -1 + status, err = node.ReleasePartitions(ctx, req) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_NodeIDNotMatch, status.ErrorCode) } func TestImpl_GetSegmentInfo(t *testing.T) { diff --git a/internal/querynode/meta_replica.go b/internal/querynode/meta_replica.go index 48a7a04e7e..91e5dfaa43 100644 --- a/internal/querynode/meta_replica.go +++ b/internal/querynode/meta_replica.go @@ -458,14 +458,14 @@ func (replica *metaReplica) removePartitionPrivate(partitionID UniqueID) error { } // delete segments - ids, _ := partition.getSegmentIDs(segmentTypeGrowing) - for _, segmentID := range ids { - replica.removeSegmentPrivate(segmentID, segmentTypeGrowing) - } - ids, _ = partition.getSegmentIDs(segmentTypeSealed) - for _, segmentID := range ids { - replica.removeSegmentPrivate(segmentID, segmentTypeSealed) - } + //ids, _ := partition.getSegmentIDs(segmentTypeGrowing) + //for _, segmentID := range ids { + // replica.removeSegmentPrivate(segmentID, segmentTypeGrowing) + //} + //ids, _ = partition.getSegmentIDs(segmentTypeSealed) + //for _, segmentID := range ids { + // replica.removeSegmentPrivate(segmentID, segmentTypeSealed) + //} collection.removePartitionID(partitionID) delete(replica.partitions, partitionID) @@ -589,10 +589,6 @@ func (replica *metaReplica) addSegment(segmentID UniqueID, partitionID UniqueID, // addSegmentPrivate is private function in collectionReplica, to add a new segment to collectionReplica func (replica *metaReplica) addSegmentPrivate(segment *Segment) error { segID := segment.segmentID - partition, err := replica.getPartitionByIDPrivate(segment.partitionID) - if err != nil { - return err - } segType := segment.getType() ok, err := replica.hasSegmentPrivate(segID, segType) @@ -603,12 +599,16 @@ func (replica *metaReplica) addSegmentPrivate(segment *Segment) error { return fmt.Errorf("segment has been existed, "+ "segmentID = %d, collectionID = %d, segmentType = %s", segID, segment.collectionID, segType.String()) } - partition.addSegmentID(segID, segType) switch segType { case segmentTypeGrowing: replica.growingSegments[segID] = segment case segmentTypeSealed: + partition, err := replica.getPartitionByIDPrivate(segment.partitionID) + if err != nil { + return err + } + partition.addSegmentID(segID, segType) replica.sealedSegments[segID] = segment default: return fmt.Errorf("unexpected segment type, segmentID = %d, segmentType = %s", segID, segType.String()) diff --git a/internal/querynode/search_test.go b/internal/querynode/search_test.go index 058548d179..593f3899cd 100644 --- a/internal/querynode/search_test.go +++ b/internal/querynode/search_test.go @@ -200,6 +200,7 @@ func TestStreaming_search(t *testing.T) { collection, err := streaming.getCollectionByID(defaultCollectionID) assert.NoError(t, err) + collection.setLoadType(loadTypeCollection) searchReq, err := genSearchPlanAndRequests(collection, IndexFaissIDMap, defaultNQ) assert.NoError(t, err) diff --git a/internal/querynode/statistic_test.go b/internal/querynode/statistic_test.go index c142938de4..b76483a34c 100644 --- a/internal/querynode/statistic_test.go +++ b/internal/querynode/statistic_test.go @@ -76,6 +76,10 @@ func TestHistorical_statistic(t *testing.T) { his, err := 
genSimpleReplicaWithSealSegment(ctx) assert.NoError(t, err) + collection, err := his.getCollectionByID(defaultCollectionID) + assert.NoError(t, err) + collection.setLoadType(loadTypeCollection) + err = his.removePartition(defaultPartitionID) assert.NoError(t, err) @@ -153,6 +157,10 @@ func TestStreaming_statistics(t *testing.T) { streaming, err := genSimpleReplicaWithGrowingSegment() assert.NoError(t, err) + collection, err := streaming.getCollectionByID(defaultCollectionID) + assert.NoError(t, err) + collection.setLoadType(loadTypeCollection) + err = streaming.removePartition(defaultPartitionID) assert.NoError(t, err) diff --git a/internal/querynode/validate_test.go b/internal/querynode/validate_test.go index ea505a6bc4..000c05d28d 100644 --- a/internal/querynode/validate_test.go +++ b/internal/querynode/validate_test.go @@ -86,6 +86,9 @@ func TestQueryShardHistorical_validateSegmentIDs(t *testing.T) { t.Run("test validate after partition release", func(t *testing.T) { his, err := genSimpleReplicaWithSealSegment(ctx) assert.NoError(t, err) + collection, err := his.getCollectionByID(defaultCollectionID) + assert.NoError(t, err) + collection.setLoadType(loadTypeCollection) err = his.removePartition(defaultPartitionID) assert.NoError(t, err) _, _, err = validateOnHistoricalReplica(context.TODO(), his, defaultCollectionID, []UniqueID{}, []UniqueID{defaultSegmentID}) diff --git a/internal/rootcoord/broker.go b/internal/rootcoord/broker.go index 7c77647ee8..3ea59f83fa 100644 --- a/internal/rootcoord/broker.go +++ b/internal/rootcoord/broker.go @@ -48,6 +48,8 @@ type watchInfo struct { // Broker communicates with other components. type Broker interface { ReleaseCollection(ctx context.Context, collectionID UniqueID) error + ReleasePartitions(ctx context.Context, collectionID UniqueID, partitionIDs ...UniqueID) error + SyncNewCreatedPartition(ctx context.Context, collectionID UniqueID, partitionID UniqueID) error GetQuerySegmentInfo(ctx context.Context, collectionID int64, segIDs []int64) (retResp *querypb.GetSegmentInfoResponse, retErr error) WatchChannels(ctx context.Context, info *watchInfo) error @@ -93,6 +95,49 @@ func (b *ServerBroker) ReleaseCollection(ctx context.Context, collectionID Uniqu return nil } +func (b *ServerBroker) ReleasePartitions(ctx context.Context, collectionID UniqueID, partitionIDs ...UniqueID) error { + if len(partitionIDs) == 0 { + return nil + } + log := log.Ctx(ctx).With(zap.Int64("collection", collectionID), zap.Int64s("partitionIDs", partitionIDs)) + log.Info("releasing partitions") + resp, err := b.s.queryCoord.ReleasePartitions(ctx, &querypb.ReleasePartitionsRequest{ + Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ReleasePartitions)), + CollectionID: collectionID, + PartitionIDs: partitionIDs, + }) + if err != nil { + return err + } + + if resp.GetErrorCode() != commonpb.ErrorCode_Success { + return fmt.Errorf("release partition failed, reason: %s", resp.GetReason()) + } + + log.Info("release partitions done") + return nil +} + +func (b *ServerBroker) SyncNewCreatedPartition(ctx context.Context, collectionID UniqueID, partitionID UniqueID) error { + log := log.Ctx(ctx).With(zap.Int64("collection", collectionID), zap.Int64("partitionID", partitionID)) + log.Info("begin to sync new partition") + resp, err := b.s.queryCoord.SyncNewCreatedPartition(ctx, &querypb.SyncNewCreatedPartitionRequest{ + Base: commonpbutil.NewMsgBase(commonpbutil.WithMsgType(commonpb.MsgType_ReleasePartitions)), + CollectionID: collectionID, + PartitionID: 
partitionID, + }) + if err != nil { + return err + } + + if resp.GetErrorCode() != commonpb.ErrorCode_Success { + return fmt.Errorf("sync new partition failed, reason: %s", resp.GetReason()) + } + + log.Info("sync new partition done") + return nil +} + func (b *ServerBroker) GetQuerySegmentInfo(ctx context.Context, collectionID int64, segIDs []int64) (retResp *querypb.GetSegmentInfoResponse, retErr error) { resp, err := b.s.queryCoord.GetSegmentInfo(ctx, &querypb.GetSegmentInfoRequest{ Base: commonpbutil.NewMsgBase( diff --git a/internal/rootcoord/create_partition_task.go b/internal/rootcoord/create_partition_task.go index 5c53403e8b..2a1fc0845c 100644 --- a/internal/rootcoord/create_partition_task.go +++ b/internal/rootcoord/create_partition_task.go @@ -73,7 +73,7 @@ func (t *createPartitionTask) Execute(ctx context.Context) error { PartitionCreatedTimestamp: t.GetTs(), Extra: nil, CollectionID: t.collMeta.CollectionID, - State: pb.PartitionState_PartitionCreated, + State: pb.PartitionState_PartitionCreating, } undoTask := newBaseUndoTask(t.core.stepExecutor) @@ -88,5 +88,23 @@ func (t *createPartitionTask) Execute(ctx context.Context) error { partition: partition, }, &nullStep{}) // adding partition is atomic enough. + undoTask.AddStep(&syncNewCreatedPartitionStep{ + baseStep: baseStep{core: t.core}, + collectionID: t.collMeta.CollectionID, + partitionID: partID, + }, &releasePartitionsStep{ + baseStep: baseStep{core: t.core}, + collectionID: t.collMeta.CollectionID, + partitionIDs: []int64{partID}, + }) + + undoTask.AddStep(&changePartitionStateStep{ + baseStep: baseStep{core: t.core}, + collectionID: t.collMeta.CollectionID, + partitionID: partID, + state: pb.PartitionState_PartitionCreated, + ts: t.GetTs(), + }, &nullStep{}) + return undoTask.Execute(ctx) } diff --git a/internal/rootcoord/create_partition_task_test.go b/internal/rootcoord/create_partition_task_test.go index f2fb28c09e..262d20a549 100644 --- a/internal/rootcoord/create_partition_task_test.go +++ b/internal/rootcoord/create_partition_task_test.go @@ -20,14 +20,13 @@ import ( "context" "testing" - "github.com/milvus-io/milvus/internal/util/funcutil" - - "github.com/milvus-io/milvus/internal/metastore/model" - "github.com/stretchr/testify/assert" "github.com/milvus-io/milvus-proto/go-api/commonpb" "github.com/milvus-io/milvus-proto/go-api/milvuspb" + "github.com/milvus-io/milvus/internal/metastore/model" + "github.com/milvus-io/milvus/internal/proto/etcdpb" + "github.com/milvus-io/milvus/internal/util/funcutil" ) func Test_createPartitionTask_Prepare(t *testing.T) { @@ -147,7 +146,14 @@ func Test_createPartitionTask_Execute(t *testing.T) { meta.AddPartitionFunc = func(ctx context.Context, partition *model.Partition) error { return nil } - core := newTestCore(withValidIDAllocator(), withValidProxyManager(), withMeta(meta)) + meta.ChangePartitionStateFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state etcdpb.PartitionState, ts Timestamp) error { + return nil + } + b := newMockBroker() + b.SyncNewCreatedPartitionFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID) error { + return nil + } + core := newTestCore(withValidIDAllocator(), withValidProxyManager(), withMeta(meta), withBroker(b)) task := &createPartitionTask{ baseTask: baseTask{core: core}, collMeta: coll, diff --git a/internal/rootcoord/drop_partition_task.go b/internal/rootcoord/drop_partition_task.go index cf041033e6..c4c01b9515 100644 --- a/internal/rootcoord/drop_partition_task.go +++ 
b/internal/rootcoord/drop_partition_task.go @@ -85,7 +85,12 @@ func (t *dropPartitionTask) Execute(ctx context.Context) error { ts: t.GetTs(), }) - // TODO: release partition when query coord is ready. + redoTask.AddAsyncStep(&releasePartitionsStep{ + baseStep: baseStep{core: t.core}, + collectionID: t.collMeta.CollectionID, + partitionIDs: []int64{partID}, + }) + redoTask.AddAsyncStep(&deletePartitionDataStep{ baseStep: baseStep{core: t.core}, pchans: t.collMeta.PhysicalChannelNames, diff --git a/internal/rootcoord/drop_partition_task_test.go b/internal/rootcoord/drop_partition_task_test.go index b77694e6ff..f3b658c457 100644 --- a/internal/rootcoord/drop_partition_task_test.go +++ b/internal/rootcoord/drop_partition_task_test.go @@ -177,6 +177,9 @@ func Test_dropPartitionTask_Execute(t *testing.T) { broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID, partIDs []UniqueID) error { return nil } + broker.ReleasePartitionsFunc = func(ctx context.Context, collectionID UniqueID, partitionIDs ...UniqueID) error { + return nil + } core := newTestCore( withValidProxyManager(), diff --git a/internal/rootcoord/meta_table.go b/internal/rootcoord/meta_table.go index 037d7029fc..36bb270e42 100644 --- a/internal/rootcoord/meta_table.go +++ b/internal/rootcoord/meta_table.go @@ -512,7 +512,7 @@ func (mt *MetaTable) AddPartition(ctx context.Context, partition *model.Partitio if !ok || !coll.Available() { return fmt.Errorf("collection not exists: %d", partition.CollectionID) } - if partition.State != pb.PartitionState_PartitionCreated { + if partition.State != pb.PartitionState_PartitionCreating { return fmt.Errorf("partition state is not creating, collection: %d, partition: %d, state: %s", partition.CollectionID, partition.PartitionID, partition.State) } if err := mt.catalog.CreatePartition(ctx, partition, partition.PartitionCreatedTimestamp); err != nil { diff --git a/internal/rootcoord/meta_table_test.go b/internal/rootcoord/meta_table_test.go index f69218a026..4e62695e2d 100644 --- a/internal/rootcoord/meta_table_test.go +++ b/internal/rootcoord/meta_table_test.go @@ -892,7 +892,7 @@ func TestMetaTable_AddPartition(t *testing.T) { 100: {Name: "test", CollectionID: 100}, }, } - err := meta.AddPartition(context.TODO(), &model.Partition{CollectionID: 100, State: pb.PartitionState_PartitionCreated}) + err := meta.AddPartition(context.TODO(), &model.Partition{CollectionID: 100, State: pb.PartitionState_PartitionCreating}) assert.Error(t, err) }) @@ -909,7 +909,7 @@ func TestMetaTable_AddPartition(t *testing.T) { 100: {Name: "test", CollectionID: 100}, }, } - err := meta.AddPartition(context.TODO(), &model.Partition{CollectionID: 100, State: pb.PartitionState_PartitionCreated}) + err := meta.AddPartition(context.TODO(), &model.Partition{CollectionID: 100, State: pb.PartitionState_PartitionCreating}) assert.NoError(t, err) }) } diff --git a/internal/rootcoord/mock_test.go b/internal/rootcoord/mock_test.go index 5a2f8a6074..c254f2b02e 100644 --- a/internal/rootcoord/mock_test.go +++ b/internal/rootcoord/mock_test.go @@ -500,12 +500,20 @@ func withValidQueryCoord() Opt { succStatus(), nil, ) + qc.EXPECT().ReleasePartitions(mock.Anything, mock.Anything).Return( + succStatus(), nil, + ) + qc.EXPECT().GetSegmentInfo(mock.Anything, mock.Anything).Return( &querypb.GetSegmentInfoResponse{ Status: succStatus(), }, nil, ) + qc.EXPECT().SyncNewCreatedPartition(mock.Anything, mock.Anything).Return( + succStatus(), nil, + ) + return withQueryCoord(qc) } @@ -779,8 +787,10 @@ func
withMetricsCacheManager() Opt { type mockBroker struct { Broker - ReleaseCollectionFunc func(ctx context.Context, collectionID UniqueID) error - GetQuerySegmentInfoFunc func(ctx context.Context, collectionID int64, segIDs []int64) (retResp *querypb.GetSegmentInfoResponse, retErr error) + ReleaseCollectionFunc func(ctx context.Context, collectionID UniqueID) error + ReleasePartitionsFunc func(ctx context.Context, collectionID UniqueID, partitionIDs ...UniqueID) error + SyncNewCreatedPartitionFunc func(ctx context.Context, collectionID UniqueID, partitionID UniqueID) error + GetQuerySegmentInfoFunc func(ctx context.Context, collectionID int64, segIDs []int64) (retResp *querypb.GetSegmentInfoResponse, retErr error) WatchChannelsFunc func(ctx context.Context, info *watchInfo) error UnwatchChannelsFunc func(ctx context.Context, info *watchInfo) error @@ -814,6 +824,14 @@ func (b mockBroker) ReleaseCollection(ctx context.Context, collectionID UniqueID return b.ReleaseCollectionFunc(ctx, collectionID) } +func (b mockBroker) ReleasePartitions(ctx context.Context, collectionID UniqueID, partitionIDs ...UniqueID) error { + return b.ReleasePartitionsFunc(ctx, collectionID, partitionIDs...) +} + +func (b mockBroker) SyncNewCreatedPartition(ctx context.Context, collectionID UniqueID, partitionID UniqueID) error { + return b.SyncNewCreatedPartitionFunc(ctx, collectionID, partitionID) +} + func (b mockBroker) DropCollectionIndex(ctx context.Context, collID UniqueID, partIDs []UniqueID) error { return b.DropCollectionIndexFunc(ctx, collID, partIDs) } diff --git a/internal/rootcoord/step.go b/internal/rootcoord/step.go index 65bfe6d94e..38a7bf542e 100644 --- a/internal/rootcoord/step.go +++ b/internal/rootcoord/step.go @@ -273,6 +273,44 @@ func (s *releaseCollectionStep) Weight() stepPriority { return stepPriorityUrgent } +type releasePartitionsStep struct { + baseStep + collectionID UniqueID + partitionIDs []UniqueID +} + +func (s *releasePartitionsStep) Execute(ctx context.Context) ([]nestedStep, error) { + err := s.core.broker.ReleasePartitions(ctx, s.collectionID, s.partitionIDs...) + return nil, err +} + +func (s *releasePartitionsStep) Desc() string { + return fmt.Sprintf("release partitions, collectionID=%d, partitionIDs=%v", s.collectionID, s.partitionIDs) +} + +func (s *releasePartitionsStep) Weight() stepPriority { + return stepPriorityUrgent +} + +type syncNewCreatedPartitionStep struct { + baseStep + collectionID UniqueID + partitionID UniqueID +} + +func (s *syncNewCreatedPartitionStep) Execute(ctx context.Context) ([]nestedStep, error) { + err := s.core.broker.SyncNewCreatedPartition(ctx, s.collectionID, s.partitionID) + return nil, err +} + +func (s *syncNewCreatedPartitionStep) Desc() string { + return fmt.Sprintf("sync new partition, collectionID=%d, partitionID=%d", s.collectionID, s.partitionID) +} + +func (s *syncNewCreatedPartitionStep) Weight() stepPriority { + return stepPriorityUrgent +} + type dropIndexStep struct { baseStep collID UniqueID diff --git a/internal/types/mock_querycoord.go b/internal/types/mock_querycoord.go index 23767aabd3..8d062958ab 100644 --- a/internal/types/mock_querycoord.go +++ b/internal/types/mock_querycoord.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.16.0. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT.
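For context on how the two steps above are meant to compose: create_partition_task.go pairs syncNewCreatedPartitionStep with releasePartitionsStep as its compensating action, and only then flips the partition to PartitionCreated. A condensed sketch of that redo/undo pairing follows; step and undoTask are assumed stand-ins for the real nestedStep and baseUndoTask types (the real executor schedules undo steps asynchronously rather than inline):

package rootcoordsketch

import "context"

// step stands in for rootcoord's nestedStep: each redo step is registered
// together with the step that compensates it.
type step interface {
	Execute(ctx context.Context) error
}

type pair struct{ redo, undo step }

// undoTask is a minimal stand-in for rootcoord's baseUndoTask.
type undoTask struct{ pairs []pair }

// AddStep registers a redo step alongside its compensating undo step.
func (t *undoTask) AddStep(redo, undo step) {
	t.pairs = append(t.pairs, pair{redo: redo, undo: undo})
}

// Execute runs redo steps in order; if one fails, the already executed
// steps are compensated newest-first. This ordering is why the partition
// is marked PartitionCreated only after the sync step has succeeded, and
// why a failed sync is undone by releasing the partition again.
func (t *undoTask) Execute(ctx context.Context) error {
	for i, p := range t.pairs {
		if err := p.redo.Execute(ctx); err != nil {
			for j := i - 1; j >= 0; j-- {
				_ = t.pairs[j].undo.Execute(ctx) // best-effort rollback
			}
			return err
		}
	}
	return nil
}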
package types @@ -59,8 +59,8 @@ type MockQueryCoord_CheckHealth_Call struct { } // CheckHealth is a helper method to define mock.On call -// - ctx context.Context -// - req *milvuspb.CheckHealthRequest +// - ctx context.Context +// - req *milvuspb.CheckHealthRequest func (_e *MockQueryCoord_Expecter) CheckHealth(ctx interface{}, req interface{}) *MockQueryCoord_CheckHealth_Call { return &MockQueryCoord_CheckHealth_Call{Call: _e.mock.On("CheckHealth", ctx, req)} } @@ -106,8 +106,8 @@ type MockQueryCoord_CreateResourceGroup_Call struct { } // CreateResourceGroup is a helper method to define mock.On call -// - ctx context.Context -// - req *milvuspb.CreateResourceGroupRequest +// - ctx context.Context +// - req *milvuspb.CreateResourceGroupRequest func (_e *MockQueryCoord_Expecter) CreateResourceGroup(ctx interface{}, req interface{}) *MockQueryCoord_CreateResourceGroup_Call { return &MockQueryCoord_CreateResourceGroup_Call{Call: _e.mock.On("CreateResourceGroup", ctx, req)} } @@ -153,8 +153,8 @@ type MockQueryCoord_DescribeResourceGroup_Call struct { } // DescribeResourceGroup is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.DescribeResourceGroupRequest +// - ctx context.Context +// - req *querypb.DescribeResourceGroupRequest func (_e *MockQueryCoord_Expecter) DescribeResourceGroup(ctx interface{}, req interface{}) *MockQueryCoord_DescribeResourceGroup_Call { return &MockQueryCoord_DescribeResourceGroup_Call{Call: _e.mock.On("DescribeResourceGroup", ctx, req)} } @@ -200,8 +200,8 @@ type MockQueryCoord_DropResourceGroup_Call struct { } // DropResourceGroup is a helper method to define mock.On call -// - ctx context.Context -// - req *milvuspb.DropResourceGroupRequest +// - ctx context.Context +// - req *milvuspb.DropResourceGroupRequest func (_e *MockQueryCoord_Expecter) DropResourceGroup(ctx interface{}, req interface{}) *MockQueryCoord_DropResourceGroup_Call { return &MockQueryCoord_DropResourceGroup_Call{Call: _e.mock.On("DropResourceGroup", ctx, req)} } @@ -247,7 +247,7 @@ type MockQueryCoord_GetComponentStates_Call struct { } // GetComponentStates is a helper method to define mock.On call -// - ctx context.Context +// - ctx context.Context func (_e *MockQueryCoord_Expecter) GetComponentStates(ctx interface{}) *MockQueryCoord_GetComponentStates_Call { return &MockQueryCoord_GetComponentStates_Call{Call: _e.mock.On("GetComponentStates", ctx)} } @@ -293,8 +293,8 @@ type MockQueryCoord_GetMetrics_Call struct { } // GetMetrics is a helper method to define mock.On call -// - ctx context.Context -// - req *milvuspb.GetMetricsRequest +// - ctx context.Context +// - req *milvuspb.GetMetricsRequest func (_e *MockQueryCoord_Expecter) GetMetrics(ctx interface{}, req interface{}) *MockQueryCoord_GetMetrics_Call { return &MockQueryCoord_GetMetrics_Call{Call: _e.mock.On("GetMetrics", ctx, req)} } @@ -340,8 +340,8 @@ type MockQueryCoord_GetPartitionStates_Call struct { } // GetPartitionStates is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.GetPartitionStatesRequest +// - ctx context.Context +// - req *querypb.GetPartitionStatesRequest func (_e *MockQueryCoord_Expecter) GetPartitionStates(ctx interface{}, req interface{}) *MockQueryCoord_GetPartitionStates_Call { return &MockQueryCoord_GetPartitionStates_Call{Call: _e.mock.On("GetPartitionStates", ctx, req)} } @@ -387,8 +387,8 @@ type MockQueryCoord_GetReplicas_Call struct { } // GetReplicas is a helper method to define mock.On call -// - ctx context.Context -// - req 
*milvuspb.GetReplicasRequest +// - ctx context.Context +// - req *milvuspb.GetReplicasRequest func (_e *MockQueryCoord_Expecter) GetReplicas(ctx interface{}, req interface{}) *MockQueryCoord_GetReplicas_Call { return &MockQueryCoord_GetReplicas_Call{Call: _e.mock.On("GetReplicas", ctx, req)} } @@ -434,8 +434,8 @@ type MockQueryCoord_GetSegmentInfo_Call struct { } // GetSegmentInfo is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.GetSegmentInfoRequest +// - ctx context.Context +// - req *querypb.GetSegmentInfoRequest func (_e *MockQueryCoord_Expecter) GetSegmentInfo(ctx interface{}, req interface{}) *MockQueryCoord_GetSegmentInfo_Call { return &MockQueryCoord_GetSegmentInfo_Call{Call: _e.mock.On("GetSegmentInfo", ctx, req)} } @@ -481,8 +481,8 @@ type MockQueryCoord_GetShardLeaders_Call struct { } // GetShardLeaders is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.GetShardLeadersRequest +// - ctx context.Context +// - req *querypb.GetShardLeadersRequest func (_e *MockQueryCoord_Expecter) GetShardLeaders(ctx interface{}, req interface{}) *MockQueryCoord_GetShardLeaders_Call { return &MockQueryCoord_GetShardLeaders_Call{Call: _e.mock.On("GetShardLeaders", ctx, req)} } @@ -528,7 +528,7 @@ type MockQueryCoord_GetStatisticsChannel_Call struct { } // GetStatisticsChannel is a helper method to define mock.On call -// - ctx context.Context +// - ctx context.Context func (_e *MockQueryCoord_Expecter) GetStatisticsChannel(ctx interface{}) *MockQueryCoord_GetStatisticsChannel_Call { return &MockQueryCoord_GetStatisticsChannel_Call{Call: _e.mock.On("GetStatisticsChannel", ctx)} } @@ -574,7 +574,7 @@ type MockQueryCoord_GetTimeTickChannel_Call struct { } // GetTimeTickChannel is a helper method to define mock.On call -// - ctx context.Context +// - ctx context.Context func (_e *MockQueryCoord_Expecter) GetTimeTickChannel(ctx interface{}) *MockQueryCoord_GetTimeTickChannel_Call { return &MockQueryCoord_GetTimeTickChannel_Call{Call: _e.mock.On("GetTimeTickChannel", ctx)} } @@ -656,8 +656,8 @@ type MockQueryCoord_ListResourceGroups_Call struct { } // ListResourceGroups is a helper method to define mock.On call -// - ctx context.Context -// - req *milvuspb.ListResourceGroupsRequest +// - ctx context.Context +// - req *milvuspb.ListResourceGroupsRequest func (_e *MockQueryCoord_Expecter) ListResourceGroups(ctx interface{}, req interface{}) *MockQueryCoord_ListResourceGroups_Call { return &MockQueryCoord_ListResourceGroups_Call{Call: _e.mock.On("ListResourceGroups", ctx, req)} } @@ -703,8 +703,8 @@ type MockQueryCoord_LoadBalance_Call struct { } // LoadBalance is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.LoadBalanceRequest +// - ctx context.Context +// - req *querypb.LoadBalanceRequest func (_e *MockQueryCoord_Expecter) LoadBalance(ctx interface{}, req interface{}) *MockQueryCoord_LoadBalance_Call { return &MockQueryCoord_LoadBalance_Call{Call: _e.mock.On("LoadBalance", ctx, req)} } @@ -750,8 +750,8 @@ type MockQueryCoord_LoadCollection_Call struct { } // LoadCollection is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.LoadCollectionRequest +// - ctx context.Context +// - req *querypb.LoadCollectionRequest func (_e *MockQueryCoord_Expecter) LoadCollection(ctx interface{}, req interface{}) *MockQueryCoord_LoadCollection_Call { return &MockQueryCoord_LoadCollection_Call{Call: _e.mock.On("LoadCollection", ctx, req)} } @@ -797,8 +797,8 @@ type 
MockQueryCoord_LoadPartitions_Call struct { } // LoadPartitions is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.LoadPartitionsRequest +// - ctx context.Context +// - req *querypb.LoadPartitionsRequest func (_e *MockQueryCoord_Expecter) LoadPartitions(ctx interface{}, req interface{}) *MockQueryCoord_LoadPartitions_Call { return &MockQueryCoord_LoadPartitions_Call{Call: _e.mock.On("LoadPartitions", ctx, req)} } @@ -880,8 +880,8 @@ type MockQueryCoord_ReleaseCollection_Call struct { } // ReleaseCollection is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.ReleaseCollectionRequest +// - ctx context.Context +// - req *querypb.ReleaseCollectionRequest func (_e *MockQueryCoord_Expecter) ReleaseCollection(ctx interface{}, req interface{}) *MockQueryCoord_ReleaseCollection_Call { return &MockQueryCoord_ReleaseCollection_Call{Call: _e.mock.On("ReleaseCollection", ctx, req)} } @@ -927,8 +927,8 @@ type MockQueryCoord_ReleasePartitions_Call struct { } // ReleasePartitions is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.ReleasePartitionsRequest +// - ctx context.Context +// - req *querypb.ReleasePartitionsRequest func (_e *MockQueryCoord_Expecter) ReleasePartitions(ctx interface{}, req interface{}) *MockQueryCoord_ReleasePartitions_Call { return &MockQueryCoord_ReleasePartitions_Call{Call: _e.mock.On("ReleasePartitions", ctx, req)} } @@ -956,7 +956,7 @@ type MockQueryCoord_SetAddress_Call struct { } // SetAddress is a helper method to define mock.On call -// - address string +// - address string func (_e *MockQueryCoord_Expecter) SetAddress(address interface{}) *MockQueryCoord_SetAddress_Call { return &MockQueryCoord_SetAddress_Call{Call: _e.mock.On("SetAddress", address)} } @@ -993,7 +993,7 @@ type MockQueryCoord_SetDataCoord_Call struct { } // SetDataCoord is a helper method to define mock.On call -// - dataCoord DataCoord +// - dataCoord DataCoord func (_e *MockQueryCoord_Expecter) SetDataCoord(dataCoord interface{}) *MockQueryCoord_SetDataCoord_Call { return &MockQueryCoord_SetDataCoord_Call{Call: _e.mock.On("SetDataCoord", dataCoord)} } @@ -1021,7 +1021,7 @@ type MockQueryCoord_SetEtcdClient_Call struct { } // SetEtcdClient is a helper method to define mock.On call -// - etcdClient *clientv3.Client +// - etcdClient *clientv3.Client func (_e *MockQueryCoord_Expecter) SetEtcdClient(etcdClient interface{}) *MockQueryCoord_SetEtcdClient_Call { return &MockQueryCoord_SetEtcdClient_Call{Call: _e.mock.On("SetEtcdClient", etcdClient)} } @@ -1049,7 +1049,7 @@ type MockQueryCoord_SetQueryNodeCreator_Call struct { } // SetQueryNodeCreator is a helper method to define mock.On call -// - _a0 func(context.Context , string)(QueryNode , error) +// - _a0 func(context.Context , string)(QueryNode , error) func (_e *MockQueryCoord_Expecter) SetQueryNodeCreator(_a0 interface{}) *MockQueryCoord_SetQueryNodeCreator_Call { return &MockQueryCoord_SetQueryNodeCreator_Call{Call: _e.mock.On("SetQueryNodeCreator", _a0)} } @@ -1086,7 +1086,7 @@ type MockQueryCoord_SetRootCoord_Call struct { } // SetRootCoord is a helper method to define mock.On call -// - rootCoord RootCoord +// - rootCoord RootCoord func (_e *MockQueryCoord_Expecter) SetRootCoord(rootCoord interface{}) *MockQueryCoord_SetRootCoord_Call { return &MockQueryCoord_SetRootCoord_Call{Call: _e.mock.On("SetRootCoord", rootCoord)} } @@ -1132,8 +1132,8 @@ type MockQueryCoord_ShowCollections_Call struct { } // ShowCollections is a helper method to define 
mock.On call -// - ctx context.Context -// - req *querypb.ShowCollectionsRequest +// - ctx context.Context +// - req *querypb.ShowCollectionsRequest func (_e *MockQueryCoord_Expecter) ShowCollections(ctx interface{}, req interface{}) *MockQueryCoord_ShowCollections_Call { return &MockQueryCoord_ShowCollections_Call{Call: _e.mock.On("ShowCollections", ctx, req)} } @@ -1179,8 +1179,8 @@ type MockQueryCoord_ShowConfigurations_Call struct { } // ShowConfigurations is a helper method to define mock.On call -// - ctx context.Context -// - req *internalpb.ShowConfigurationsRequest +// - ctx context.Context +// - req *internalpb.ShowConfigurationsRequest func (_e *MockQueryCoord_Expecter) ShowConfigurations(ctx interface{}, req interface{}) *MockQueryCoord_ShowConfigurations_Call { return &MockQueryCoord_ShowConfigurations_Call{Call: _e.mock.On("ShowConfigurations", ctx, req)} } @@ -1226,8 +1226,8 @@ type MockQueryCoord_ShowPartitions_Call struct { } // ShowPartitions is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.ShowPartitionsRequest +// - ctx context.Context +// - req *querypb.ShowPartitionsRequest func (_e *MockQueryCoord_Expecter) ShowPartitions(ctx interface{}, req interface{}) *MockQueryCoord_ShowPartitions_Call { return &MockQueryCoord_ShowPartitions_Call{Call: _e.mock.On("ShowPartitions", ctx, req)} } @@ -1316,6 +1316,53 @@ func (_c *MockQueryCoord_Stop_Call) Return(_a0 error) *MockQueryCoord_Stop_Call return _c } +// SyncNewCreatedPartition provides a mock function with given fields: ctx, req +func (_m *MockQueryCoord) SyncNewCreatedPartition(ctx context.Context, req *querypb.SyncNewCreatedPartitionRequest) (*commonpb.Status, error) { + ret := _m.Called(ctx, req) + + var r0 *commonpb.Status + if rf, ok := ret.Get(0).(func(context.Context, *querypb.SyncNewCreatedPartitionRequest) *commonpb.Status); ok { + r0 = rf(ctx, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*commonpb.Status) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *querypb.SyncNewCreatedPartitionRequest) error); ok { + r1 = rf(ctx, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQueryCoord_SyncNewCreatedPartition_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SyncNewCreatedPartition' +type MockQueryCoord_SyncNewCreatedPartition_Call struct { + *mock.Call +} + +// SyncNewCreatedPartition is a helper method to define mock.On call +// - ctx context.Context +// - req *querypb.SyncNewCreatedPartitionRequest +func (_e *MockQueryCoord_Expecter) SyncNewCreatedPartition(ctx interface{}, req interface{}) *MockQueryCoord_SyncNewCreatedPartition_Call { + return &MockQueryCoord_SyncNewCreatedPartition_Call{Call: _e.mock.On("SyncNewCreatedPartition", ctx, req)} +} + +func (_c *MockQueryCoord_SyncNewCreatedPartition_Call) Run(run func(ctx context.Context, req *querypb.SyncNewCreatedPartitionRequest)) *MockQueryCoord_SyncNewCreatedPartition_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*querypb.SyncNewCreatedPartitionRequest)) + }) + return _c +} + +func (_c *MockQueryCoord_SyncNewCreatedPartition_Call) Return(_a0 *commonpb.Status, _a1 error) *MockQueryCoord_SyncNewCreatedPartition_Call { + _c.Call.Return(_a0, _a1) + return _c +} + // TransferNode provides a mock function with given fields: ctx, req func (_m *MockQueryCoord) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error) { ret := _m.Called(ctx, req) @@ 
-1345,8 +1392,8 @@ type MockQueryCoord_TransferNode_Call struct { } // TransferNode is a helper method to define mock.On call -// - ctx context.Context -// - req *milvuspb.TransferNodeRequest +// - ctx context.Context +// - req *milvuspb.TransferNodeRequest func (_e *MockQueryCoord_Expecter) TransferNode(ctx interface{}, req interface{}) *MockQueryCoord_TransferNode_Call { return &MockQueryCoord_TransferNode_Call{Call: _e.mock.On("TransferNode", ctx, req)} } @@ -1392,8 +1439,8 @@ type MockQueryCoord_TransferReplica_Call struct { } // TransferReplica is a helper method to define mock.On call -// - ctx context.Context -// - req *querypb.TransferReplicaRequest +// - ctx context.Context +// - req *querypb.TransferReplicaRequest func (_e *MockQueryCoord_Expecter) TransferReplica(ctx interface{}, req interface{}) *MockQueryCoord_TransferReplica_Call { return &MockQueryCoord_TransferReplica_Call{Call: _e.mock.On("TransferReplica", ctx, req)} } @@ -1421,7 +1468,7 @@ type MockQueryCoord_UpdateStateCode_Call struct { } // UpdateStateCode is a helper method to define mock.On call -// - stateCode commonpb.StateCode +// - stateCode commonpb.StateCode func (_e *MockQueryCoord_Expecter) UpdateStateCode(stateCode interface{}) *MockQueryCoord_UpdateStateCode_Call { return &MockQueryCoord_UpdateStateCode_Call{Call: _e.mock.On("UpdateStateCode", stateCode)} } diff --git a/internal/types/types.go b/internal/types/types.go index f6aab4108c..68fff63f26 100644 --- a/internal/types/types.go +++ b/internal/types/types.go @@ -1330,6 +1330,7 @@ type QueryNode interface { // All the sealed segments are loaded. LoadSegments(ctx context.Context, req *querypb.LoadSegmentsRequest) (*commonpb.Status, error) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) + LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) ReleaseSegments(ctx context.Context, req *querypb.ReleaseSegmentsRequest) (*commonpb.Status, error) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) @@ -1374,6 +1375,7 @@ type QueryCoord interface { ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) + SyncNewCreatedPartition(ctx context.Context, req *querypb.SyncNewCreatedPartitionRequest) (*commonpb.Status, error) LoadBalance(ctx context.Context, req *querypb.LoadBalanceRequest) (*commonpb.Status, error) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) diff --git a/internal/util/mock/grpc_querycoord_client.go b/internal/util/mock/grpc_querycoord_client.go index 6461fc6d85..6c32c943ad 100644 --- a/internal/util/mock/grpc_querycoord_client.go +++ b/internal/util/mock/grpc_querycoord_client.go @@ -82,6 +82,10 @@ func (m *GrpcQueryCoordClient) GetSegmentInfo(ctx context.Context, in *querypb.G return &querypb.GetSegmentInfoResponse{}, m.Err } +func (m *GrpcQueryCoordClient) SyncNewCreatedPartition(ctx context.Context, req *querypb.SyncNewCreatedPartitionRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { + return &commonpb.Status{}, 
m.Err +} + func (m *GrpcQueryCoordClient) LoadBalance(ctx context.Context, in *querypb.LoadBalanceRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { return &commonpb.Status{}, m.Err } diff --git a/internal/util/mock/grpc_querynode_client.go b/internal/util/mock/grpc_querynode_client.go index dade724ac5..7e9e7ec582 100644 --- a/internal/util/mock/grpc_querynode_client.go +++ b/internal/util/mock/grpc_querynode_client.go @@ -61,6 +61,10 @@ func (m *GrpcQueryNodeClient) ReleaseCollection(ctx context.Context, in *querypb return &commonpb.Status{}, m.Err } +func (m *GrpcQueryNodeClient) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { + return &commonpb.Status{}, m.Err +} + func (m *GrpcQueryNodeClient) ReleasePartitions(ctx context.Context, in *querypb.ReleasePartitionsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { return &commonpb.Status{}, m.Err } diff --git a/internal/util/mock/querynode_client.go b/internal/util/mock/querynode_client.go index 2f2dd6dfa6..22052d7de4 100644 --- a/internal/util/mock/querynode_client.go +++ b/internal/util/mock/querynode_client.go @@ -77,6 +77,10 @@ func (q QueryNodeClient) ReleaseCollection(ctx context.Context, req *querypb.Rel return q.grpcClient.ReleaseCollection(ctx, req) } +func (q QueryNodeClient) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) { + return q.grpcClient.LoadPartitions(ctx, req) +} + func (q QueryNodeClient) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) { return q.grpcClient.ReleasePartitions(ctx, req) } diff --git a/tests/python_client/testcases/test_collection.py b/tests/python_client/testcases/test_collection.py index 1bfe8047ba..b13d614931 100644 --- a/tests/python_client/testcases/test_collection.py +++ b/tests/python_client/testcases/test_collection.py @@ -1116,9 +1116,7 @@ class TestCollectionOperation(TestcaseBase): partition_w1.insert(cf.gen_default_list_data()) collection_w.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index) collection_w.load() - error = {ct.err_code: 5, ct.err_msg: f'load the partition after load collection is not supported'} - partition_w1.load(check_task=CheckTasks.err_res, - check_items=error) + partition_w1.load() @pytest.mark.tags(CaseLabel.L2) def test_load_collection_release_partition(self): @@ -1133,9 +1131,7 @@ class TestCollectionOperation(TestcaseBase): partition_w1.insert(cf.gen_default_list_data()) collection_w.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index) collection_w.load() - error = {ct.err_code: 1, ct.err_msg: f'releasing the partition after load collection is not supported'} - partition_w1.release(check_task=CheckTasks.err_res, - check_items=error) + partition_w1.release() @pytest.mark.tags(CaseLabel.L2) def test_load_collection_after_release_collection(self):
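Stepping back from the individual hunks: both new QueryNode handlers run the same guard sequence, a lifetime check followed by a Base.TargetID match against the node's own ServerID, before touching the meta replica, and the impl_test.go cases drive it by setting TargetID first to paramtable.GetNodeID() and then to -1. A stripped-down sketch of that guard, with msgBase and status as placeholder types since the commonpb definitions are not reproduced in this patch:

package querynodesketch

import "fmt"

// msgBase and status are placeholders for commonpb.MsgBase and commonpb.Status.
type msgBase struct{ targetID int64 }

type status struct {
	errorCode string
	reason    string
}

// checkTargetID mirrors the guard LoadPartitions and ReleasePartitions now
// perform: a request addressed to another node is answered with a
// NodeIDNotMatch status instead of being executed.
func checkTargetID(base msgBase, nodeID int64) status {
	if base.targetID != nodeID {
		return status{
			errorCode: "NodeIDNotMatch",
			reason:    fmt.Sprintf("request targets node %d, but this is node %d", base.targetID, nodeID),
		}
	}
	return status{errorCode: "Success"}
}

With load and release handled per partition on the node, the Python test changes above follow naturally: loading or releasing a single partition after the collection has been loaded is now a supported operation rather than an expected error.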