diff --git a/go.mod b/go.mod index 9825b7e5c8..9cfb40ad67 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/klauspost/compress v1.14.4 github.com/lingdor/stackerror v0.0.0-20191119040541-976d8885ed76 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d - github.com/milvus-io/milvus-proto/go-api v0.0.0-20230112125535-5f87a812202c + github.com/milvus-io/milvus-proto/go-api v0.0.0-20230129073344-87a125853a0b github.com/minio/minio-go/v7 v7.0.17 github.com/panjf2000/ants/v2 v2.4.8 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 3300fbe4a8..91d3b20aa1 100644 --- a/go.sum +++ b/go.sum @@ -491,8 +491,8 @@ github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyex github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b h1:TfeY0NxYxZzUfIfYe5qYDBzt4ZYRqzUjTR6CvUzjat8= github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b/go.mod h1:iwW+9cWfIzzDseEBCCeDSN5SD16Tidvy8cwQ7ZY8Qj4= -github.com/milvus-io/milvus-proto/go-api v0.0.0-20230112125535-5f87a812202c h1:74uRPm5WWagMe8bItOQ8QFuXcrUIWuWGAQ1GrwVM4J4= -github.com/milvus-io/milvus-proto/go-api v0.0.0-20230112125535-5f87a812202c/go.mod h1:148qnlmZ0Fdm1Fq+Mj/OW2uDoEP25g3mjh0vMGtkgmk= +github.com/milvus-io/milvus-proto/go-api v0.0.0-20230129073344-87a125853a0b h1:HoJ3J70COnaR3WQTA4gN70DkiaMRPkyLI6yXrPqpFiU= +github.com/milvus-io/milvus-proto/go-api v0.0.0-20230129073344-87a125853a0b/go.mod h1:148qnlmZ0Fdm1Fq+Mj/OW2uDoEP25g3mjh0vMGtkgmk= github.com/milvus-io/pulsar-client-go v0.6.10 h1:eqpJjU+/QX0iIhEo3nhOqMNXL+TyInAs1IAHZCrCM/A= github.com/milvus-io/pulsar-client-go v0.6.10/go.mod h1:lQqCkgwDF8YFYjKA+zOheTk1tev2B+bKj5j7+nm8M1w= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= diff --git a/internal/core/src/pb/common.pb.cc b/internal/core/src/pb/common.pb.cc index 8da8ef9393..f1e70656c4 100644 --- a/internal/core/src/pb/common.pb.cc +++ b/internal/core/src/pb/common.pb.cc @@ -372,7 +372,7 @@ const char descriptor_table_protodef_common_2eproto[] PROTOBUF_SECTION_VARIABLE( "\n\n\006Sealed\020\003\022\013\n\007Flushed\020\004\022\014\n\010Flushing\020\005\022\013" "\n\007Dropped\020\006\022\r\n\tImporting\020\007*>\n\017Placeholde" "rType\022\010\n\004None\020\000\022\020\n\014BinaryVector\020d\022\017\n\013Flo" - "atVector\020e*\277\016\n\007MsgType\022\r\n\tUndefined\020\000\022\024\n" + "atVector\020e*\300\016\n\007MsgType\022\r\n\tUndefined\020\000\022\024\n" "\020CreateCollection\020d\022\022\n\016DropCollection\020e\022" "\021\n\rHasCollection\020f\022\026\n\022DescribeCollection" "\020g\022\023\n\017ShowCollections\020h\022\024\n\020GetSystemConf" @@ -416,53 +416,53 @@ const char descriptor_table_protodef_common_2eproto[] PROTOBUF_SECTION_VARIABLE( "\n\020OperatePrivilege\020\306\014\022\020\n\013SelectGrant\020\307\014\022" "\033\n\026RefreshPolicyInfoCache\020\310\014\022\017\n\nListPoli" "cy\020\311\014\022\030\n\023CreateResourceGroup\020\244\r\022\026\n\021DropR" - "esourceGroup\020\245\r\022\026\n\021ListResourceGroup\020\246\r\022" - "\032\n\025DescribeResourceGroup\020\247\r\022\021\n\014TransferN" - "ode\020\250\r\022\024\n\017TransferReplica\020\251\r*\"\n\007DslType\022" - "\007\n\003Dsl\020\000\022\016\n\nBoolExprV1\020\001*B\n\017CompactionSt" - "ate\022\021\n\rUndefiedState\020\000\022\r\n\tExecuting\020\001\022\r\n" - "\tCompleted\020\002*X\n\020ConsistencyLevel\022\n\n\006Stro" - 
"ng\020\000\022\013\n\007Session\020\001\022\013\n\007Bounded\020\002\022\016\n\nEventu" - "ally\020\003\022\016\n\nCustomized\020\004*\236\001\n\013ImportState\022\021" - "\n\rImportPending\020\000\022\020\n\014ImportFailed\020\001\022\021\n\rI" - "mportStarted\020\002\022\023\n\017ImportPersisted\020\005\022\021\n\rI" - "mportFlushed\020\010\022\023\n\017ImportCompleted\020\006\022\032\n\026I" - "mportFailedAndCleaned\020\007*2\n\nObjectType\022\016\n" - "\nCollection\020\000\022\n\n\006Global\020\001\022\010\n\004User\020\002*\233\005\n\017" - "ObjectPrivilege\022\020\n\014PrivilegeAll\020\000\022\035\n\031Pri" - "vilegeCreateCollection\020\001\022\033\n\027PrivilegeDro" - "pCollection\020\002\022\037\n\033PrivilegeDescribeCollec" - "tion\020\003\022\034\n\030PrivilegeShowCollections\020\004\022\021\n\r" - "PrivilegeLoad\020\005\022\024\n\020PrivilegeRelease\020\006\022\027\n" - "\023PrivilegeCompaction\020\007\022\023\n\017PrivilegeInser" - "t\020\010\022\023\n\017PrivilegeDelete\020\t\022\032\n\026PrivilegeGet" - "Statistics\020\n\022\030\n\024PrivilegeCreateIndex\020\013\022\030" - "\n\024PrivilegeIndexDetail\020\014\022\026\n\022PrivilegeDro" - "pIndex\020\r\022\023\n\017PrivilegeSearch\020\016\022\022\n\016Privile" - "geFlush\020\017\022\022\n\016PrivilegeQuery\020\020\022\030\n\024Privile" - "geLoadBalance\020\021\022\023\n\017PrivilegeImport\020\022\022\034\n\030" - "PrivilegeCreateOwnership\020\023\022\027\n\023PrivilegeU" - "pdateUser\020\024\022\032\n\026PrivilegeDropOwnership\020\025\022" - "\034\n\030PrivilegeSelectOwnership\020\026\022\034\n\030Privile" - "geManageOwnership\020\027\022\027\n\023PrivilegeSelectUs" - "er\020\030\022\023\n\017PrivilegeUpsert\020\031*S\n\tStateCode\022\020" - "\n\014Initializing\020\000\022\013\n\007Healthy\020\001\022\014\n\010Abnorma" - "l\020\002\022\013\n\007StandBy\020\003\022\014\n\010Stopping\020\004*c\n\tLoadSt" - "ate\022\025\n\021LoadStateNotExist\020\000\022\024\n\020LoadStateN" - "otLoad\020\001\022\024\n\020LoadStateLoading\020\002\022\023\n\017LoadSt" - "ateLoaded\020\003:^\n\021privilege_ext_obj\022\037.googl" - "e.protobuf.MessageOptions\030\351\007 \001(\0132!.milvu" - "s.proto.common.PrivilegeExtBf\n\016io.milvus" - ".grpcB\013CommonProtoP\001Z1github.com/milvus-" - "io/milvus-proto/go-api/commonpb\240\001\001\252\002\016IO." 
- "Milvus.Grpcb\006proto3" + "esourceGroup\020\245\r\022\027\n\022ListResourceGroups\020\246\r" + "\022\032\n\025DescribeResourceGroup\020\247\r\022\021\n\014Transfer" + "Node\020\250\r\022\024\n\017TransferReplica\020\251\r*\"\n\007DslType" + "\022\007\n\003Dsl\020\000\022\016\n\nBoolExprV1\020\001*B\n\017CompactionS" + "tate\022\021\n\rUndefiedState\020\000\022\r\n\tExecuting\020\001\022\r" + "\n\tCompleted\020\002*X\n\020ConsistencyLevel\022\n\n\006Str" + "ong\020\000\022\013\n\007Session\020\001\022\013\n\007Bounded\020\002\022\016\n\nEvent" + "ually\020\003\022\016\n\nCustomized\020\004*\236\001\n\013ImportState\022" + "\021\n\rImportPending\020\000\022\020\n\014ImportFailed\020\001\022\021\n\r" + "ImportStarted\020\002\022\023\n\017ImportPersisted\020\005\022\021\n\r" + "ImportFlushed\020\010\022\023\n\017ImportCompleted\020\006\022\032\n\026" + "ImportFailedAndCleaned\020\007*2\n\nObjectType\022\016" + "\n\nCollection\020\000\022\n\n\006Global\020\001\022\010\n\004User\020\002*\233\005\n" + "\017ObjectPrivilege\022\020\n\014PrivilegeAll\020\000\022\035\n\031Pr" + "ivilegeCreateCollection\020\001\022\033\n\027PrivilegeDr" + "opCollection\020\002\022\037\n\033PrivilegeDescribeColle" + "ction\020\003\022\034\n\030PrivilegeShowCollections\020\004\022\021\n" + "\rPrivilegeLoad\020\005\022\024\n\020PrivilegeRelease\020\006\022\027" + "\n\023PrivilegeCompaction\020\007\022\023\n\017PrivilegeInse" + "rt\020\010\022\023\n\017PrivilegeDelete\020\t\022\032\n\026PrivilegeGe" + "tStatistics\020\n\022\030\n\024PrivilegeCreateIndex\020\013\022" + "\030\n\024PrivilegeIndexDetail\020\014\022\026\n\022PrivilegeDr" + "opIndex\020\r\022\023\n\017PrivilegeSearch\020\016\022\022\n\016Privil" + "egeFlush\020\017\022\022\n\016PrivilegeQuery\020\020\022\030\n\024Privil" + "egeLoadBalance\020\021\022\023\n\017PrivilegeImport\020\022\022\034\n" + "\030PrivilegeCreateOwnership\020\023\022\027\n\023Privilege" + "UpdateUser\020\024\022\032\n\026PrivilegeDropOwnership\020\025" + "\022\034\n\030PrivilegeSelectOwnership\020\026\022\034\n\030Privil" + "egeManageOwnership\020\027\022\027\n\023PrivilegeSelectU" + "ser\020\030\022\023\n\017PrivilegeUpsert\020\031*S\n\tStateCode\022" + "\020\n\014Initializing\020\000\022\013\n\007Healthy\020\001\022\014\n\010Abnorm" + "al\020\002\022\013\n\007StandBy\020\003\022\014\n\010Stopping\020\004*c\n\tLoadS" + "tate\022\025\n\021LoadStateNotExist\020\000\022\024\n\020LoadState" + "NotLoad\020\001\022\024\n\020LoadStateLoading\020\002\022\023\n\017LoadS" + "tateLoaded\020\003:^\n\021privilege_ext_obj\022\037.goog" + "le.protobuf.MessageOptions\030\351\007 \001(\0132!.milv" + "us.proto.common.PrivilegeExtBf\n\016io.milvu" + "s.grpcB\013CommonProtoP\001Z1github.com/milvus" + "-io/milvus-proto/go-api/commonpb\240\001\001\252\002\016IO" + ".Milvus.Grpcb\006proto3" ; static const ::_pbi::DescriptorTable* const descriptor_table_common_2eproto_deps[1] = { &::descriptor_table_google_2fprotobuf_2fdescriptor_2eproto, }; static ::_pbi::once_flag descriptor_table_common_2eproto_once; const ::_pbi::DescriptorTable descriptor_table_common_2eproto = { - false, false, 5859, descriptor_table_protodef_common_2eproto, + false, false, 5860, descriptor_table_protodef_common_2eproto, "common.proto", &descriptor_table_common_2eproto_once, descriptor_table_common_2eproto_deps, 1, 11, schemas, file_default_instances, TableStruct_common_2eproto::offsets, diff --git a/internal/core/src/pb/common.pb.h b/internal/core/src/pb/common.pb.h index 2aba9850c4..fb7ed9ea88 100644 --- 
a/internal/core/src/pb/common.pb.h +++ b/internal/core/src/pb/common.pb.h @@ -354,7 +354,7 @@ enum MsgType : int { ListPolicy = 1609, CreateResourceGroup = 1700, DropResourceGroup = 1701, - ListResourceGroup = 1702, + ListResourceGroups = 1702, DescribeResourceGroup = 1703, TransferNode = 1704, TransferReplica = 1705, diff --git a/internal/distributed/proxy/service.go b/internal/distributed/proxy/service.go index 7de66fd887..b4c874ab41 100644 --- a/internal/distributed/proxy/service.go +++ b/internal/distributed/proxy/service.go @@ -869,25 +869,25 @@ func (s *Server) RenameCollection(ctx context.Context, req *milvuspb.RenameColle } func (s *Server) CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) { - return nil, nil + return s.proxy.CreateResourceGroup(ctx, req) } func (s *Server) DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) { - return nil, nil + return s.proxy.DropResourceGroup(ctx, req) } func (s *Server) DescribeResourceGroup(ctx context.Context, req *milvuspb.DescribeResourceGroupRequest) (*milvuspb.DescribeResourceGroupResponse, error) { - return nil, nil + return s.proxy.DescribeResourceGroup(ctx, req) } func (s *Server) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error) { - return nil, nil + return s.proxy.TransferNode(ctx, req) } func (s *Server) TransferReplica(ctx context.Context, req *milvuspb.TransferReplicaRequest) (*commonpb.Status, error) { - return nil, nil + return s.proxy.TransferReplica(ctx, req) } -func (s *Server) ListResourceGroup(ctx context.Context, req *milvuspb.ListResourceGroupRequest) (*milvuspb.ListResourceGroupResponse, error) { - return nil, nil +func (s *Server) ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) { + return s.proxy.ListResourceGroups(ctx, req) } diff --git a/internal/distributed/proxy/service_test.go b/internal/distributed/proxy/service_test.go index 574d96d8c6..a9d186acca 100644 --- a/internal/distributed/proxy/service_test.go +++ b/internal/distributed/proxy/service_test.go @@ -293,7 +293,7 @@ func (m *MockRootCoord) RenameCollection(ctx context.Context, req *milvuspb.Rena return nil, nil } -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// type MockQueryCoord struct { MockBase initErr error @@ -408,6 +408,30 @@ func (m *MockQueryCoord) CheckHealth(ctx context.Context, req *milvuspb.CheckHea }, nil } +func (m *MockQueryCoord) CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) { + return nil, nil +} + +func (m *MockQueryCoord) DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) { + return nil, nil +} + +func (m *MockQueryCoord) DescribeResourceGroup(ctx context.Context, req *querypb.DescribeResourceGroupRequest) (*querypb.DescribeResourceGroupResponse, error) { + return nil, nil +} + +func (m *MockQueryCoord) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error) { + return nil, nil +} + +func (m *MockQueryCoord) TransferReplica(ctx context.Context, req *querypb.TransferReplicaRequest) (*commonpb.Status, error) { + return nil, nil +} + +func (m *MockQueryCoord) 
ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) { + return nil, nil +} + // ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// type MockDataCoord struct { MockBase @@ -935,7 +959,7 @@ func (m *MockProxy) TransferReplica(ctx context.Context, req *milvuspb.TransferR return nil, nil } -func (m *MockProxy) ListResourceGroup(ctx context.Context, req *milvuspb.ListResourceGroupRequest) (*milvuspb.ListResourceGroupResponse, error) { +func (m *MockProxy) ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) { return nil, nil } @@ -1380,6 +1404,36 @@ func Test_NewServer(t *testing.T) { assert.Nil(t, err) }) + t.Run("CreateResourceGroup", func(t *testing.T) { + _, err := server.CreateResourceGroup(ctx, nil) + assert.Nil(t, err) + }) + + t.Run("DropResourceGroup", func(t *testing.T) { + _, err := server.DropResourceGroup(ctx, nil) + assert.Nil(t, err) + }) + + t.Run("TransferNode", func(t *testing.T) { + _, err := server.TransferNode(ctx, nil) + assert.Nil(t, err) + }) + + t.Run("TransferReplica", func(t *testing.T) { + _, err := server.TransferReplica(ctx, nil) + assert.Nil(t, err) + }) + + t.Run("ListResourceGroups", func(t *testing.T) { + _, err := server.ListResourceGroups(ctx, nil) + assert.Nil(t, err) + }) + + t.Run("DescribeResourceGroup", func(t *testing.T) { + _, err := server.DescribeResourceGroup(ctx, nil) + assert.Nil(t, err) + }) + err = server.Stop() assert.Nil(t, err) diff --git a/internal/distributed/querycoord/client/client.go b/internal/distributed/querycoord/client/client.go index 97e2dddef2..61e681f24b 100644 --- a/internal/distributed/querycoord/client/client.go +++ b/internal/distributed/querycoord/client/client.go @@ -418,3 +418,111 @@ func (c *Client) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthReque } return ret.(*milvuspb.CheckHealthResponse), err } + +func (c *Client) CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) { + req = typeutil.Clone(req) + commonpbutil.UpdateMsgBase( + req.GetBase(), + commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)), + ) + ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) { + if !funcutil.CheckCtxValid(ctx) { + return nil, ctx.Err() + } + return client.CreateResourceGroup(ctx, req) + }) + if err != nil || ret == nil { + return nil, err + } + return ret.(*commonpb.Status), err +} + +func (c *Client) DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) { + req = typeutil.Clone(req) + commonpbutil.UpdateMsgBase( + req.GetBase(), + commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)), + ) + ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) { + if !funcutil.CheckCtxValid(ctx) { + return nil, ctx.Err() + } + return client.DropResourceGroup(ctx, req) + }) + if err != nil || ret == nil { + return nil, err + } + return ret.(*commonpb.Status), err +} + +func (c *Client) DescribeResourceGroup(ctx context.Context, req *querypb.DescribeResourceGroupRequest) (*querypb.DescribeResourceGroupResponse, error) { + req = typeutil.Clone(req) + commonpbutil.UpdateMsgBase( + req.GetBase(), + commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), 
commonpbutil.WithTargetID(c.sess.ServerID)), + ) + ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) { + if !funcutil.CheckCtxValid(ctx) { + return nil, ctx.Err() + } + return client.DescribeResourceGroup(ctx, req) + }) + if err != nil || ret == nil { + return nil, err + } + return ret.(*querypb.DescribeResourceGroupResponse), err +} + +func (c *Client) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error) { + req = typeutil.Clone(req) + commonpbutil.UpdateMsgBase( + req.GetBase(), + commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)), + ) + ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) { + if !funcutil.CheckCtxValid(ctx) { + return nil, ctx.Err() + } + return client.TransferNode(ctx, req) + }) + if err != nil || ret == nil { + return nil, err + } + return ret.(*commonpb.Status), err +} + +func (c *Client) TransferReplica(ctx context.Context, req *querypb.TransferReplicaRequest) (*commonpb.Status, error) { + req = typeutil.Clone(req) + commonpbutil.UpdateMsgBase( + req.GetBase(), + commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)), + ) + ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) { + if !funcutil.CheckCtxValid(ctx) { + return nil, ctx.Err() + } + return client.TransferReplica(ctx, req) + }) + if err != nil || ret == nil { + return nil, err + } + return ret.(*commonpb.Status), err +} + +func (c *Client) ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) { + req = typeutil.Clone(req) + commonpbutil.UpdateMsgBase( + req.GetBase(), + commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)), + ) + ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) { + if !funcutil.CheckCtxValid(ctx) { + return nil, ctx.Err() + } + return client.ListResourceGroups(ctx, req) + }) + if err != nil || ret == nil { + return nil, err + } + return ret.(*milvuspb.ListResourceGroupsResponse), err +} diff --git a/internal/distributed/querycoord/client/client_test.go b/internal/distributed/querycoord/client/client_test.go index 6fb49d40a6..9a3984c59b 100644 --- a/internal/distributed/querycoord/client/client_test.go +++ b/internal/distributed/querycoord/client/client_test.go @@ -124,6 +124,24 @@ func Test_NewClient(t *testing.T) { r20, err := client.CheckHealth(ctx, nil) retCheck(retNotNil, r20, err) + + r21, err := client.CreateResourceGroup(ctx, nil) + retCheck(retNotNil, r21, err) + + r22, err := client.DropResourceGroup(ctx, nil) + retCheck(retNotNil, r22, err) + + r23, err := client.TransferNode(ctx, nil) + retCheck(retNotNil, r23, err) + + r24, err := client.TransferReplica(ctx, nil) + retCheck(retNotNil, r24, err) + + r26, err := client.ListResourceGroups(ctx, nil) + retCheck(retNotNil, r26, err) + + r27, err := client.DescribeResourceGroup(ctx, nil) + retCheck(retNotNil, r27, err) } client.grpcClient = &mock.GRPCClientBase[querypb.QueryCoordClient]{ diff --git a/internal/distributed/querycoord/service.go b/internal/distributed/querycoord/service.go index 61484a1730..2378cd3a30 100644 --- a/internal/distributed/querycoord/service.go +++ b/internal/distributed/querycoord/service.go @@ -364,3 +364,27 @@ func (s *Server) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeade func (s 
*Server) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error) { return s.queryCoord.CheckHealth(ctx, req) } + +func (s *Server) CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) { + return s.queryCoord.CreateResourceGroup(ctx, req) +} + +func (s *Server) DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) { + return s.queryCoord.DropResourceGroup(ctx, req) +} + +func (s *Server) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error) { + return s.queryCoord.TransferNode(ctx, req) +} + +func (s *Server) TransferReplica(ctx context.Context, req *querypb.TransferReplicaRequest) (*commonpb.Status, error) { + return s.queryCoord.TransferReplica(ctx, req) +} + +func (s *Server) ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) { + return s.queryCoord.ListResourceGroups(ctx, req) +} + +func (s *Server) DescribeResourceGroup(ctx context.Context, req *querypb.DescribeResourceGroupRequest) (*querypb.DescribeResourceGroupResponse, error) { + return s.queryCoord.DescribeResourceGroup(ctx, req) +} diff --git a/internal/distributed/querycoord/service_test.go b/internal/distributed/querycoord/service_test.go index 594e553b7e..7c540f141e 100644 --- a/internal/distributed/querycoord/service_test.go +++ b/internal/distributed/querycoord/service_test.go @@ -162,6 +162,34 @@ func (m *MockQueryCoord) CheckHealth(ctx context.Context, req *milvuspb.CheckHea }, m.err } +func (m *MockQueryCoord) CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) { + return m.status, nil +} + +func (m *MockQueryCoord) DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) { + return m.status, nil +} + +func (m *MockQueryCoord) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error) { + return m.status, nil +} + +func (m *MockQueryCoord) TransferReplica(ctx context.Context, req *querypb.TransferReplicaRequest) (*commonpb.Status, error) { + return m.status, nil +} + +func (m *MockQueryCoord) ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) { + return &milvuspb.ListResourceGroupsResponse{ + Status: m.status, + }, nil +} + +func (m *MockQueryCoord) DescribeResourceGroup(ctx context.Context, req *querypb.DescribeResourceGroupRequest) (*querypb.DescribeResourceGroupResponse, error) { + return &querypb.DescribeResourceGroupResponse{ + Status: m.status, + }, nil +} + // ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// type MockRootCoord struct { types.RootCoord @@ -371,6 +399,43 @@ func Test_NewServer(t *testing.T) { assert.Equal(t, true, ret.IsHealthy) }) + t.Run("CreateResourceGroup", func(t *testing.T) { + resp, err := server.CreateResourceGroup(ctx, nil) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) + }) + + t.Run("DropResourceGroup", func(t *testing.T) { + resp, err := server.DropResourceGroup(ctx, nil) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) + }) + + t.Run("TransferNode", func(t *testing.T) { + resp, err := server.TransferNode(ctx, nil) + assert.Nil(t, err) + assert.Equal(t, 
commonpb.ErrorCode_Success, resp.ErrorCode) + }) + + t.Run("TransferReplica", func(t *testing.T) { + resp, err := server.TransferReplica(ctx, nil) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) + }) + + t.Run("ListResourceGroups", func(t *testing.T) { + req := &milvuspb.ListResourceGroupsRequest{} + resp, err := server.ListResourceGroups(ctx, req) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) + }) + + t.Run("DescribeResourceGroup", func(t *testing.T) { + resp, err := server.DescribeResourceGroup(ctx, nil) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) + }) + err = server.Stop() assert.Nil(t, err) } diff --git a/internal/metastore/catalog.go b/internal/metastore/catalog.go index 1c69f50df7..be488e83c6 100644 --- a/internal/metastore/catalog.go +++ b/internal/metastore/catalog.go @@ -159,4 +159,7 @@ type QueryCoordCatalog interface { ReleasePartition(collection int64, partitions ...int64) error ReleaseReplicas(collectionID int64) error ReleaseReplica(collection, replica int64) error + SaveResourceGroup(rgs ...*querypb.ResourceGroup) error + RemoveResourceGroup(rgName string) error + GetResourceGroups() ([]*querypb.ResourceGroup, error) } diff --git a/internal/proto/query_coord.proto b/internal/proto/query_coord.proto index 4da987699f..55fbc8305d 100644 --- a/internal/proto/query_coord.proto +++ b/internal/proto/query_coord.proto @@ -36,6 +36,13 @@ service QueryCoord { rpc GetShardLeaders(GetShardLeadersRequest) returns (GetShardLeadersResponse) {} rpc CheckHealth(milvus.CheckHealthRequest) returns (milvus.CheckHealthResponse) {} + + rpc CreateResourceGroup(milvus.CreateResourceGroupRequest) returns (common.Status) {} + rpc DropResourceGroup(milvus.DropResourceGroupRequest) returns (common.Status) {} + rpc TransferNode(milvus.TransferNodeRequest) returns (common.Status) {} + rpc TransferReplica(TransferReplicaRequest) returns (common.Status) {} + rpc ListResourceGroups(milvus.ListResourceGroupsRequest) returns (milvus.ListResourceGroupsResponse) {} + rpc DescribeResourceGroup(DescribeResourceGroupRequest) returns (DescribeResourceGroupResponse) {} } service QueryNode { @@ -101,6 +108,8 @@ message LoadCollectionRequest { // fieldID -> indexID map<int64, int64> field_indexID = 6; bool refresh = 7; + // resource group names + repeated string resource_groups = 8; } message ReleaseCollectionRequest { @@ -128,6 +137,8 @@ message LoadPartitionsRequest { // fieldID -> indexID map<int64, int64> field_indexID = 7; bool refresh = 8; + // resource group names + repeated string resource_groups = 9; } message ReleasePartitionsRequest { @@ -488,6 +499,7 @@ message Replica { int64 ID = 1; int64 collectionID = 2; repeated int64 nodes = 3; + string resource_group = 4; } enum SyncType { @@ -510,3 +522,39 @@ message SyncDistributionRequest { repeated SyncAction actions = 4; } +message ResourceGroup { + string name = 1; + int32 capacity = 2; + repeated int64 nodes = 3; +} + +// transfer `replicaNum` replicas in `collectionID` from `source_resource_group` to `target_resource_groups` +message TransferReplicaRequest { + common.MsgBase base = 1; + string source_resource_group = 2; + string target_resource_group = 3; + int64 collectionID = 4; + int64 num_replica = 5; +} + +message DescribeResourceGroupRequest { + common.MsgBase base = 1; + string resource_group = 2; +} + +message DescribeResourceGroupResponse { + common.Status status = 1; + ResourceGroupInfo resource_group = 2; +} + +message ResourceGroupInfo { + string name
= 1; + int32 capacity = 2; + int32 num_available_node = 3; + // collection id -> loaded replica num + map<int64, int32> num_loaded_replica = 4; + // collection id -> accessed other rg's node num + map<int64, int32> num_outgoing_node = 5; + // collection id -> be accessed node num by other rg + map<int64, int32> num_incoming_node = 6; +} \ No newline at end of file diff --git a/internal/proto/querypb/query_coord.pb.go b/internal/proto/querypb/query_coord.pb.go index 78ed02b736..5c398c846b 100644 --- a/internal/proto/querypb/query_coord.pb.go +++ b/internal/proto/querypb/query_coord.pb.go @@ -460,11 +460,13 @@ type LoadCollectionRequest struct { Schema *schemapb.CollectionSchema `protobuf:"bytes,4,opt,name=schema,proto3" json:"schema,omitempty"` ReplicaNumber int32 `protobuf:"varint,5,opt,name=replica_number,json=replicaNumber,proto3" json:"replica_number,omitempty"` // fieldID -> indexID - FieldIndexID map[int64]int64 `protobuf:"bytes,6,rep,name=field_indexID,json=fieldIndexID,proto3" json:"field_indexID,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - Refresh bool `protobuf:"varint,7,opt,name=refresh,proto3" json:"refresh,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + FieldIndexID map[int64]int64 `protobuf:"bytes,6,rep,name=field_indexID,json=fieldIndexID,proto3" json:"field_indexID,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + Refresh bool `protobuf:"varint,7,opt,name=refresh,proto3" json:"refresh,omitempty"` + // resource group names + ResourceGroups []string `protobuf:"bytes,8,rep,name=resource_groups,json=resourceGroups,proto3" json:"resource_groups,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *LoadCollectionRequest) Reset() { *m = LoadCollectionRequest{} } @@ -541,6 +543,13 @@ func (m *LoadCollectionRequest) GetRefresh() bool { return false } +func (m *LoadCollectionRequest) GetResourceGroups() []string { + if m != nil { + return m.ResourceGroups + } + return nil +} + type ReleaseCollectionRequest struct { Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"` DbID int64 `protobuf:"varint,2,opt,name=dbID,proto3" json:"dbID,omitempty"` @@ -683,11 +692,13 @@ type LoadPartitionsRequest struct { Schema *schemapb.CollectionSchema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` ReplicaNumber int32 `protobuf:"varint,6,opt,name=replica_number,json=replicaNumber,proto3" json:"replica_number,omitempty"` // fieldID -> indexID - FieldIndexID map[int64]int64 `protobuf:"bytes,7,rep,name=field_indexID,json=fieldIndexID,proto3" json:"field_indexID,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - Refresh bool `protobuf:"varint,8,opt,name=refresh,proto3" json:"refresh,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + FieldIndexID map[int64]int64 `protobuf:"bytes,7,rep,name=field_indexID,json=fieldIndexID,proto3" json:"field_indexID,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + Refresh bool `protobuf:"varint,8,opt,name=refresh,proto3" json:"refresh,omitempty"` + // resource group names + ResourceGroups []string `protobuf:"bytes,9,rep,name=resource_groups,json=resourceGroups,proto3" json:"resource_groups,omitempty"` +
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *LoadPartitionsRequest) Reset() { *m = LoadPartitionsRequest{} } @@ -771,6 +782,13 @@ func (m *LoadPartitionsRequest) GetRefresh() bool { return false } +func (m *LoadPartitionsRequest) GetResourceGroups() []string { + if m != nil { + return m.ResourceGroups + } + return nil +} + type ReleasePartitionsRequest struct { Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"` DbID int64 `protobuf:"varint,2,opt,name=dbID,proto3" json:"dbID,omitempty"` @@ -3497,6 +3515,7 @@ type Replica struct { ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` CollectionID int64 `protobuf:"varint,2,opt,name=collectionID,proto3" json:"collectionID,omitempty"` Nodes []int64 `protobuf:"varint,3,rep,packed,name=nodes,proto3" json:"nodes,omitempty"` + ResourceGroup string `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3" json:"resource_group,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -3548,6 +3567,13 @@ func (m *Replica) GetNodes() []int64 { return nil } +func (m *Replica) GetResourceGroup() string { + if m != nil { + return m.ResourceGroup + } + return "" +} + type SyncAction struct { Type SyncType `protobuf:"varint,1,opt,name=type,proto3,enum=milvus.proto.query.SyncType" json:"type,omitempty"` PartitionID int64 `protobuf:"varint,2,opt,name=partitionID,proto3" json:"partitionID,omitempty"` @@ -3682,6 +3708,309 @@ func (m *SyncDistributionRequest) GetActions() []*SyncAction { return nil } +type ResourceGroup struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Capacity int32 `protobuf:"varint,2,opt,name=capacity,proto3" json:"capacity,omitempty"` + Nodes []int64 `protobuf:"varint,3,rep,packed,name=nodes,proto3" json:"nodes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceGroup) Reset() { *m = ResourceGroup{} } +func (m *ResourceGroup) String() string { return proto.CompactTextString(m) } +func (*ResourceGroup) ProtoMessage() {} +func (*ResourceGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_aab7cc9a69ed26e8, []int{49} +} + +func (m *ResourceGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceGroup.Unmarshal(m, b) +} +func (m *ResourceGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceGroup.Marshal(b, m, deterministic) +} +func (m *ResourceGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceGroup.Merge(m, src) +} +func (m *ResourceGroup) XXX_Size() int { + return xxx_messageInfo_ResourceGroup.Size(m) +} +func (m *ResourceGroup) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceGroup proto.InternalMessageInfo + +func (m *ResourceGroup) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ResourceGroup) GetCapacity() int32 { + if m != nil { + return m.Capacity + } + return 0 +} + +func (m *ResourceGroup) GetNodes() []int64 { + if m != nil { + return m.Nodes + } + return nil +} + +// transfer `replicaNum` replicas in `collectionID` from `source_resource_group` to `target_resource_groups` +type TransferReplicaRequest struct { + Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"` + SourceResourceGroup string 
`protobuf:"bytes,2,opt,name=source_resource_group,json=sourceResourceGroup,proto3" json:"source_resource_group,omitempty"` + TargetResourceGroup string `protobuf:"bytes,3,opt,name=target_resource_group,json=targetResourceGroup,proto3" json:"target_resource_group,omitempty"` + CollectionID int64 `protobuf:"varint,4,opt,name=collectionID,proto3" json:"collectionID,omitempty"` + NumReplica int64 `protobuf:"varint,5,opt,name=num_replica,json=numReplica,proto3" json:"num_replica,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TransferReplicaRequest) Reset() { *m = TransferReplicaRequest{} } +func (m *TransferReplicaRequest) String() string { return proto.CompactTextString(m) } +func (*TransferReplicaRequest) ProtoMessage() {} +func (*TransferReplicaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aab7cc9a69ed26e8, []int{50} +} + +func (m *TransferReplicaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TransferReplicaRequest.Unmarshal(m, b) +} +func (m *TransferReplicaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TransferReplicaRequest.Marshal(b, m, deterministic) +} +func (m *TransferReplicaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferReplicaRequest.Merge(m, src) +} +func (m *TransferReplicaRequest) XXX_Size() int { + return xxx_messageInfo_TransferReplicaRequest.Size(m) +} +func (m *TransferReplicaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TransferReplicaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferReplicaRequest proto.InternalMessageInfo + +func (m *TransferReplicaRequest) GetBase() *commonpb.MsgBase { + if m != nil { + return m.Base + } + return nil +} + +func (m *TransferReplicaRequest) GetSourceResourceGroup() string { + if m != nil { + return m.SourceResourceGroup + } + return "" +} + +func (m *TransferReplicaRequest) GetTargetResourceGroup() string { + if m != nil { + return m.TargetResourceGroup + } + return "" +} + +func (m *TransferReplicaRequest) GetCollectionID() int64 { + if m != nil { + return m.CollectionID + } + return 0 +} + +func (m *TransferReplicaRequest) GetNumReplica() int64 { + if m != nil { + return m.NumReplica + } + return 0 +} + +type DescribeResourceGroupRequest struct { + Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"` + ResourceGroup string `protobuf:"bytes,2,opt,name=resource_group,json=resourceGroup,proto3" json:"resource_group,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescribeResourceGroupRequest) Reset() { *m = DescribeResourceGroupRequest{} } +func (m *DescribeResourceGroupRequest) String() string { return proto.CompactTextString(m) } +func (*DescribeResourceGroupRequest) ProtoMessage() {} +func (*DescribeResourceGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aab7cc9a69ed26e8, []int{51} +} + +func (m *DescribeResourceGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescribeResourceGroupRequest.Unmarshal(m, b) +} +func (m *DescribeResourceGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescribeResourceGroupRequest.Marshal(b, m, deterministic) +} +func (m *DescribeResourceGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescribeResourceGroupRequest.Merge(m, src) +} +func (m *DescribeResourceGroupRequest) 
XXX_Size() int { + return xxx_messageInfo_DescribeResourceGroupRequest.Size(m) +} +func (m *DescribeResourceGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DescribeResourceGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DescribeResourceGroupRequest proto.InternalMessageInfo + +func (m *DescribeResourceGroupRequest) GetBase() *commonpb.MsgBase { + if m != nil { + return m.Base + } + return nil +} + +func (m *DescribeResourceGroupRequest) GetResourceGroup() string { + if m != nil { + return m.ResourceGroup + } + return "" +} + +type DescribeResourceGroupResponse struct { + Status *commonpb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + ResourceGroup *ResourceGroupInfo `protobuf:"bytes,2,opt,name=resource_group,json=resourceGroup,proto3" json:"resource_group,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescribeResourceGroupResponse) Reset() { *m = DescribeResourceGroupResponse{} } +func (m *DescribeResourceGroupResponse) String() string { return proto.CompactTextString(m) } +func (*DescribeResourceGroupResponse) ProtoMessage() {} +func (*DescribeResourceGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_aab7cc9a69ed26e8, []int{52} +} + +func (m *DescribeResourceGroupResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescribeResourceGroupResponse.Unmarshal(m, b) +} +func (m *DescribeResourceGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescribeResourceGroupResponse.Marshal(b, m, deterministic) +} +func (m *DescribeResourceGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescribeResourceGroupResponse.Merge(m, src) +} +func (m *DescribeResourceGroupResponse) XXX_Size() int { + return xxx_messageInfo_DescribeResourceGroupResponse.Size(m) +} +func (m *DescribeResourceGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DescribeResourceGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DescribeResourceGroupResponse proto.InternalMessageInfo + +func (m *DescribeResourceGroupResponse) GetStatus() *commonpb.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *DescribeResourceGroupResponse) GetResourceGroup() *ResourceGroupInfo { + if m != nil { + return m.ResourceGroup + } + return nil +} + +type ResourceGroupInfo struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Capacity int32 `protobuf:"varint,2,opt,name=capacity,proto3" json:"capacity,omitempty"` + NumAvailableNode int32 `protobuf:"varint,3,opt,name=num_available_node,json=numAvailableNode,proto3" json:"num_available_node,omitempty"` + // collection id -> loaded replica num + NumLoadedReplica map[int64]int32 `protobuf:"bytes,4,rep,name=num_loaded_replica,json=numLoadedReplica,proto3" json:"num_loaded_replica,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + // collection id -> accessed other rg's node num + NumOutgoingNode map[int64]int32 `protobuf:"bytes,5,rep,name=num_outgoing_node,json=numOutgoingNode,proto3" json:"num_outgoing_node,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + // collection id -> be accessed node num by other rg + NumIncomingNode map[int64]int32 `protobuf:"bytes,6,rep,name=num_incoming_node,json=numIncomingNode,proto3" json:"num_incoming_node,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" 
protobuf_val:"varint,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceGroupInfo) Reset() { *m = ResourceGroupInfo{} } +func (m *ResourceGroupInfo) String() string { return proto.CompactTextString(m) } +func (*ResourceGroupInfo) ProtoMessage() {} +func (*ResourceGroupInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_aab7cc9a69ed26e8, []int{53} +} + +func (m *ResourceGroupInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceGroupInfo.Unmarshal(m, b) +} +func (m *ResourceGroupInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceGroupInfo.Marshal(b, m, deterministic) +} +func (m *ResourceGroupInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceGroupInfo.Merge(m, src) +} +func (m *ResourceGroupInfo) XXX_Size() int { + return xxx_messageInfo_ResourceGroupInfo.Size(m) +} +func (m *ResourceGroupInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceGroupInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceGroupInfo proto.InternalMessageInfo + +func (m *ResourceGroupInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ResourceGroupInfo) GetCapacity() int32 { + if m != nil { + return m.Capacity + } + return 0 +} + +func (m *ResourceGroupInfo) GetNumAvailableNode() int32 { + if m != nil { + return m.NumAvailableNode + } + return 0 +} + +func (m *ResourceGroupInfo) GetNumLoadedReplica() map[int64]int32 { + if m != nil { + return m.NumLoadedReplica + } + return nil +} + +func (m *ResourceGroupInfo) GetNumOutgoingNode() map[int64]int32 { + if m != nil { + return m.NumOutgoingNode + } + return nil +} + +func (m *ResourceGroupInfo) GetNumIncomingNode() map[int64]int32 { + if m != nil { + return m.NumIncomingNode + } + return nil +} + func init() { proto.RegisterEnum("milvus.proto.query.DataScope", DataScope_name, DataScope_value) proto.RegisterEnum("milvus.proto.query.PartitionState", PartitionState_name, PartitionState_value) @@ -3745,247 +4074,280 @@ func init() { proto.RegisterType((*Replica)(nil), "milvus.proto.query.Replica") proto.RegisterType((*SyncAction)(nil), "milvus.proto.query.SyncAction") proto.RegisterType((*SyncDistributionRequest)(nil), "milvus.proto.query.SyncDistributionRequest") + proto.RegisterType((*ResourceGroup)(nil), "milvus.proto.query.ResourceGroup") + proto.RegisterType((*TransferReplicaRequest)(nil), "milvus.proto.query.TransferReplicaRequest") + proto.RegisterType((*DescribeResourceGroupRequest)(nil), "milvus.proto.query.DescribeResourceGroupRequest") + proto.RegisterType((*DescribeResourceGroupResponse)(nil), "milvus.proto.query.DescribeResourceGroupResponse") + proto.RegisterType((*ResourceGroupInfo)(nil), "milvus.proto.query.ResourceGroupInfo") + proto.RegisterMapType((map[int64]int32)(nil), "milvus.proto.query.ResourceGroupInfo.NumIncomingNodeEntry") + proto.RegisterMapType((map[int64]int32)(nil), "milvus.proto.query.ResourceGroupInfo.NumLoadedReplicaEntry") + proto.RegisterMapType((map[int64]int32)(nil), "milvus.proto.query.ResourceGroupInfo.NumOutgoingNodeEntry") } func init() { proto.RegisterFile("query_coord.proto", fileDescriptor_aab7cc9a69ed26e8) } var fileDescriptor_aab7cc9a69ed26e8 = []byte{ - // 3760 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x8c, 0x1c, 0x49, - 0x56, 0xce, 0xfa, 0x74, 0x57, 0xbd, 0xfa, 0x74, 0x76, 0xb4, 0x3f, 0xb5, 0xb5, 0x1e, 0x4f, 0x4f, 
- 0x7a, 0x3c, 0xd3, 0xf4, 0xec, 0xb4, 0x67, 0xdb, 0xbb, 0x83, 0x97, 0xdd, 0xd5, 0x62, 0x77, 0xaf, - 0x7b, 0x9a, 0x19, 0x7b, 0x9b, 0x2c, 0xdb, 0xa0, 0xd1, 0xb0, 0xb5, 0x59, 0x95, 0x51, 0xd5, 0x29, - 0x67, 0x65, 0x96, 0x33, 0xb2, 0xda, 0xd3, 0xc3, 0x95, 0xcb, 0x22, 0x40, 0x82, 0x03, 0x27, 0xc4, - 0x69, 0x91, 0x40, 0x62, 0x24, 0x0e, 0x70, 0xe3, 0x80, 0x84, 0x04, 0x9c, 0x10, 0x37, 0x8e, 0x5c, - 0x91, 0x40, 0x42, 0x42, 0xda, 0x03, 0x07, 0x24, 0x14, 0xbf, 0xfc, 0x46, 0x76, 0xa5, 0xdd, 0x9e, - 0x1f, 0xda, 0x5b, 0xe5, 0x8b, 0xcf, 0x7b, 0xf1, 0xfe, 0xef, 0x45, 0x14, 0xac, 0x3f, 0x5d, 0xe0, - 0xe0, 0x74, 0x38, 0xf6, 0xfd, 0xc0, 0xde, 0x99, 0x07, 0x7e, 0xe8, 0x23, 0x34, 0x73, 0xdc, 0x93, - 0x05, 0xe1, 0x5f, 0x3b, 0x6c, 0xbc, 0xdf, 0x1e, 0xfb, 0xb3, 0x99, 0xef, 0x71, 0x58, 0xbf, 0x9d, - 0x9c, 0xd1, 0xef, 0x3a, 0x5e, 0x88, 0x03, 0xcf, 0x72, 0xe5, 0x28, 0x19, 0x1f, 0xe3, 0x99, 0x25, - 0xbe, 0x74, 0xdb, 0x0a, 0xad, 0xe4, 0xfe, 0xc6, 0xef, 0x68, 0x70, 0x79, 0x70, 0xec, 0x3f, 0xdb, - 0xf3, 0x5d, 0x17, 0x8f, 0x43, 0xc7, 0xf7, 0x88, 0x89, 0x9f, 0x2e, 0x30, 0x09, 0xd1, 0x3b, 0x50, - 0x1b, 0x59, 0x04, 0xf7, 0xb4, 0x4d, 0x6d, 0xab, 0xb5, 0x7b, 0x75, 0x27, 0x45, 0x89, 0x20, 0xe1, - 0x3e, 0x99, 0xde, 0xb5, 0x08, 0x36, 0xd9, 0x4c, 0x84, 0xa0, 0x66, 0x8f, 0x0e, 0xf7, 0x7b, 0x95, - 0x4d, 0x6d, 0xab, 0x6a, 0xb2, 0xdf, 0xe8, 0x75, 0xe8, 0x8c, 0xa3, 0xbd, 0x0f, 0xf7, 0x49, 0xaf, - 0xba, 0x59, 0xdd, 0xaa, 0x9a, 0x69, 0xa0, 0xf1, 0x6f, 0x1a, 0x5c, 0xc9, 0x91, 0x41, 0xe6, 0xbe, - 0x47, 0x30, 0xba, 0x05, 0x2b, 0x24, 0xb4, 0xc2, 0x05, 0x11, 0x94, 0x7c, 0x5d, 0x49, 0xc9, 0x80, - 0x4d, 0x31, 0xc5, 0xd4, 0x3c, 0xda, 0x8a, 0x02, 0x2d, 0xfa, 0x26, 0x5c, 0x74, 0xbc, 0xfb, 0x78, - 0xe6, 0x07, 0xa7, 0xc3, 0x39, 0x0e, 0xc6, 0xd8, 0x0b, 0xad, 0x29, 0x96, 0x34, 0x6e, 0xc8, 0xb1, - 0xa3, 0x78, 0x08, 0xbd, 0x0b, 0x57, 0xb8, 0x94, 0x08, 0x0e, 0x4e, 0x9c, 0x31, 0x1e, 0x5a, 0x27, - 0x96, 0xe3, 0x5a, 0x23, 0x17, 0xf7, 0x6a, 0x9b, 0xd5, 0xad, 0x86, 0x79, 0x89, 0x0d, 0x0f, 0xf8, - 0xe8, 0x1d, 0x39, 0x68, 0xfc, 0x99, 0x06, 0x97, 0xe8, 0x09, 0x8f, 0xac, 0x20, 0x74, 0x3e, 0x03, - 0x3e, 0x1b, 0xd0, 0x4e, 0x9e, 0xad, 0x57, 0x65, 0x63, 0x29, 0x18, 0x9d, 0x33, 0x97, 0xe8, 0x29, - 0x4f, 0x6a, 0xec, 0x98, 0x29, 0x98, 0xf1, 0x33, 0xa1, 0x10, 0x49, 0x3a, 0xcf, 0x23, 0x88, 0x2c, - 0xce, 0x4a, 0x1e, 0xe7, 0x0b, 0x88, 0xc1, 0xf8, 0x59, 0x15, 0x2e, 0x7d, 0xe0, 0x5b, 0x76, 0xac, - 0x30, 0x9f, 0x3f, 0x3b, 0xbf, 0x0f, 0x2b, 0xdc, 0xba, 0x7a, 0x35, 0x86, 0xeb, 0x46, 0x1a, 0x97, - 0xb0, 0xbc, 0x98, 0xc2, 0x01, 0x03, 0x98, 0x62, 0x11, 0xba, 0x01, 0xdd, 0x00, 0xcf, 0x5d, 0x67, - 0x6c, 0x0d, 0xbd, 0xc5, 0x6c, 0x84, 0x83, 0x5e, 0x7d, 0x53, 0xdb, 0xaa, 0x9b, 0x1d, 0x01, 0x7d, - 0xc0, 0x80, 0xe8, 0x27, 0xd0, 0x99, 0x38, 0xd8, 0xb5, 0x87, 0x8e, 0x67, 0xe3, 0x8f, 0x0f, 0xf7, - 0x7b, 0x2b, 0x9b, 0xd5, 0xad, 0xd6, 0xee, 0x77, 0x77, 0xf2, 0x9e, 0x61, 0x47, 0xc9, 0x91, 0x9d, - 0x7b, 0x74, 0xf9, 0x21, 0x5f, 0xfd, 0x43, 0x2f, 0x0c, 0x4e, 0xcd, 0xf6, 0x24, 0x01, 0x42, 0x3d, - 0x58, 0x0d, 0xf0, 0x24, 0xc0, 0xe4, 0xb8, 0xb7, 0xba, 0xa9, 0x6d, 0x35, 0x4c, 0xf9, 0xd9, 0xff, - 0x01, 0xac, 0xe7, 0x16, 0x23, 0x1d, 0xaa, 0x4f, 0xf0, 0x29, 0xe3, 0x6f, 0xd5, 0xa4, 0x3f, 0xd1, - 0x45, 0xa8, 0x9f, 0x58, 0xee, 0x02, 0x0b, 0x0e, 0xf2, 0x8f, 0x5f, 0xa9, 0xdc, 0xd6, 0x8c, 0x3f, - 0xd1, 0xa0, 0x67, 0x62, 0x17, 0x5b, 0x04, 0x7f, 0x91, 0x92, 0xba, 0x0c, 0x2b, 0x9e, 0x6f, 0xe3, - 0xc3, 0x7d, 0x26, 0xa9, 0xaa, 0x29, 0xbe, 0x8c, 0xff, 0xd1, 0xe0, 0xe2, 0x01, 0x0e, 0xa9, 0xca, - 0x3a, 0x24, 0x74, 0xc6, 0x91, 0x4d, 0x7e, 0x1f, 0xaa, 0x01, 0x7e, 0x2a, 0x28, 0x7b, 0x2b, 0x4d, - 0x59, 0xe4, 0x61, 0x55, 
0x2b, 0x4d, 0xba, 0x0e, 0xbd, 0x06, 0x6d, 0x7b, 0xe6, 0x0e, 0xc7, 0xc7, - 0x96, 0xe7, 0x61, 0x97, 0x2b, 0x7d, 0xd3, 0x6c, 0xd9, 0x33, 0x77, 0x4f, 0x80, 0xd0, 0x35, 0x00, - 0x82, 0xa7, 0x33, 0xec, 0x85, 0xb1, 0x53, 0x4c, 0x40, 0xd0, 0x36, 0xac, 0x4f, 0x02, 0x7f, 0x36, - 0x24, 0xc7, 0x56, 0x60, 0x0f, 0x5d, 0x6c, 0xd9, 0x38, 0x60, 0xd4, 0x37, 0xcc, 0x35, 0x3a, 0x30, - 0xa0, 0xf0, 0x0f, 0x18, 0x18, 0xdd, 0x82, 0x3a, 0x19, 0xfb, 0x73, 0xcc, 0x14, 0xa8, 0xbb, 0xfb, - 0x8a, 0x4a, 0x35, 0xf6, 0xad, 0xd0, 0x1a, 0xd0, 0x49, 0x26, 0x9f, 0x6b, 0xfc, 0x93, 0xb0, 0xa0, - 0x2f, 0xb9, 0x43, 0x4a, 0x58, 0x59, 0xfd, 0xe5, 0x58, 0xd9, 0x4a, 0x29, 0x2b, 0x5b, 0x3d, 0xdb, - 0xca, 0x72, 0x5c, 0x7b, 0x1e, 0x2b, 0x6b, 0xbc, 0x64, 0x2b, 0xfb, 0xbb, 0xd8, 0xca, 0xbe, 0xec, - 0xd2, 0x8c, 0x2d, 0xb1, 0x9e, 0xb2, 0xc4, 0xbf, 0xd0, 0xe0, 0x6b, 0x07, 0x38, 0x8c, 0xc8, 0xa7, - 0x86, 0x85, 0xbf, 0xa4, 0x21, 0xf2, 0x53, 0x0d, 0xfa, 0x2a, 0x5a, 0xcf, 0x13, 0x26, 0x3f, 0x84, - 0xcb, 0x11, 0x8e, 0xa1, 0x8d, 0xc9, 0x38, 0x70, 0xe6, 0x4c, 0x8c, 0xcc, 0x77, 0xb4, 0x76, 0xaf, - 0xab, 0x14, 0x31, 0x4b, 0xc1, 0xa5, 0x68, 0x8b, 0xfd, 0xc4, 0x0e, 0xc6, 0xef, 0x6b, 0x70, 0x89, - 0xfa, 0x2a, 0xe1, 0x5c, 0xbc, 0x89, 0xff, 0xe2, 0x7c, 0x4d, 0xbb, 0xad, 0x4a, 0xce, 0x6d, 0x95, - 0xe0, 0x31, 0xcb, 0x39, 0xb3, 0xf4, 0x9c, 0x87, 0x77, 0xdf, 0x86, 0xba, 0xe3, 0x4d, 0x7c, 0xc9, - 0xaa, 0x57, 0x55, 0xac, 0x4a, 0x22, 0xe3, 0xb3, 0x0d, 0x8f, 0x53, 0x11, 0xfb, 0xd1, 0x73, 0xa8, - 0x5b, 0xf6, 0xd8, 0x15, 0xc5, 0xb1, 0x7f, 0x4f, 0x83, 0x2b, 0x39, 0x84, 0xe7, 0x39, 0xf7, 0xf7, - 0x60, 0x85, 0x45, 0x07, 0x79, 0xf0, 0xd7, 0x95, 0x07, 0x4f, 0xa0, 0xfb, 0xc0, 0x21, 0xa1, 0x29, - 0xd6, 0x18, 0x3e, 0xe8, 0xd9, 0x31, 0x1a, 0xb7, 0x44, 0xcc, 0x1a, 0x7a, 0xd6, 0x8c, 0x33, 0xa0, - 0x69, 0xb6, 0x04, 0xec, 0x81, 0x35, 0xc3, 0xe8, 0x6b, 0xd0, 0xa0, 0x26, 0x3b, 0x74, 0x6c, 0x29, - 0xfe, 0x55, 0x66, 0xc2, 0x36, 0x41, 0xaf, 0x00, 0xb0, 0x21, 0xcb, 0xb6, 0x03, 0x1e, 0xd2, 0x9a, - 0x66, 0x93, 0x42, 0xee, 0x50, 0x80, 0xf1, 0x87, 0x1a, 0xb4, 0xa9, 0xeb, 0xbc, 0x8f, 0x43, 0x8b, - 0xca, 0x01, 0x7d, 0x07, 0x9a, 0xae, 0x6f, 0xd9, 0xc3, 0xf0, 0x74, 0xce, 0x51, 0x75, 0xb3, 0xbc, - 0x8e, 0xfd, 0xed, 0xc3, 0xd3, 0x39, 0x36, 0x1b, 0xae, 0xf8, 0x55, 0x86, 0xdf, 0x39, 0x53, 0xae, - 0x2a, 0x4c, 0xf9, 0x1f, 0xea, 0x70, 0xf9, 0x37, 0xac, 0x70, 0x7c, 0xbc, 0x3f, 0x93, 0x91, 0xf9, - 0xc5, 0x95, 0x20, 0xf6, 0x6d, 0x95, 0xa4, 0x6f, 0x7b, 0x69, 0xbe, 0x33, 0xd2, 0xf3, 0xba, 0x4a, - 0xcf, 0x69, 0x69, 0xb7, 0xf3, 0x58, 0x88, 0x2a, 0xa1, 0xe7, 0x89, 0x00, 0xba, 0xf2, 0x22, 0x01, - 0x74, 0x0f, 0x3a, 0xf8, 0xe3, 0xb1, 0xbb, 0xa0, 0x32, 0x67, 0xd8, 0x79, 0x64, 0xbc, 0xa6, 0xc0, - 0x9e, 0x34, 0xb2, 0xb6, 0x58, 0x74, 0x28, 0x68, 0xe0, 0xa2, 0x9e, 0xe1, 0xd0, 0x62, 0xe1, 0xaf, - 0xb5, 0xbb, 0x59, 0x24, 0x6a, 0xa9, 0x1f, 0x5c, 0xdc, 0xf4, 0x0b, 0x5d, 0x85, 0xa6, 0x08, 0xd7, - 0x87, 0xfb, 0xbd, 0x26, 0x63, 0x5f, 0x0c, 0x40, 0x16, 0x74, 0x84, 0x07, 0x12, 0x14, 0x02, 0xa3, - 0xf0, 0x7b, 0x2a, 0x04, 0x6a, 0x61, 0x27, 0x29, 0x27, 0x22, 0x78, 0x93, 0x04, 0x88, 0x96, 0x93, - 0xfe, 0x64, 0xe2, 0x3a, 0x1e, 0x7e, 0xc0, 0x25, 0xdc, 0x62, 0x44, 0xa4, 0x81, 0x34, 0xc4, 0x9f, - 0xe0, 0x80, 0x38, 0xbe, 0xd7, 0x6b, 0xb3, 0x71, 0xf9, 0xd9, 0x1f, 0xc2, 0x7a, 0x0e, 0x85, 0x22, - 0xc4, 0x7f, 0x2b, 0x19, 0xe2, 0x97, 0xf3, 0x38, 0x91, 0x02, 0xfc, 0xb9, 0x06, 0x97, 0x1e, 0x79, - 0x64, 0x31, 0x8a, 0xce, 0xf6, 0xc5, 0xe8, 0x71, 0xd6, 0x83, 0xd4, 0x72, 0x1e, 0xc4, 0xf8, 0x69, - 0x1d, 0xd6, 0xc4, 0x29, 0xa8, 0xb8, 0x99, 0x2b, 0xb8, 0x0a, 0xcd, 0x28, 0x88, 0x08, 0x86, 0xc4, - 0x00, 0xb4, 0x09, 0xad, 0x84, 0x21, 0x08, 0xaa, 
0x92, 0xa0, 0x52, 0xa4, 0xc9, 0x94, 0xa0, 0x96, - 0x48, 0x09, 0x5e, 0x01, 0x98, 0xb8, 0x0b, 0x72, 0x3c, 0x0c, 0x9d, 0x19, 0x16, 0x29, 0x49, 0x93, - 0x41, 0x1e, 0x3a, 0x33, 0x8c, 0xee, 0x40, 0x7b, 0xe4, 0x78, 0xae, 0x3f, 0x1d, 0xce, 0xad, 0xf0, - 0x98, 0x88, 0xd2, 0x4b, 0x25, 0x16, 0x96, 0xc0, 0xdd, 0x65, 0x73, 0xcd, 0x16, 0x5f, 0x73, 0x44, - 0x97, 0xa0, 0x6b, 0xd0, 0xf2, 0x16, 0xb3, 0xa1, 0x3f, 0x19, 0x06, 0xfe, 0x33, 0xc2, 0x0a, 0xac, - 0xaa, 0xd9, 0xf4, 0x16, 0xb3, 0x1f, 0x4d, 0x4c, 0xff, 0x19, 0x75, 0xe2, 0x4d, 0xea, 0xce, 0x89, - 0xeb, 0x4f, 0x49, 0xaf, 0x51, 0x6a, 0xff, 0x78, 0x01, 0x5d, 0x6d, 0x63, 0x37, 0xb4, 0xd8, 0xea, - 0x66, 0xb9, 0xd5, 0xd1, 0x02, 0xf4, 0x06, 0x74, 0xc7, 0xfe, 0x6c, 0x6e, 0x31, 0x0e, 0xdd, 0x0b, - 0xfc, 0x19, 0xb3, 0x9c, 0xaa, 0x99, 0x81, 0xa2, 0x3d, 0x68, 0xb1, 0xb4, 0x58, 0x98, 0x57, 0x8b, - 0xe1, 0x31, 0x54, 0xe6, 0x95, 0xc8, 0x63, 0xa9, 0x82, 0x82, 0x23, 0x7f, 0x12, 0xaa, 0x19, 0xd2, - 0x4a, 0x89, 0xf3, 0x09, 0x16, 0x16, 0xd2, 0x12, 0xb0, 0x81, 0xf3, 0x09, 0xa6, 0xb9, 0xba, 0xe3, - 0x11, 0x1c, 0x84, 0xb2, 0x72, 0xea, 0x75, 0x98, 0xfa, 0x74, 0x38, 0x54, 0x28, 0x36, 0x3a, 0x84, - 0x2e, 0x09, 0xad, 0x20, 0x1c, 0xce, 0x7d, 0xc2, 0x14, 0xa0, 0xd7, 0x65, 0xba, 0x6d, 0x14, 0xd4, - 0x69, 0xf7, 0xc9, 0xf4, 0x48, 0xcc, 0x34, 0x3b, 0x6c, 0xa5, 0xfc, 0x34, 0xfe, 0xab, 0x02, 0xdd, - 0x34, 0xcd, 0xd4, 0x88, 0x79, 0xde, 0x2e, 0x15, 0x51, 0x7e, 0xd2, 0x13, 0x60, 0xcf, 0x1a, 0xb9, - 0x98, 0x17, 0x09, 0x4c, 0x0f, 0x1b, 0x66, 0x8b, 0xc3, 0xd8, 0x06, 0x54, 0x9f, 0x38, 0xa7, 0x98, - 0xf2, 0x57, 0x19, 0xf5, 0x4d, 0x06, 0x61, 0xc1, 0xb3, 0x07, 0xab, 0xb2, 0xbe, 0xe0, 0x5a, 0x28, - 0x3f, 0xe9, 0xc8, 0x68, 0xe1, 0x30, 0xac, 0x5c, 0x0b, 0xe5, 0x27, 0xda, 0x87, 0x36, 0xdf, 0x72, - 0x6e, 0x05, 0xd6, 0x4c, 0xea, 0xe0, 0x6b, 0x4a, 0x3b, 0x7e, 0x1f, 0x9f, 0x3e, 0xa6, 0x2e, 0xe1, - 0xc8, 0x72, 0x02, 0x93, 0xcb, 0xec, 0x88, 0xad, 0x42, 0x5b, 0xa0, 0xf3, 0x5d, 0x26, 0x8e, 0x8b, - 0x85, 0x36, 0xaf, 0xb2, 0x08, 0xdd, 0x65, 0xf0, 0x7b, 0x8e, 0x8b, 0xb9, 0xc2, 0x46, 0x47, 0x60, - 0x52, 0x6a, 0x70, 0x7d, 0x65, 0x10, 0x26, 0xa3, 0xeb, 0xd0, 0xe1, 0xc3, 0xd2, 0xd3, 0x71, 0x77, - 0xcc, 0x69, 0x7c, 0xcc, 0x61, 0x2c, 0x49, 0x58, 0xcc, 0xb8, 0xc6, 0x03, 0x3f, 0x8e, 0xb7, 0x98, - 0x51, 0x7d, 0x37, 0xfe, 0xa8, 0x06, 0x1b, 0xd4, 0xec, 0x85, 0x07, 0x38, 0x47, 0xb8, 0x7d, 0x05, - 0xc0, 0x26, 0xe1, 0x30, 0xe5, 0xaa, 0x9a, 0x36, 0x09, 0x85, 0x33, 0xfe, 0x8e, 0x8c, 0x96, 0xd5, - 0xe2, 0x04, 0x3a, 0xe3, 0x86, 0xf2, 0x11, 0xf3, 0x85, 0x1a, 0x3b, 0xd7, 0xa1, 0x43, 0xfc, 0x45, - 0x30, 0xc6, 0xc3, 0x54, 0xa9, 0xd3, 0xe6, 0xc0, 0x07, 0x6a, 0x67, 0xba, 0xa2, 0x6c, 0x30, 0x25, - 0xa2, 0xe6, 0xea, 0xf9, 0xa2, 0x66, 0x23, 0x1b, 0x35, 0xdf, 0x87, 0x35, 0xe6, 0x09, 0x22, 0x2b, - 0x92, 0x0e, 0xa4, 0x8c, 0x19, 0x75, 0xd9, 0x52, 0xf9, 0x49, 0x92, 0x91, 0x0f, 0x52, 0x91, 0x8f, - 0x32, 0xc3, 0xc3, 0xd8, 0x1e, 0x86, 0x81, 0xe5, 0x91, 0x09, 0x0e, 0x58, 0xe4, 0x6c, 0x98, 0x6d, - 0x0a, 0x7c, 0x28, 0x60, 0xc6, 0x3f, 0x57, 0xe0, 0xb2, 0x28, 0x60, 0xcf, 0xaf, 0x17, 0x45, 0xe1, - 0x4b, 0xfa, 0xff, 0xea, 0x19, 0x25, 0x61, 0xad, 0x44, 0x6a, 0x56, 0x57, 0xa4, 0x66, 0xe9, 0xb2, - 0x68, 0x25, 0x57, 0x16, 0x45, 0x1d, 0x9a, 0xd5, 0xf2, 0x1d, 0x1a, 0x5a, 0xf0, 0xb3, 0x5c, 0x9d, - 0xc9, 0xae, 0x69, 0xf2, 0x8f, 0x72, 0x0c, 0xfd, 0x0f, 0x0d, 0x3a, 0x03, 0x6c, 0x05, 0xe3, 0x63, - 0xc9, 0xc7, 0x77, 0x93, 0x1d, 0xad, 0xd7, 0x0b, 0x44, 0x9c, 0x5a, 0xf2, 0xd5, 0x69, 0x65, 0xfd, - 0xa7, 0x06, 0xed, 0x5f, 0xa7, 0x43, 0xf2, 0xb0, 0xb7, 0x93, 0x87, 0x7d, 0xa3, 0xe0, 0xb0, 0x26, - 0x0e, 0x03, 0x07, 0x9f, 0xe0, 0xaf, 0xdc, 0x71, 0xff, 0x51, 0x83, 0xfe, 
0xe0, 0xd4, 0x1b, 0x9b, - 0xdc, 0x96, 0xcf, 0x6f, 0x31, 0xd7, 0xa1, 0x73, 0x92, 0xca, 0xda, 0x2a, 0x4c, 0xe1, 0xda, 0x27, - 0xc9, 0xc2, 0xcf, 0x04, 0x5d, 0x36, 0xd2, 0xc4, 0x61, 0xa5, 0x6b, 0x7d, 0x53, 0x45, 0x75, 0x86, - 0x38, 0xe6, 0x9a, 0xd6, 0x82, 0x34, 0xd0, 0xf8, 0x03, 0x0d, 0x36, 0x14, 0x13, 0xd1, 0x15, 0x58, - 0x15, 0x45, 0xa6, 0x88, 0xc1, 0xdc, 0x86, 0x6d, 0x2a, 0x9e, 0xb8, 0x4d, 0xe2, 0xd8, 0xf9, 0x54, - 0xd0, 0x46, 0xaf, 0x42, 0x2b, 0xaa, 0x06, 0xec, 0x9c, 0x7c, 0x6c, 0x82, 0xfa, 0xd0, 0x10, 0xce, - 0x49, 0x96, 0x59, 0xd1, 0xb7, 0xf1, 0xb7, 0x1a, 0x5c, 0x7e, 0xcf, 0xf2, 0x6c, 0x7f, 0x32, 0x39, - 0x3f, 0x5b, 0xf7, 0x20, 0x55, 0x44, 0x94, 0x6d, 0x4f, 0xa4, 0x2b, 0x8f, 0xb7, 0x60, 0x3d, 0xe0, - 0x9e, 0xd1, 0x4e, 0xf3, 0xbd, 0x6a, 0xea, 0x72, 0x20, 0xe2, 0xe7, 0x5f, 0x56, 0x00, 0xd1, 0x60, - 0x70, 0xd7, 0x72, 0x2d, 0x6f, 0x8c, 0x5f, 0x9c, 0xf4, 0x1b, 0xd0, 0x4d, 0x85, 0xb0, 0xe8, 0xfe, - 0x2c, 0x19, 0xc3, 0x08, 0x7a, 0x1f, 0xba, 0x23, 0x8e, 0x6a, 0x18, 0x60, 0x8b, 0xf8, 0x1e, 0x73, - 0xae, 0x5d, 0x75, 0x27, 0xe2, 0x61, 0xe0, 0x4c, 0xa7, 0x38, 0xd8, 0xf3, 0x3d, 0x5b, 0xe4, 0x62, - 0x23, 0x49, 0x26, 0x5d, 0x4a, 0x05, 0x17, 0xc7, 0x73, 0x29, 0x1a, 0x88, 0x02, 0x3a, 0x63, 0x05, - 0xc1, 0x96, 0x1b, 0x33, 0x22, 0xf6, 0xc6, 0x3a, 0x1f, 0x18, 0x14, 0x37, 0xa2, 0x14, 0xf1, 0xd5, - 0xf8, 0x6b, 0x0d, 0x50, 0x54, 0x2f, 0xb1, 0xca, 0x90, 0x69, 0x5f, 0x76, 0xa9, 0xa6, 0x08, 0x0a, - 0x57, 0xa1, 0x69, 0xcb, 0x95, 0xc2, 0x5c, 0x62, 0x00, 0xf3, 0xd1, 0x8c, 0xe8, 0x21, 0x0d, 0xc6, - 0xd8, 0x96, 0xf5, 0x08, 0x07, 0x7e, 0xc0, 0x60, 0xe9, 0xf0, 0x5c, 0xcb, 0x86, 0xe7, 0x64, 0x9f, - 0xa5, 0x9e, 0xea, 0xb3, 0x18, 0x9f, 0x56, 0x40, 0x67, 0xee, 0x6e, 0x2f, 0x2e, 0xf6, 0x4b, 0x11, - 0x7d, 0x1d, 0x3a, 0xe2, 0x86, 0x39, 0x45, 0x78, 0xfb, 0x69, 0x62, 0x33, 0xf4, 0x0e, 0x5c, 0xe4, - 0x93, 0x02, 0x4c, 0x16, 0x6e, 0x9c, 0x8a, 0xf3, 0x64, 0x16, 0x3d, 0xe5, 0x7e, 0x96, 0x0e, 0xc9, - 0x15, 0x8f, 0xe0, 0xf2, 0xd4, 0xf5, 0x47, 0x96, 0x3b, 0x4c, 0x8b, 0x87, 0xcb, 0xb0, 0x84, 0xc6, - 0x5f, 0xe4, 0xcb, 0x07, 0x49, 0x19, 0x12, 0x74, 0x40, 0xcb, 0x7a, 0xfc, 0x24, 0xce, 0xf2, 0xeb, - 0xa5, 0xb3, 0xfc, 0x36, 0x5d, 0x18, 0x25, 0xf9, 0x7f, 0xaa, 0xc1, 0x5a, 0xa6, 0x55, 0x9a, 0x2d, - 0x29, 0xb5, 0x7c, 0x49, 0x79, 0x1b, 0xea, 0xb4, 0xce, 0xe2, 0xce, 0xb0, 0xab, 0x2e, 0x77, 0xd2, - 0xbb, 0x9a, 0x7c, 0x01, 0xba, 0x09, 0x1b, 0x8a, 0xeb, 0x4c, 0xa1, 0x03, 0x28, 0x7f, 0x9b, 0x69, - 0xfc, 0xbc, 0x06, 0xad, 0x04, 0x3f, 0x96, 0x54, 0xc3, 0x65, 0x7a, 0x5f, 0x99, 0xe3, 0x55, 0xf3, - 0xc7, 0x2b, 0xb8, 0x12, 0xa3, 0x7a, 0x37, 0xc3, 0x33, 0x9e, 0xfc, 0x8b, 0x4a, 0x64, 0x86, 0x67, - 0x2c, 0xf5, 0x4f, 0x66, 0xf5, 0x2b, 0xa9, 0xac, 0x3e, 0x53, 0xf7, 0xac, 0x9e, 0x51, 0xf7, 0x34, - 0xd2, 0x75, 0x4f, 0xca, 0x8e, 0x9a, 0x59, 0x3b, 0x2a, 0x5b, 0xa0, 0xbe, 0x03, 0x1b, 0xe3, 0x00, - 0x5b, 0x21, 0xb6, 0xef, 0x9e, 0xee, 0x45, 0x43, 0x22, 0x33, 0x52, 0x0d, 0xa1, 0x7b, 0x71, 0xcf, - 0x88, 0x4b, 0xb9, 0xcd, 0xa4, 0xac, 0x2e, 0xab, 0x84, 0x6c, 0xb8, 0x90, 0xa5, 0x7b, 0x66, 0x5f, - 0xd9, 0xd2, 0xb8, 0xf3, 0x42, 0xa5, 0xf1, 0xab, 0xd0, 0x92, 0xa1, 0x95, 0x9a, 0x7b, 0x97, 0x7b, - 0x3e, 0xe9, 0x0b, 0x6c, 0x92, 0x72, 0x06, 0x6b, 0xe9, 0xa6, 0x6b, 0xb6, 0x28, 0xd5, 0xf3, 0x45, - 0xe9, 0x15, 0x58, 0x75, 0xc8, 0x70, 0x62, 0x3d, 0xc1, 0xbd, 0x75, 0x36, 0xba, 0xe2, 0x90, 0x7b, - 0xd6, 0x13, 0x6c, 0xfc, 0x4b, 0x15, 0xba, 0x71, 0x15, 0x53, 0xda, 0x8d, 0x94, 0xb9, 0xd2, 0x7f, - 0x00, 0x7a, 0x1c, 0xa8, 0x19, 0x87, 0xcf, 0x2c, 0xc4, 0xb2, 0x37, 0x19, 0x6b, 0xf3, 0x8c, 0xbd, - 0xa6, 0x7a, 0xc5, 0xb5, 0xe7, 0xea, 0x15, 0x9f, 0xf3, 0x02, 0xf1, 0x16, 0x5c, 0x8a, 0x02, 0x70, - 
0xea, 0xd8, 0x3c, 0xcb, 0xbf, 0x28, 0x07, 0x8f, 0x92, 0xc7, 0x2f, 0x70, 0x01, 0xab, 0x45, 0x2e, - 0x20, 0xab, 0x02, 0x8d, 0x9c, 0x0a, 0xe4, 0xef, 0x31, 0x9b, 0x8a, 0x7b, 0x4c, 0xe3, 0x11, 0x6c, - 0xb0, 0x36, 0x20, 0x19, 0x07, 0xce, 0x08, 0x47, 0x39, 0x6b, 0x19, 0xb1, 0xf6, 0xa1, 0x91, 0x49, - 0x7b, 0xa3, 0x6f, 0xe3, 0x77, 0x35, 0xb8, 0x9c, 0xdf, 0x97, 0x69, 0x4c, 0xec, 0x48, 0xb4, 0x94, - 0x23, 0xf9, 0x4d, 0xd8, 0x88, 0xb7, 0x4f, 0x27, 0xd4, 0x05, 0x29, 0xa3, 0x82, 0x70, 0x13, 0xc5, - 0x7b, 0x48, 0x98, 0xf1, 0x73, 0x2d, 0xea, 0xa6, 0x52, 0xd8, 0x94, 0xf5, 0x98, 0x69, 0x70, 0xf3, - 0x3d, 0xd7, 0xf1, 0xa2, 0xaa, 0x5b, 0x9c, 0x91, 0x03, 0x45, 0xd5, 0xfd, 0x1e, 0xac, 0x89, 0x49, - 0x51, 0x8c, 0x2a, 0x99, 0x95, 0x75, 0xf9, 0xba, 0x28, 0x3a, 0xdd, 0x80, 0xae, 0x68, 0xfe, 0x4a, - 0x7c, 0x55, 0x55, 0x4b, 0xf8, 0xd7, 0x40, 0x97, 0xd3, 0x9e, 0x37, 0x2a, 0xae, 0x89, 0x85, 0x51, - 0x76, 0xf7, 0x53, 0x0d, 0x7a, 0xe9, 0x18, 0x99, 0x38, 0xfe, 0xf3, 0xe7, 0x78, 0xdf, 0x4d, 0x5f, - 0x9b, 0xdd, 0x38, 0x83, 0x9e, 0x18, 0x8f, 0xbc, 0x3c, 0x7b, 0xc0, 0xae, 0x40, 0x69, 0x69, 0xb2, - 0xef, 0x90, 0x30, 0x70, 0x46, 0x8b, 0x73, 0xbd, 0xec, 0x30, 0xfe, 0xa6, 0x02, 0x5f, 0x57, 0x6e, - 0x78, 0x9e, 0x0b, 0xb2, 0xa2, 0x4e, 0xc0, 0x5d, 0x68, 0x64, 0x4a, 0x98, 0x37, 0xce, 0x38, 0xbc, - 0x68, 0x6a, 0xf1, 0xe6, 0x8a, 0x5c, 0x47, 0xf7, 0x88, 0x74, 0xba, 0x56, 0xbc, 0x87, 0x50, 0xda, - 0xd4, 0x1e, 0x72, 0x1d, 0xba, 0x03, 0x6d, 0x5e, 0x1e, 0x0e, 0x4f, 0x1c, 0xfc, 0x4c, 0xde, 0xeb, - 0x5c, 0x53, 0xfa, 0x35, 0x36, 0xef, 0xb1, 0x83, 0x9f, 0x99, 0x2d, 0x37, 0xfa, 0x4d, 0x8c, 0xff, - 0xae, 0x02, 0xc4, 0x63, 0xb4, 0x36, 0x8d, 0x0d, 0x46, 0x58, 0x40, 0x02, 0x42, 0x03, 0x71, 0x3a, - 0xf7, 0x93, 0x9f, 0xc8, 0x8c, 0xdb, 0xb3, 0xb6, 0x43, 0x42, 0xc1, 0x97, 0x9b, 0x67, 0xd3, 0x22, - 0x59, 0x44, 0x45, 0xc6, 0xaf, 0x4d, 0x64, 0xed, 0x45, 0x21, 0xe8, 0x6d, 0x40, 0xd3, 0xc0, 0x7f, - 0xe6, 0x78, 0xd3, 0x64, 0xc6, 0xce, 0x13, 0xfb, 0x75, 0x31, 0x92, 0x48, 0xd9, 0x7f, 0x0c, 0x7a, - 0x66, 0xba, 0x64, 0xc9, 0xad, 0x25, 0x64, 0x1c, 0xa4, 0xf6, 0x12, 0x37, 0x38, 0x6b, 0x69, 0x0c, - 0xa4, 0x3f, 0x04, 0x3d, 0x4b, 0xaf, 0xe2, 0x0e, 0xe6, 0xdb, 0xe9, 0x3b, 0x98, 0xb3, 0xcc, 0x94, - 0x6e, 0x93, 0xb8, 0x84, 0xe9, 0x4f, 0xe0, 0xa2, 0x8a, 0x12, 0x05, 0x92, 0xdb, 0x69, 0x24, 0x65, - 0x72, 0xda, 0xc4, 0x65, 0xcf, 0x0f, 0xa2, 0x74, 0x91, 0xb1, 0xb9, 0xc8, 0x03, 0x27, 0x9a, 0x72, - 0x95, 0x54, 0x53, 0xce, 0xf8, 0x63, 0x0d, 0x50, 0x5e, 0xbb, 0x51, 0x17, 0x2a, 0xd1, 0x26, 0x95, - 0xc3, 0xfd, 0x8c, 0x36, 0x55, 0x72, 0xda, 0x74, 0x15, 0x9a, 0x51, 0x44, 0x14, 0xee, 0x2f, 0x06, - 0x24, 0x75, 0xad, 0x96, 0xd6, 0xb5, 0x04, 0x61, 0xf5, 0x34, 0x61, 0xc7, 0x80, 0xf2, 0x16, 0x93, - 0xdc, 0x49, 0x4b, 0xef, 0xb4, 0x8c, 0xc2, 0x04, 0xa6, 0x6a, 0x1a, 0xd3, 0xbf, 0x57, 0x00, 0xc5, - 0x31, 0x3f, 0xba, 0x88, 0x2a, 0x13, 0x28, 0x6f, 0xc2, 0x46, 0x3e, 0x23, 0x90, 0x69, 0x10, 0xca, - 0xe5, 0x03, 0xaa, 0xd8, 0x5d, 0x55, 0xbd, 0x41, 0x7a, 0x37, 0xf2, 0x71, 0x3c, 0xc1, 0xb9, 0x56, - 0x94, 0xe0, 0x64, 0xdc, 0xdc, 0x6f, 0x65, 0xdf, 0x2e, 0x71, 0xa3, 0xb9, 0xad, 0xf4, 0x47, 0xb9, - 0x23, 0x2f, 0x7b, 0xb8, 0x74, 0xfe, 0xe7, 0x49, 0xff, 0x5a, 0x81, 0xf5, 0x88, 0x1b, 0xcf, 0xc5, - 0xe9, 0xe5, 0x17, 0x7f, 0x9f, 0x31, 0x6b, 0x3f, 0x52, 0xb3, 0xf6, 0x97, 0xcf, 0xcc, 0x61, 0x3f, - 0x3f, 0xce, 0x0e, 0x60, 0x55, 0xb4, 0xcf, 0x72, 0xb6, 0x5b, 0xa6, 0x4a, 0xbc, 0x08, 0x75, 0xea, - 0x2a, 0x64, 0x3f, 0x89, 0x7f, 0x18, 0x7f, 0xa5, 0x01, 0x0c, 0x4e, 0xbd, 0xf1, 0x1d, 0x6e, 0x42, - 0xef, 0x40, 0x6d, 0xd9, 0x03, 0x0d, 0x3a, 0x9b, 0x25, 0xdd, 0x6c, 0x66, 0x09, 0xa9, 0xa5, 0x0a, - 0xdc, 0x6a, 0xb6, 0xc0, 
0x2d, 0x2a, 0x4d, 0x8b, 0xdd, 0xc6, 0xdf, 0x6b, 0x70, 0x85, 0x12, 0xf1, - 0x52, 0x72, 0x91, 0x52, 0xac, 0x4b, 0xb8, 0xa4, 0x6a, 0xda, 0x25, 0xdd, 0x86, 0x55, 0x5e, 0x63, - 0xca, 0xbc, 0xe0, 0x5a, 0x11, 0xcb, 0x38, 0x83, 0x4d, 0x39, 0x7d, 0xfb, 0x57, 0xa1, 0x19, 0xf5, - 0x7a, 0x51, 0x0b, 0x56, 0x1f, 0x79, 0xef, 0x7b, 0xfe, 0x33, 0x4f, 0xbf, 0x80, 0x56, 0xa1, 0x7a, - 0xc7, 0x75, 0x75, 0x0d, 0x75, 0xa0, 0x39, 0x08, 0x03, 0x6c, 0xcd, 0x1c, 0x6f, 0xaa, 0x57, 0x50, - 0x17, 0xe0, 0x3d, 0x87, 0x84, 0x7e, 0xe0, 0x8c, 0x2d, 0x57, 0xaf, 0x6e, 0x7f, 0x02, 0xdd, 0x74, - 0x25, 0x85, 0xda, 0xd0, 0x78, 0xe0, 0x87, 0x3f, 0xfc, 0xd8, 0x21, 0xa1, 0x7e, 0x81, 0xce, 0x7f, - 0xe0, 0x87, 0x47, 0x01, 0x26, 0xd8, 0x0b, 0x75, 0x0d, 0x01, 0xac, 0xfc, 0xc8, 0xdb, 0x77, 0xc8, - 0x13, 0xbd, 0x82, 0x36, 0x44, 0x93, 0xc4, 0x72, 0x0f, 0x45, 0x79, 0xa2, 0x57, 0xe9, 0xf2, 0xe8, - 0xab, 0x86, 0x74, 0x68, 0x47, 0x53, 0x0e, 0x8e, 0x1e, 0xe9, 0x75, 0xd4, 0x84, 0x3a, 0xff, 0xb9, - 0xb2, 0x6d, 0x83, 0x9e, 0xed, 0xf0, 0xd1, 0x3d, 0xf9, 0x21, 0x22, 0x90, 0x7e, 0x81, 0x9e, 0x4c, - 0xb4, 0x58, 0x75, 0x0d, 0xad, 0x41, 0x2b, 0xd1, 0xb0, 0xd4, 0x2b, 0x14, 0x70, 0x10, 0xcc, 0xc7, - 0x42, 0x7a, 0x9c, 0x04, 0x9a, 0x4b, 0xef, 0x53, 0x4e, 0xd4, 0xb6, 0xef, 0x42, 0x43, 0x96, 0x78, - 0x74, 0xaa, 0x60, 0x11, 0xfd, 0xd4, 0x2f, 0xa0, 0x75, 0xe8, 0xa4, 0xde, 0x66, 0xea, 0x1a, 0x42, - 0xd0, 0x4d, 0x3f, 0x8a, 0xd6, 0x2b, 0xdb, 0xbb, 0x00, 0xb1, 0xa9, 0x53, 0x72, 0x0e, 0xbd, 0x13, - 0xcb, 0x75, 0x6c, 0x4e, 0x1b, 0x1d, 0xa2, 0xdc, 0x65, 0xdc, 0xe1, 0xad, 0x3a, 0xbd, 0xb2, 0xfd, - 0x2a, 0x34, 0xa4, 0x96, 0x53, 0xb8, 0x89, 0x67, 0xfe, 0x09, 0xe6, 0x92, 0x19, 0xe0, 0x50, 0xd7, - 0x76, 0xff, 0xb7, 0x03, 0xc0, 0x9b, 0x72, 0xbe, 0x1f, 0xd8, 0xc8, 0x05, 0x74, 0x80, 0xc3, 0x3d, - 0x7f, 0x36, 0xf7, 0x3d, 0xd9, 0x2c, 0x20, 0x68, 0x27, 0xad, 0x0a, 0xe2, 0x23, 0x3f, 0x51, 0x9c, - 0xbe, 0xff, 0xba, 0x72, 0x7e, 0x66, 0xb2, 0x71, 0x01, 0xcd, 0x18, 0xb6, 0x87, 0xce, 0x0c, 0x3f, - 0x74, 0xc6, 0x4f, 0xa2, 0x4e, 0x5e, 0xf1, 0xbb, 0xe5, 0xcc, 0x54, 0x89, 0xef, 0xba, 0x12, 0xdf, - 0x20, 0x0c, 0x1c, 0x6f, 0x2a, 0x53, 0x71, 0xe3, 0x02, 0x7a, 0x9a, 0x79, 0x35, 0x2d, 0x11, 0xee, - 0x96, 0x79, 0x28, 0xfd, 0x62, 0x28, 0x5d, 0x58, 0xcb, 0xfc, 0x3f, 0x04, 0x6d, 0xab, 0x9f, 0xbb, - 0xa9, 0xfe, 0xcb, 0xd2, 0x7f, 0xab, 0xd4, 0xdc, 0x08, 0x9b, 0x03, 0xdd, 0xf4, 0x7f, 0x20, 0xd0, - 0x2f, 0x15, 0x6d, 0x90, 0x7b, 0x70, 0xdb, 0xdf, 0x2e, 0x33, 0x35, 0x42, 0xf5, 0x21, 0x57, 0xd0, - 0x65, 0xa8, 0x94, 0x6f, 0x8e, 0xfb, 0x67, 0x55, 0x41, 0xc6, 0x05, 0xf4, 0x13, 0x58, 0xcf, 0x3d, - 0x0b, 0x46, 0xdf, 0x50, 0xdf, 0xd6, 0xa8, 0x5f, 0x0f, 0x2f, 0xc3, 0xf0, 0x61, 0xd6, 0xbc, 0x8a, - 0xa9, 0xcf, 0xbd, 0xff, 0x2f, 0x4f, 0x7d, 0x62, 0xfb, 0xb3, 0xa8, 0x7f, 0x6e, 0x0c, 0x0b, 0x66, - 0x36, 0xd9, 0xd6, 0xf0, 0xdb, 0x2a, 0x14, 0x85, 0x6f, 0x93, 0xfb, 0x3b, 0x65, 0xa7, 0x27, 0xb5, - 0x2b, 0xfd, 0xfc, 0x55, 0xcd, 0x34, 0xe5, 0x93, 0x5d, 0xb5, 0x76, 0xa9, 0x5f, 0xd3, 0x1a, 0x17, - 0xd0, 0xc3, 0x94, 0x7b, 0x45, 0x6f, 0x14, 0x09, 0x27, 0x7d, 0x61, 0xb4, 0x8c, 0x6f, 0xbf, 0x0d, - 0x88, 0xdb, 0x8e, 0x37, 0x71, 0xa6, 0x8b, 0xc0, 0xe2, 0x8a, 0x55, 0xe4, 0x6e, 0xf2, 0x53, 0x25, - 0x9a, 0x6f, 0x3e, 0xc7, 0x8a, 0xe8, 0x48, 0x43, 0x80, 0x03, 0x1c, 0xde, 0xc7, 0x61, 0xe0, 0x8c, - 0x49, 0xf6, 0x44, 0xb1, 0x47, 0x15, 0x13, 0x24, 0xaa, 0x37, 0x97, 0xce, 0x8b, 0x10, 0x8c, 0xa0, - 0x75, 0x80, 0x43, 0x91, 0x57, 0x11, 0x54, 0xb8, 0x52, 0xce, 0x90, 0x28, 0xb6, 0x96, 0x4f, 0x4c, - 0xba, 0xb3, 0xcc, 0x53, 0x60, 0x54, 0x28, 0xd8, 0xfc, 0x03, 0x65, 0xb5, 0x3b, 0x2b, 0x78, 0x5b, - 0xcc, 0x4f, 0xb4, 0x77, 0x8c, 0xc7, 0x4f, 0xde, 
0xc3, 0x96, 0x1b, 0x1e, 0x17, 0x9c, 0x28, 0x31, - 0xe3, 0xec, 0x13, 0xa5, 0x26, 0x4a, 0x1c, 0xbb, 0x9f, 0x76, 0xa1, 0xc9, 0xe2, 0x1f, 0x0d, 0xd6, - 0xbf, 0x08, 0x7f, 0x2f, 0x39, 0xfc, 0x7d, 0x04, 0x6b, 0x99, 0x97, 0xab, 0x6a, 0x7d, 0x51, 0x3f, - 0x6f, 0x2d, 0xe1, 0xc5, 0xd3, 0x6f, 0x47, 0xd5, 0x0e, 0x49, 0xf9, 0xbe, 0x74, 0xd9, 0xde, 0x8f, - 0xf9, 0xa3, 0xef, 0xa8, 0x6f, 0xfa, 0x66, 0x61, 0xe5, 0x95, 0xbe, 0x6f, 0xff, 0xe2, 0xa3, 0xc3, - 0x67, 0x1f, 0x3d, 0x3f, 0x82, 0xb5, 0xcc, 0xab, 0x27, 0xb5, 0x54, 0xd5, 0x4f, 0xa3, 0x96, 0xed, - 0xfe, 0x39, 0x86, 0x19, 0x1b, 0x36, 0x14, 0x0f, 0x52, 0xd0, 0x4e, 0x51, 0xe5, 0xa3, 0x7e, 0xb9, - 0xb2, 0xfc, 0x40, 0x9d, 0x94, 0x29, 0xa1, 0xad, 0x22, 0x22, 0xb3, 0xff, 0xca, 0xeb, 0x7f, 0xa3, - 0xdc, 0x5f, 0xf8, 0xa2, 0x03, 0x0d, 0x60, 0x85, 0xbf, 0x85, 0x42, 0xaf, 0xa9, 0xfb, 0x7f, 0x89, - 0x77, 0x52, 0xfd, 0x65, 0xaf, 0xa9, 0xc8, 0xc2, 0x0d, 0x09, 0xdb, 0xb4, 0xce, 0x3c, 0x24, 0x52, - 0x3e, 0xe2, 0x4b, 0x3e, 0x60, 0xea, 0x2f, 0x7f, 0xb3, 0x24, 0x37, 0xfd, 0xff, 0x1d, 0x8b, 0x3f, - 0x86, 0x0d, 0xc5, 0xad, 0x00, 0x2a, 0xca, 0xb9, 0x0a, 0xee, 0x23, 0xfa, 0x37, 0x4b, 0xcf, 0x8f, - 0x30, 0xff, 0x18, 0xf4, 0x6c, 0x47, 0x01, 0xbd, 0x55, 0xa4, 0xcf, 0x2a, 0x9c, 0x67, 0x2b, 0xf3, - 0xdd, 0x6f, 0x7d, 0xb8, 0x3b, 0x75, 0xc2, 0xe3, 0xc5, 0x88, 0x8e, 0xdc, 0xe4, 0x53, 0xdf, 0x76, - 0x7c, 0xf1, 0xeb, 0xa6, 0xe4, 0xff, 0x4d, 0xb6, 0xfa, 0x26, 0x43, 0x35, 0x1f, 0x8d, 0x56, 0xd8, - 0xe7, 0xad, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x92, 0xcc, 0x5f, 0xe1, 0x2a, 0x40, 0x00, 0x00, + // 4155 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x49, 0x6c, 0x24, 0x59, + 0x56, 0x15, 0xb9, 0xd8, 0x99, 0x2f, 0x17, 0xa7, 0xbf, 0x97, 0xca, 0xc9, 0xa9, 0xc5, 0x1d, 0xd5, + 0xd5, 0x6d, 0xdc, 0xdd, 0x76, 0x8f, 0x6b, 0xa6, 0xa9, 0xd9, 0x34, 0x54, 0xd9, 0x53, 0x6e, 0xd3, + 0x55, 0x6e, 0x13, 0xae, 0xaa, 0x41, 0xad, 0x66, 0x72, 0x22, 0x33, 0x7e, 0xa6, 0x43, 0x15, 0x19, + 0x91, 0x15, 0x11, 0x69, 0xb7, 0x1b, 0x89, 0x13, 0x97, 0x41, 0x80, 0x04, 0x07, 0x4e, 0x88, 0x03, + 0x02, 0x09, 0x24, 0x5a, 0xe2, 0x00, 0x37, 0x0e, 0x48, 0x48, 0x70, 0x02, 0x71, 0xe3, 0xc8, 0x15, + 0x09, 0x24, 0x04, 0xd2, 0x68, 0x34, 0x37, 0xf4, 0xb7, 0xc8, 0xf8, 0x11, 0x3f, 0x9c, 0x61, 0xbb, + 0xb7, 0x41, 0x73, 0x8b, 0x78, 0x7f, 0x79, 0xef, 0xbf, 0xfd, 0xfd, 0x05, 0x16, 0x5f, 0x4e, 0xb0, + 0x7f, 0xd6, 0xed, 0x7b, 0x9e, 0x6f, 0x6d, 0x8e, 0x7d, 0x2f, 0xf4, 0x10, 0x1a, 0xd9, 0xce, 0xc9, + 0x24, 0x60, 0x7f, 0x9b, 0xb4, 0xbd, 0x53, 0xef, 0x7b, 0xa3, 0x91, 0xe7, 0x32, 0x58, 0xa7, 0x1e, + 0xef, 0xd1, 0x69, 0xda, 0x6e, 0x88, 0x7d, 0xd7, 0x74, 0x44, 0x6b, 0xd0, 0x3f, 0xc6, 0x23, 0x93, + 0xff, 0xb5, 0x2c, 0x33, 0x34, 0xe3, 0xf3, 0xeb, 0xbf, 0xad, 0xc1, 0xea, 0xd1, 0xb1, 0x77, 0xba, + 0xe3, 0x39, 0x0e, 0xee, 0x87, 0xb6, 0xe7, 0x06, 0x06, 0x7e, 0x39, 0xc1, 0x41, 0x88, 0xde, 0x86, + 0x52, 0xcf, 0x0c, 0x70, 0x5b, 0x5b, 0xd3, 0xd6, 0x6b, 0xdb, 0x37, 0x36, 0x25, 0x4a, 0x38, 0x09, + 0x4f, 0x82, 0xe1, 0x43, 0x33, 0xc0, 0x06, 0xed, 0x89, 0x10, 0x94, 0xac, 0xde, 0xfe, 0x6e, 0xbb, + 0xb0, 0xa6, 0xad, 0x17, 0x0d, 0xfa, 0x8d, 0x5e, 0x85, 0x46, 0x3f, 0x9a, 0x7b, 0x7f, 0x37, 0x68, + 0x17, 0xd7, 0x8a, 0xeb, 0x45, 0x43, 0x06, 0xea, 0xff, 0xae, 0xc1, 0xf5, 0x14, 0x19, 0xc1, 0xd8, + 0x73, 0x03, 0x8c, 0xee, 0xc1, 0x5c, 0x10, 0x9a, 0xe1, 0x24, 0xe0, 0x94, 0x7c, 0x55, 0x49, 0xc9, + 0x11, 0xed, 0x62, 0xf0, 0xae, 0x69, 0xb4, 0x05, 0x05, 0x5a, 0xf4, 0x35, 0x58, 0xb6, 0xdd, 0x27, + 0x78, 0xe4, 0xf9, 0x67, 0xdd, 0x31, 0xf6, 0xfb, 0xd8, 0x0d, 0xcd, 0x21, 0x16, 0x34, 0x2e, 0x89, + 0xb6, 0xc3, 0x69, 0x13, 
0x7a, 0x07, 0xae, 0x33, 0x29, 0x05, 0xd8, 0x3f, 0xb1, 0xfb, 0xb8, 0x6b, + 0x9e, 0x98, 0xb6, 0x63, 0xf6, 0x1c, 0xdc, 0x2e, 0xad, 0x15, 0xd7, 0x2b, 0xc6, 0x0a, 0x6d, 0x3e, + 0x62, 0xad, 0x0f, 0x44, 0xa3, 0xfe, 0xe7, 0x1a, 0xac, 0x90, 0x15, 0x1e, 0x9a, 0x7e, 0x68, 0x7f, + 0x06, 0x7c, 0xd6, 0xa1, 0x1e, 0x5f, 0x5b, 0xbb, 0x48, 0xdb, 0x24, 0x18, 0xe9, 0x33, 0x16, 0xe8, + 0x09, 0x4f, 0x4a, 0x74, 0x99, 0x12, 0x4c, 0xff, 0x33, 0xae, 0x10, 0x71, 0x3a, 0xaf, 0x22, 0x88, + 0x24, 0xce, 0x42, 0x1a, 0xe7, 0x25, 0xc4, 0xa0, 0xff, 0x73, 0x11, 0x56, 0x1e, 0x7b, 0xa6, 0x35, + 0x55, 0x98, 0xcf, 0x9f, 0x9d, 0xdf, 0x85, 0x39, 0x66, 0x5d, 0xed, 0x12, 0xc5, 0x75, 0x57, 0xc6, + 0xc5, 0x2d, 0x6f, 0x4a, 0xe1, 0x11, 0x05, 0x18, 0x7c, 0x10, 0xba, 0x0b, 0x4d, 0x1f, 0x8f, 0x1d, + 0xbb, 0x6f, 0x76, 0xdd, 0xc9, 0xa8, 0x87, 0xfd, 0x76, 0x79, 0x4d, 0x5b, 0x2f, 0x1b, 0x0d, 0x0e, + 0x3d, 0xa0, 0x40, 0xf4, 0x23, 0x68, 0x0c, 0x6c, 0xec, 0x58, 0x5d, 0xdb, 0xb5, 0xf0, 0x47, 0xfb, + 0xbb, 0xed, 0xb9, 0xb5, 0xe2, 0x7a, 0x6d, 0xfb, 0xdb, 0x9b, 0x69, 0xcf, 0xb0, 0xa9, 0xe4, 0xc8, + 0xe6, 0x23, 0x32, 0x7c, 0x9f, 0x8d, 0xfe, 0xbe, 0x1b, 0xfa, 0x67, 0x46, 0x7d, 0x10, 0x03, 0xa1, + 0x36, 0xcc, 0xfb, 0x78, 0xe0, 0xe3, 0xe0, 0xb8, 0x3d, 0xbf, 0xa6, 0xad, 0x57, 0x0c, 0xf1, 0x8b, + 0x5e, 0x87, 0x05, 0x1f, 0x07, 0xde, 0xc4, 0xef, 0xe3, 0xee, 0xd0, 0xf7, 0x26, 0xe3, 0xa0, 0x5d, + 0x59, 0x2b, 0xae, 0x57, 0x8d, 0xa6, 0x00, 0xef, 0x51, 0x68, 0xe7, 0x7b, 0xb0, 0x98, 0xc2, 0x82, + 0x5a, 0x50, 0x7c, 0x81, 0xcf, 0xa8, 0x20, 0x8a, 0x06, 0xf9, 0x44, 0xcb, 0x50, 0x3e, 0x31, 0x9d, + 0x09, 0xe6, 0xac, 0x66, 0x3f, 0xdf, 0x2a, 0xdc, 0xd7, 0xf4, 0x3f, 0xd6, 0xa0, 0x6d, 0x60, 0x07, + 0x9b, 0x01, 0xfe, 0x22, 0x45, 0xba, 0x0a, 0x73, 0xae, 0x67, 0xe1, 0xfd, 0x5d, 0x2a, 0xd2, 0xa2, + 0xc1, 0xff, 0xf4, 0x9f, 0x69, 0xb0, 0xbc, 0x87, 0x43, 0xa2, 0xdb, 0x76, 0x10, 0xda, 0xfd, 0xc8, + 0x78, 0xbf, 0x0b, 0x45, 0x1f, 0xbf, 0xe4, 0x94, 0xbd, 0x21, 0x53, 0x16, 0xb9, 0x62, 0xd5, 0x48, + 0x83, 0x8c, 0x43, 0xaf, 0x40, 0xdd, 0x1a, 0x39, 0xdd, 0xfe, 0xb1, 0xe9, 0xba, 0xd8, 0x61, 0xd6, + 0x51, 0x35, 0x6a, 0xd6, 0xc8, 0xd9, 0xe1, 0x20, 0x74, 0x0b, 0x20, 0xc0, 0xc3, 0x11, 0x76, 0xc3, + 0xa9, 0xf7, 0x8c, 0x41, 0xd0, 0x06, 0x2c, 0x0e, 0x7c, 0x6f, 0xd4, 0x0d, 0x8e, 0x4d, 0xdf, 0xea, + 0x3a, 0xd8, 0xb4, 0xb0, 0x4f, 0xa9, 0xaf, 0x18, 0x0b, 0xa4, 0xe1, 0x88, 0xc0, 0x1f, 0x53, 0x30, + 0xba, 0x07, 0xe5, 0xa0, 0xef, 0x8d, 0x31, 0xd5, 0xb4, 0xe6, 0xf6, 0x4d, 0x95, 0x0e, 0xed, 0x9a, + 0xa1, 0x79, 0x44, 0x3a, 0x19, 0xac, 0xaf, 0xfe, 0x3f, 0xdc, 0xd4, 0xbe, 0xe4, 0x9e, 0x2b, 0x66, + 0x8e, 0xe5, 0x4f, 0xc7, 0x1c, 0xe7, 0x72, 0x99, 0xe3, 0xfc, 0xf9, 0xe6, 0x98, 0xe2, 0xda, 0x45, + 0xcc, 0xb1, 0x32, 0xd3, 0x1c, 0xab, 0x9f, 0x8d, 0x39, 0xfe, 0xfd, 0xd4, 0x1c, 0xbf, 0xec, 0x62, + 0x9f, 0x9a, 0x6c, 0x59, 0x32, 0xd9, 0xbf, 0xd4, 0xe0, 0x2b, 0x7b, 0x38, 0x8c, 0xc8, 0x27, 0x16, + 0x88, 0xbf, 0xa4, 0x41, 0xf7, 0x13, 0x0d, 0x3a, 0x2a, 0x5a, 0xaf, 0x12, 0x78, 0x3f, 0x80, 0xd5, + 0x08, 0x47, 0xd7, 0xc2, 0x41, 0xdf, 0xb7, 0xc7, 0x54, 0x8c, 0xd4, 0xc9, 0xd4, 0xb6, 0xef, 0xa8, + 0x34, 0x36, 0x49, 0xc1, 0x4a, 0x34, 0xc5, 0x6e, 0x6c, 0x06, 0xfd, 0xf7, 0x34, 0x58, 0x21, 0x4e, + 0x8d, 0x7b, 0x21, 0x77, 0xe0, 0x5d, 0x9e, 0xaf, 0xb2, 0x7f, 0x2b, 0xa4, 0xfc, 0x5b, 0x0e, 0x1e, + 0xd3, 0x2c, 0x36, 0x49, 0xcf, 0x55, 0x78, 0xf7, 0x0d, 0x28, 0xdb, 0xee, 0xc0, 0x13, 0xac, 0xba, + 0xad, 0x62, 0x55, 0x1c, 0x19, 0xeb, 0xad, 0xbb, 0x8c, 0x8a, 0xa9, 0xc3, 0xbd, 0x82, 0xba, 0x25, + 0x97, 0x5d, 0x50, 0x2c, 0xfb, 0x77, 0x35, 0xb8, 0x9e, 0x42, 0x78, 0x95, 0x75, 0x7f, 0x07, 0xe6, + 0x68, 0x18, 0x11, 0x0b, 0x7f, 0x55, 0xb9, 0xf0, 
0x18, 0xba, 0xc7, 0x76, 0x10, 0x1a, 0x7c, 0x8c, + 0xee, 0x41, 0x2b, 0xd9, 0x46, 0x02, 0x1c, 0x0f, 0x6e, 0x5d, 0xd7, 0x1c, 0x31, 0x06, 0x54, 0x8d, + 0x1a, 0x87, 0x1d, 0x98, 0x23, 0x8c, 0xbe, 0x02, 0x15, 0x62, 0xb2, 0x5d, 0xdb, 0x12, 0xe2, 0x9f, + 0xa7, 0x26, 0x6c, 0x05, 0xe8, 0x26, 0x00, 0x6d, 0x32, 0x2d, 0xcb, 0x67, 0xb1, 0xaf, 0x6a, 0x54, + 0x09, 0xe4, 0x01, 0x01, 0xe8, 0x7f, 0xa0, 0x41, 0x9d, 0xf8, 0xd8, 0x27, 0x38, 0x34, 0x89, 0x1c, + 0xd0, 0x37, 0xa1, 0xea, 0x78, 0xa6, 0xd5, 0x0d, 0xcf, 0xc6, 0x0c, 0x55, 0x33, 0xc9, 0xeb, 0xa9, + 0x63, 0x7e, 0x7a, 0x36, 0xc6, 0x46, 0xc5, 0xe1, 0x5f, 0x79, 0xf8, 0x9d, 0x32, 0xe5, 0xa2, 0xc2, + 0x94, 0xff, 0xb1, 0x0c, 0xab, 0x3f, 0x30, 0xc3, 0xfe, 0xf1, 0xee, 0x48, 0x84, 0xf0, 0xcb, 0x2b, + 0xc1, 0xd4, 0xb7, 0x15, 0xe2, 0xbe, 0xed, 0x53, 0xf3, 0x9d, 0x91, 0x9e, 0x97, 0x55, 0x7a, 0x4e, + 0x8a, 0xc5, 0xcd, 0xe7, 0x5c, 0x54, 0x31, 0x3d, 0x8f, 0x45, 0xda, 0xb9, 0xcb, 0x44, 0xda, 0x1d, + 0x68, 0xe0, 0x8f, 0xfa, 0xce, 0x84, 0xc8, 0x9c, 0x62, 0x67, 0x21, 0xf4, 0x96, 0x02, 0x7b, 0xdc, + 0xc8, 0xea, 0x7c, 0xd0, 0x3e, 0xa7, 0x81, 0x89, 0x7a, 0x84, 0x43, 0x93, 0xc6, 0xc9, 0xda, 0xf6, + 0x5a, 0x96, 0xa8, 0x85, 0x7e, 0x30, 0x71, 0x93, 0x3f, 0x74, 0x03, 0xaa, 0x3c, 0xae, 0xef, 0xef, + 0xb6, 0xab, 0x94, 0x7d, 0x53, 0x00, 0x32, 0xa1, 0xc1, 0x3d, 0x10, 0xa7, 0x10, 0x28, 0x85, 0xdf, + 0x51, 0x21, 0x50, 0x0b, 0x3b, 0x4e, 0x79, 0xc0, 0xa3, 0x7c, 0x10, 0x03, 0x91, 0x02, 0xd5, 0x1b, + 0x0c, 0x1c, 0xdb, 0xc5, 0x07, 0x4c, 0xc2, 0x35, 0x4a, 0x84, 0x0c, 0x24, 0xb9, 0xc0, 0x09, 0xf6, + 0x03, 0xdb, 0x73, 0xdb, 0x75, 0xda, 0x2e, 0x7e, 0x3b, 0x5d, 0x58, 0x4c, 0xa1, 0x50, 0x84, 0xf8, + 0xaf, 0xc7, 0x43, 0xfc, 0x6c, 0x1e, 0xc7, 0x52, 0x80, 0xbf, 0xd0, 0x60, 0xe5, 0x99, 0x1b, 0x4c, + 0x7a, 0xd1, 0xda, 0xbe, 0x18, 0x3d, 0x4e, 0x7a, 0x90, 0x52, 0xca, 0x83, 0xe8, 0x3f, 0x2e, 0xc3, + 0x02, 0x5f, 0x05, 0x11, 0x37, 0x75, 0x05, 0x37, 0xa0, 0x1a, 0x05, 0x11, 0xce, 0x90, 0x29, 0x00, + 0xad, 0x41, 0x2d, 0x66, 0x08, 0x9c, 0xaa, 0x38, 0x28, 0x17, 0x69, 0x22, 0x25, 0x28, 0xc5, 0x52, + 0x82, 0x9b, 0x00, 0x03, 0x67, 0x12, 0x1c, 0x77, 0x43, 0x7b, 0x84, 0x79, 0x4a, 0x52, 0xa5, 0x90, + 0xa7, 0xf6, 0x08, 0xa3, 0x07, 0x50, 0xef, 0xd9, 0xae, 0xe3, 0x0d, 0xbb, 0x63, 0x33, 0x3c, 0x0e, + 0x78, 0x31, 0xa7, 0x12, 0x0b, 0x4d, 0xe0, 0x1e, 0xd2, 0xbe, 0x46, 0x8d, 0x8d, 0x39, 0x24, 0x43, + 0xd0, 0x2d, 0xa8, 0xb9, 0x93, 0x51, 0xd7, 0x1b, 0x74, 0x7d, 0xef, 0x34, 0xa0, 0x25, 0x5b, 0xd1, + 0xa8, 0xba, 0x93, 0xd1, 0xfb, 0x03, 0xc3, 0x3b, 0x25, 0x4e, 0xbc, 0x4a, 0xdc, 0x79, 0xe0, 0x78, + 0x43, 0x56, 0xae, 0xcd, 0x9e, 0x7f, 0x3a, 0x80, 0x8c, 0xb6, 0xb0, 0x13, 0x9a, 0x74, 0x74, 0x35, + 0xdf, 0xe8, 0x68, 0x00, 0x7a, 0x0d, 0x9a, 0x7d, 0x6f, 0x34, 0x36, 0x29, 0x87, 0x1e, 0xf9, 0xde, + 0x88, 0x5a, 0x4e, 0xd1, 0x48, 0x40, 0xd1, 0x0e, 0xd4, 0x68, 0xfe, 0xcc, 0xcd, 0xab, 0x46, 0xf1, + 0xe8, 0x2a, 0xf3, 0x8a, 0xe5, 0xb1, 0x44, 0x41, 0xc1, 0x16, 0x9f, 0x01, 0xd1, 0x0c, 0x61, 0xa5, + 0x81, 0xfd, 0x31, 0xe6, 0x16, 0x52, 0xe3, 0xb0, 0x23, 0xfb, 0x63, 0x4c, 0x92, 0x7a, 0xdb, 0x0d, + 0xb0, 0x1f, 0x8a, 0x12, 0xab, 0xdd, 0xa0, 0xea, 0xd3, 0x60, 0x50, 0xae, 0xd8, 0x68, 0x1f, 0x9a, + 0x41, 0x68, 0xfa, 0x61, 0x77, 0xec, 0x05, 0x54, 0x01, 0xda, 0x4d, 0xaa, 0xdb, 0x7a, 0x46, 0x41, + 0xf7, 0x24, 0x18, 0x1e, 0xf2, 0x9e, 0x46, 0x83, 0x8e, 0x14, 0xbf, 0xfa, 0x7f, 0x17, 0xa0, 0x29, + 0xd3, 0x4c, 0x8c, 0x98, 0x25, 0xf8, 0x42, 0x11, 0xc5, 0x2f, 0x59, 0x01, 0x76, 0xcd, 0x9e, 0x83, + 0x59, 0x35, 0x41, 0xf5, 0xb0, 0x62, 0xd4, 0x18, 0x8c, 0x4e, 0x40, 0xf4, 0x89, 0x71, 0x8a, 0x2a, + 0x7f, 0x91, 0x52, 0x5f, 0xa5, 0x10, 0x1a, 0x3c, 0xdb, 0x30, 0x2f, 0x0a, 
0x11, 0xa6, 0x85, 0xe2, + 0x97, 0xb4, 0xf4, 0x26, 0x36, 0xc5, 0xca, 0xb4, 0x50, 0xfc, 0xa2, 0x5d, 0xa8, 0xb3, 0x29, 0xc7, + 0xa6, 0x6f, 0x8e, 0x84, 0x0e, 0xbe, 0xa2, 0xb4, 0xe3, 0xf7, 0xf0, 0xd9, 0x73, 0xe2, 0x12, 0x0e, + 0x4d, 0xdb, 0x37, 0x98, 0xcc, 0x0e, 0xe9, 0x28, 0xb4, 0x0e, 0x2d, 0x36, 0xcb, 0xc0, 0x76, 0x30, + 0xd7, 0xe6, 0x79, 0x56, 0x8d, 0x50, 0xf8, 0x23, 0xdb, 0xc1, 0x4c, 0x61, 0xa3, 0x25, 0x50, 0x29, + 0x55, 0x98, 0xbe, 0x52, 0x08, 0x95, 0xd1, 0x1d, 0x68, 0xb0, 0x66, 0xe1, 0xe9, 0x98, 0x3b, 0x66, + 0x34, 0x3e, 0x67, 0x30, 0x9a, 0x24, 0x4c, 0x46, 0x4c, 0xe3, 0x81, 0x2d, 0xc7, 0x9d, 0x8c, 0x88, + 0xbe, 0xeb, 0x7f, 0x58, 0x82, 0x25, 0x62, 0xf6, 0xdc, 0x03, 0x5c, 0x21, 0xdc, 0xde, 0x04, 0xb0, + 0x82, 0xb0, 0x2b, 0xb9, 0xaa, 0xaa, 0x15, 0x84, 0xdc, 0x19, 0x7f, 0x53, 0x44, 0xcb, 0x62, 0x76, + 0x02, 0x9d, 0x70, 0x43, 0xe9, 0x88, 0x79, 0xa9, 0xad, 0xa2, 0x3b, 0xd0, 0xe0, 0x65, 0x9f, 0x54, + 0xea, 0xd4, 0x19, 0xf0, 0x40, 0xed, 0x4c, 0xe7, 0x94, 0x5b, 0x56, 0xb1, 0xa8, 0x39, 0x7f, 0xb5, + 0xa8, 0x59, 0x49, 0x46, 0xcd, 0xf7, 0x60, 0x81, 0x7a, 0x82, 0xc8, 0x8a, 0x84, 0x03, 0xc9, 0x63, + 0x46, 0x4d, 0x3a, 0x54, 0xfc, 0x06, 0xf1, 0xc8, 0x07, 0x52, 0xe4, 0x23, 0xcc, 0x70, 0x31, 0xb6, + 0xba, 0xa1, 0x6f, 0xba, 0xc1, 0x00, 0xfb, 0x34, 0x72, 0x56, 0x8c, 0x3a, 0x01, 0x3e, 0xe5, 0x30, + 0xfd, 0x5f, 0x0a, 0xb0, 0xca, 0x0b, 0xd8, 0xab, 0xeb, 0x45, 0x56, 0xf8, 0x12, 0xfe, 0xbf, 0x78, + 0x4e, 0x49, 0x58, 0xca, 0x91, 0x9a, 0x95, 0x15, 0xa9, 0x99, 0x5c, 0x16, 0xcd, 0xa5, 0xca, 0xa2, + 0x68, 0x2b, 0x67, 0x3e, 0xff, 0x56, 0x0e, 0x29, 0xf8, 0x69, 0xae, 0x4e, 0x65, 0x57, 0x35, 0xd8, + 0x4f, 0x3e, 0x86, 0xfe, 0xa7, 0x06, 0x8d, 0x23, 0x6c, 0xfa, 0xfd, 0x63, 0xc1, 0xc7, 0x77, 0xe2, + 0x5b, 0x5f, 0xaf, 0x66, 0x88, 0x58, 0x1a, 0xf2, 0xf3, 0xb3, 0xe7, 0xf5, 0x5f, 0x1a, 0xd4, 0x7f, + 0x8d, 0x34, 0x89, 0xc5, 0xde, 0x8f, 0x2f, 0xf6, 0xb5, 0x8c, 0xc5, 0x1a, 0x38, 0xf4, 0x6d, 0x7c, + 0x82, 0x7f, 0xee, 0x96, 0xfb, 0x4f, 0x1a, 0x74, 0x8e, 0xce, 0xdc, 0xbe, 0xc1, 0x6c, 0xf9, 0xea, + 0x16, 0x73, 0x07, 0x1a, 0x27, 0x52, 0xd6, 0x56, 0xa0, 0x0a, 0x57, 0x3f, 0x89, 0x17, 0x7e, 0x06, + 0xb4, 0xc4, 0x8e, 0x1b, 0x5f, 0xac, 0x70, 0xad, 0xaf, 0xab, 0xa8, 0x4e, 0x10, 0x47, 0x5d, 0xd3, + 0x82, 0x2f, 0x03, 0xf5, 0xdf, 0xd7, 0x60, 0x49, 0xd1, 0x11, 0x5d, 0x87, 0x79, 0x5e, 0x64, 0xf2, + 0x18, 0xcc, 0x6c, 0xd8, 0x22, 0xe2, 0x99, 0x6e, 0x93, 0xd8, 0x56, 0x3a, 0x15, 0xb4, 0xd0, 0x6d, + 0xa8, 0x45, 0xd5, 0x80, 0x95, 0x92, 0x8f, 0x15, 0xa0, 0x0e, 0x54, 0xb8, 0x73, 0x12, 0x65, 0x56, + 0xf4, 0xaf, 0xff, 0x9d, 0x06, 0xab, 0xef, 0x9a, 0xae, 0xe5, 0x0d, 0x06, 0x57, 0x67, 0xeb, 0x0e, + 0x48, 0x45, 0x44, 0xde, 0xed, 0x09, 0xb9, 0xf2, 0x78, 0x03, 0x16, 0x7d, 0xe6, 0x19, 0x2d, 0x99, + 0xef, 0x45, 0xa3, 0x25, 0x1a, 0x22, 0x7e, 0xfe, 0x55, 0x01, 0x10, 0x09, 0x06, 0x0f, 0x4d, 0xc7, + 0x74, 0xfb, 0xf8, 0xf2, 0xa4, 0xdf, 0x85, 0xa6, 0x14, 0xc2, 0xa2, 0x13, 0xb9, 0x78, 0x0c, 0x0b, + 0xd0, 0x7b, 0xd0, 0xec, 0x31, 0x54, 0x5d, 0x1f, 0x9b, 0x81, 0xe7, 0x52, 0xe7, 0xda, 0x54, 0xef, + 0x44, 0x3c, 0xf5, 0xed, 0xe1, 0x10, 0xfb, 0x3b, 0x9e, 0x6b, 0xf1, 0x5c, 0xac, 0x27, 0xc8, 0x24, + 0x43, 0x89, 0xe0, 0xa6, 0xf1, 0x5c, 0x88, 0x06, 0xa2, 0x80, 0x4e, 0x59, 0x11, 0x60, 0xd3, 0x99, + 0x32, 0x62, 0xea, 0x8d, 0x5b, 0xac, 0xe1, 0x28, 0x7b, 0x23, 0x4a, 0x11, 0x5f, 0xf5, 0xbf, 0xd1, + 0x00, 0x45, 0xf5, 0x12, 0xad, 0x0c, 0xa9, 0xf6, 0x25, 0x87, 0x6a, 0x8a, 0xa0, 0x70, 0x03, 0xaa, + 0x96, 0x18, 0xc9, 0xcd, 0x65, 0x0a, 0xa0, 0x3e, 0x9a, 0x12, 0xdd, 0x25, 0xc1, 0x18, 0x5b, 0xa2, + 0x1e, 0x61, 0xc0, 0xc7, 0x14, 0x26, 0x87, 0xe7, 0x52, 0x32, 0x3c, 0xc7, 0xf7, 0x59, 0xca, 0xd2, + 
0x3e, 0x8b, 0xfe, 0x49, 0x01, 0x5a, 0xd4, 0xdd, 0xed, 0x4c, 0x8b, 0xfd, 0x5c, 0x44, 0xdf, 0x81, + 0x06, 0x3f, 0xb3, 0x96, 0x08, 0xaf, 0xbf, 0x8c, 0x4d, 0x86, 0xde, 0x86, 0x65, 0xd6, 0xc9, 0xc7, + 0xc1, 0xc4, 0x99, 0xa6, 0xe2, 0x2c, 0x99, 0x45, 0x2f, 0x99, 0x9f, 0x25, 0x4d, 0x62, 0xc4, 0x33, + 0x58, 0x1d, 0x3a, 0x5e, 0xcf, 0x74, 0xba, 0xb2, 0x78, 0x98, 0x0c, 0x73, 0x68, 0xfc, 0x32, 0x1b, + 0x7e, 0x14, 0x97, 0x61, 0x80, 0xf6, 0x48, 0x59, 0x8f, 0x5f, 0x4c, 0xb3, 0xfc, 0x72, 0xee, 0x2c, + 0xbf, 0x4e, 0x06, 0x46, 0x49, 0xfe, 0x9f, 0x68, 0xb0, 0x90, 0xd8, 0x2a, 0x4d, 0x96, 0x94, 0x5a, + 0xba, 0xa4, 0xbc, 0x0f, 0x65, 0x52, 0x67, 0x31, 0x67, 0xd8, 0x54, 0x97, 0x3b, 0xf2, 0xac, 0x06, + 0x1b, 0x80, 0xb6, 0x60, 0x49, 0x71, 0x40, 0xca, 0x75, 0x00, 0xa5, 0xcf, 0x47, 0xf5, 0x9f, 0x94, + 0xa0, 0x16, 0xe3, 0xc7, 0x8c, 0x6a, 0x38, 0xcf, 0xde, 0x57, 0x62, 0x79, 0xc5, 0xf4, 0xf2, 0x32, + 0xce, 0xce, 0x88, 0xde, 0x8d, 0xf0, 0x88, 0x25, 0xff, 0xbc, 0x12, 0x19, 0xe1, 0x11, 0x4d, 0xfd, + 0xe3, 0x59, 0xfd, 0x9c, 0x94, 0xd5, 0x27, 0xea, 0x9e, 0xf9, 0x73, 0xea, 0x9e, 0x8a, 0x5c, 0xf7, + 0x48, 0x76, 0x54, 0x4d, 0xda, 0x51, 0xde, 0x02, 0xf5, 0x6d, 0x58, 0xea, 0xfb, 0xd8, 0x0c, 0xb1, + 0xf5, 0xf0, 0x6c, 0x27, 0x6a, 0xe2, 0x99, 0x91, 0xaa, 0x09, 0x3d, 0x9a, 0xee, 0x19, 0x31, 0x29, + 0xd7, 0xa9, 0x94, 0xd5, 0x65, 0x15, 0x97, 0x0d, 0x13, 0xb2, 0x70, 0xcf, 0xf4, 0x2f, 0x59, 0x1a, + 0x37, 0x2e, 0x55, 0x1a, 0xdf, 0x86, 0x9a, 0x08, 0xad, 0xc4, 0xdc, 0x9b, 0xcc, 0xf3, 0x09, 0x5f, + 0x60, 0x05, 0x92, 0x33, 0x58, 0x90, 0x37, 0x5d, 0x93, 0x45, 0x69, 0x2b, 0x5d, 0x94, 0x5e, 0x87, + 0x79, 0x3b, 0xe8, 0x0e, 0xcc, 0x17, 0xb8, 0xbd, 0x48, 0x5b, 0xe7, 0xec, 0xe0, 0x91, 0xf9, 0x02, + 0xeb, 0xff, 0x5a, 0x84, 0xe6, 0xb4, 0x8a, 0xc9, 0xed, 0x46, 0xf2, 0x5c, 0x12, 0x38, 0x80, 0xd6, + 0x34, 0x50, 0x53, 0x0e, 0x9f, 0x5b, 0x88, 0x25, 0x4f, 0x32, 0x16, 0xc6, 0x09, 0x7b, 0x95, 0xf6, + 0x8a, 0x4b, 0x17, 0xda, 0x2b, 0xbe, 0xe2, 0x49, 0xe3, 0x3d, 0x58, 0x89, 0x02, 0xb0, 0xb4, 0x6c, + 0x96, 0xe5, 0x2f, 0x8b, 0xc6, 0xc3, 0xf8, 0xf2, 0x33, 0x5c, 0xc0, 0x7c, 0x96, 0x0b, 0x48, 0xaa, + 0x40, 0x25, 0xa5, 0x02, 0xe9, 0x03, 0xcf, 0xaa, 0xe2, 0xc0, 0x53, 0x7f, 0x06, 0x4b, 0x74, 0x1b, + 0x30, 0xe8, 0xfb, 0x76, 0x0f, 0x47, 0x39, 0x6b, 0x1e, 0xb1, 0x76, 0xa0, 0x92, 0x48, 0x7b, 0xa3, + 0x7f, 0xfd, 0x77, 0x34, 0x58, 0x4d, 0xcf, 0x4b, 0x35, 0x66, 0xea, 0x48, 0x34, 0xc9, 0x91, 0xfc, + 0x3a, 0x2c, 0x4d, 0xa7, 0x97, 0x13, 0xea, 0x8c, 0x94, 0x51, 0x41, 0xb8, 0x81, 0xa6, 0x73, 0x08, + 0x98, 0xfe, 0x13, 0x2d, 0xda, 0x4d, 0x25, 0xb0, 0x21, 0xdd, 0x63, 0x26, 0xc1, 0xcd, 0x73, 0x1d, + 0xdb, 0x8d, 0xaa, 0x6e, 0xbe, 0x46, 0x06, 0xe4, 0x55, 0xf7, 0xbb, 0xb0, 0xc0, 0x3b, 0x45, 0x31, + 0x2a, 0x67, 0x56, 0xd6, 0x64, 0xe3, 0xa2, 0xe8, 0x74, 0x17, 0x9a, 0x7c, 0xf3, 0x57, 0xe0, 0x2b, + 0xaa, 0xb6, 0x84, 0x7f, 0x15, 0x5a, 0xa2, 0xdb, 0x45, 0xa3, 0xe2, 0x02, 0x1f, 0x18, 0x65, 0x77, + 0x3f, 0xd6, 0xa0, 0x2d, 0xc7, 0xc8, 0xd8, 0xf2, 0x2f, 0x9e, 0xe3, 0x7d, 0x5b, 0x3e, 0x36, 0xbb, + 0x7b, 0x0e, 0x3d, 0x53, 0x3c, 0xe2, 0xf0, 0xec, 0x80, 0x1e, 0x81, 0x92, 0xd2, 0x64, 0xd7, 0x0e, + 0x42, 0xdf, 0xee, 0x4d, 0xae, 0x74, 0x05, 0x44, 0xff, 0xdb, 0x02, 0x7c, 0x55, 0x39, 0xe1, 0x55, + 0x0e, 0xc8, 0xb2, 0x76, 0x02, 0x1e, 0x42, 0x25, 0x51, 0xc2, 0xbc, 0x76, 0xce, 0xe2, 0xf9, 0xa6, + 0x16, 0xdb, 0x5c, 0x11, 0xe3, 0xc8, 0x1c, 0x91, 0x4e, 0x97, 0xb2, 0xe7, 0xe0, 0x4a, 0x2b, 0xcd, + 0x21, 0xc6, 0xa1, 0x07, 0x50, 0x67, 0xe5, 0x61, 0xf7, 0xc4, 0xc6, 0xa7, 0xe2, 0x5c, 0xe7, 0x96, + 0xd2, 0xaf, 0xd1, 0x7e, 0xcf, 0x6d, 0x7c, 0x6a, 0xd4, 0x9c, 0xe8, 0x3b, 0xd0, 0xff, 0xb7, 0x08, + 0x30, 0x6d, 0x23, 0xb5, 
0xe9, 0xd4, 0x60, 0xb8, 0x05, 0xc4, 0x20, 0x24, 0x10, 0xcb, 0xb9, 0x9f, + 0xf8, 0x45, 0xc6, 0x74, 0x7b, 0xd6, 0xb2, 0x83, 0x90, 0xf3, 0x65, 0xeb, 0x7c, 0x5a, 0x04, 0x8b, + 0x88, 0xc8, 0xd8, 0xb1, 0x89, 0xa8, 0xbd, 0x08, 0x04, 0xbd, 0x05, 0x68, 0xe8, 0x7b, 0xa7, 0xb6, + 0x3b, 0x8c, 0x67, 0xec, 0x2c, 0xb1, 0x5f, 0xe4, 0x2d, 0xb1, 0x94, 0xfd, 0x87, 0xd0, 0x4a, 0x74, + 0x17, 0x2c, 0xb9, 0x37, 0x83, 0x8c, 0x3d, 0x69, 0x2e, 0x7e, 0x82, 0xb3, 0x20, 0x63, 0x08, 0x3a, + 0x5d, 0x68, 0x25, 0xe9, 0x55, 0x9c, 0xc1, 0x7c, 0x43, 0x3e, 0x83, 0x39, 0xcf, 0x4c, 0xc9, 0x34, + 0xb1, 0x43, 0x98, 0xce, 0x00, 0x96, 0x55, 0x94, 0x28, 0x90, 0xdc, 0x97, 0x91, 0xe4, 0xc9, 0x69, + 0x63, 0x87, 0x3d, 0xdf, 0x8b, 0xd2, 0x45, 0xca, 0xe6, 0x2c, 0x0f, 0x1c, 0xdb, 0x94, 0x2b, 0x48, + 0x9b, 0x72, 0xfa, 0x1f, 0x69, 0x80, 0xd2, 0xda, 0x8d, 0x9a, 0x50, 0x88, 0x26, 0x29, 0xec, 0xef, + 0x26, 0xb4, 0xa9, 0x90, 0xd2, 0xa6, 0x1b, 0x50, 0x8d, 0x22, 0x22, 0x77, 0x7f, 0x53, 0x40, 0x5c, + 0xd7, 0x4a, 0xb2, 0xae, 0xc5, 0x08, 0x2b, 0xcb, 0x84, 0x1d, 0x03, 0x4a, 0x5b, 0x4c, 0x7c, 0x26, + 0x4d, 0x9e, 0x69, 0x16, 0x85, 0x31, 0x4c, 0x45, 0x19, 0xd3, 0x7f, 0x14, 0x00, 0x4d, 0x63, 0x7e, + 0x74, 0x10, 0x95, 0x27, 0x50, 0x6e, 0xc1, 0x52, 0x3a, 0x23, 0x10, 0x69, 0x10, 0x4a, 0xe5, 0x03, + 0xaa, 0xd8, 0x5d, 0x54, 0x5d, 0x56, 0x7a, 0x27, 0xf2, 0x71, 0x2c, 0xc1, 0xb9, 0x95, 0x95, 0xe0, + 0x24, 0xdc, 0xdc, 0x6f, 0x24, 0x2f, 0x39, 0x31, 0xa3, 0xb9, 0xaf, 0xf4, 0x47, 0xa9, 0x25, 0xcf, + 0xba, 0xe1, 0x74, 0xf5, 0xeb, 0x49, 0xff, 0x56, 0x80, 0xc5, 0x88, 0x1b, 0x17, 0xe2, 0xf4, 0xec, + 0x83, 0xbf, 0xcf, 0x98, 0xb5, 0x1f, 0xaa, 0x59, 0xfb, 0xcb, 0xe7, 0xe6, 0xb0, 0x9f, 0x1f, 0x67, + 0x3f, 0x86, 0x79, 0xbe, 0x7d, 0x96, 0xb2, 0xdd, 0x3c, 0x55, 0xe2, 0x32, 0x94, 0x89, 0xab, 0x10, + 0xfb, 0x49, 0xec, 0x87, 0xb1, 0x34, 0x7e, 0x6f, 0x8d, 0x9b, 0x6f, 0x43, 0xba, 0xb6, 0xa6, 0xff, + 0xb5, 0x06, 0x70, 0x74, 0xe6, 0xf6, 0x1f, 0x30, 0x4b, 0x7b, 0x1b, 0x4a, 0xb3, 0xee, 0x71, 0x90, + 0xde, 0x34, 0x37, 0xa7, 0x3d, 0x73, 0x08, 0x57, 0xaa, 0x83, 0x8b, 0xc9, 0x3a, 0x38, 0xab, 0x82, + 0xcd, 0xf6, 0x2e, 0xff, 0xa0, 0xc1, 0x75, 0x42, 0xc4, 0xa7, 0x92, 0xb2, 0xe4, 0xe2, 0x70, 0xcc, + 0x73, 0x15, 0x65, 0xcf, 0x75, 0x1f, 0xe6, 0x59, 0x29, 0x2a, 0xd2, 0x87, 0x5b, 0x59, 0x2c, 0x63, + 0x0c, 0x36, 0x44, 0x77, 0xfd, 0x19, 0x34, 0x8c, 0xb8, 0x24, 0x10, 0x82, 0x52, 0xec, 0xb6, 0x0e, + 0xfd, 0xa6, 0xc9, 0xbc, 0x39, 0x36, 0xfb, 0x76, 0x78, 0x46, 0x09, 0x2b, 0x1b, 0xd1, 0xbf, 0x5a, + 0xec, 0xfa, 0x4f, 0x35, 0x58, 0x15, 0xe7, 0x07, 0x5c, 0xa9, 0x2e, 0xcf, 0x9b, 0x6d, 0x58, 0xe1, + 0x1a, 0x94, 0x50, 0x25, 0x96, 0x75, 0x2c, 0x31, 0x98, 0xbc, 0x8c, 0x6d, 0x58, 0x09, 0x4d, 0x7f, + 0x88, 0xc3, 0xe4, 0x18, 0xc6, 0xb9, 0x25, 0xd6, 0x28, 0x8f, 0xc9, 0x73, 0x7e, 0x73, 0x9b, 0x9d, + 0xc0, 0x73, 0x87, 0xc0, 0x75, 0x02, 0xdc, 0xc9, 0x88, 0xaf, 0x52, 0x3f, 0x85, 0x1b, 0xec, 0xbe, + 0x5c, 0x4f, 0xa6, 0xe8, 0x4a, 0xdb, 0xa7, 0xca, 0x75, 0x27, 0x4c, 0xe8, 0x4f, 0x35, 0xb8, 0x99, + 0x81, 0xf9, 0x2a, 0x69, 0xef, 0x63, 0x25, 0xf6, 0x8c, 0x0c, 0x5f, 0xc2, 0x4b, 0xf3, 0xd3, 0x04, + 0x91, 0x3f, 0x2b, 0xc1, 0x62, 0xaa, 0xd3, 0x85, 0x75, 0xee, 0x4d, 0x40, 0x44, 0x08, 0xd1, 0xf3, + 0x0b, 0x5a, 0x34, 0x71, 0x5f, 0xdd, 0x72, 0x27, 0xa3, 0xe8, 0xe9, 0x05, 0xa9, 0x9b, 0x90, 0xcd, + 0x7a, 0xb3, 0xcd, 0xd3, 0x48, 0x72, 0xa5, 0xec, 0xbb, 0xbb, 0x29, 0x02, 0x37, 0x0f, 0x26, 0x23, + 0xb6, 0xcf, 0xca, 0xa5, 0xcc, 0xfc, 0x2f, 0x41, 0x25, 0x81, 0xd1, 0x00, 0x16, 0xe9, 0xfd, 0x8c, + 0x49, 0x38, 0xf4, 0x48, 0xe6, 0x49, 0xe9, 0x62, 0x5e, 0xfe, 0x5b, 0xb9, 0x31, 0xbd, 0xcf, 0x47, + 0x13, 0xe2, 0x79, 0xf2, 0xe9, 0xca, 0x50, 0x81, 
0xc7, 0x76, 0xfb, 0xde, 0x28, 0xc2, 0x33, 0x77, + 0x41, 0x3c, 0xfb, 0x7c, 0xb4, 0x8c, 0x27, 0x0e, 0xed, 0xec, 0xc0, 0x8a, 0x72, 0xe9, 0xb3, 0xe2, + 0x4a, 0x39, 0x9e, 0xc8, 0x3e, 0x84, 0x65, 0xd5, 0xaa, 0x2e, 0x31, 0x47, 0x8a, 0xe2, 0x8b, 0xcc, + 0xb1, 0xf1, 0x2b, 0x50, 0x8d, 0x4e, 0xbf, 0x50, 0x0d, 0xe6, 0x9f, 0xb9, 0xef, 0xb9, 0xde, 0xa9, + 0xdb, 0xba, 0x86, 0xe6, 0xa1, 0xf8, 0xc0, 0x71, 0x5a, 0x1a, 0x6a, 0x40, 0xf5, 0x28, 0xf4, 0xb1, + 0x49, 0x90, 0xb4, 0x0a, 0xa8, 0x09, 0xf0, 0xae, 0x1d, 0x84, 0x9e, 0x6f, 0xf7, 0x4d, 0xa7, 0x55, + 0xdc, 0xf8, 0x18, 0x9a, 0xf2, 0xde, 0x12, 0xaa, 0x43, 0xe5, 0xc0, 0x0b, 0xbf, 0xff, 0x91, 0x1d, + 0x84, 0xad, 0x6b, 0xa4, 0xff, 0x81, 0x17, 0x1e, 0xfa, 0x38, 0xc0, 0x6e, 0xd8, 0xd2, 0x10, 0xc0, + 0xdc, 0xfb, 0xee, 0xae, 0x1d, 0xbc, 0x68, 0x15, 0xd0, 0x12, 0xdf, 0x36, 0x36, 0x9d, 0x7d, 0xbe, + 0x61, 0xd3, 0x2a, 0x92, 0xe1, 0xd1, 0x5f, 0x09, 0xb5, 0xa0, 0x1e, 0x75, 0xd9, 0x3b, 0x7c, 0xd6, + 0x2a, 0xa3, 0x2a, 0x94, 0xd9, 0xe7, 0xdc, 0x86, 0x05, 0xad, 0xe4, 0x99, 0x07, 0x99, 0x93, 0x2d, + 0x22, 0x02, 0xb5, 0xae, 0x91, 0x95, 0xf1, 0x43, 0xa7, 0x96, 0x86, 0x16, 0xa0, 0x16, 0x3b, 0xc2, + 0x69, 0x15, 0x08, 0x60, 0xcf, 0x1f, 0xf7, 0xb9, 0x37, 0x62, 0x24, 0x10, 0x76, 0xee, 0x12, 0x4e, + 0x94, 0x36, 0x1e, 0x42, 0x45, 0x6c, 0x7a, 0x91, 0xae, 0x9c, 0x45, 0xe4, 0xb7, 0x75, 0x0d, 0x2d, + 0x42, 0x43, 0xba, 0xd6, 0xde, 0xd2, 0x10, 0x82, 0xa6, 0xfc, 0xf0, 0xa4, 0x55, 0xd8, 0xd8, 0x06, + 0x98, 0x26, 0x3f, 0x84, 0x9c, 0x7d, 0xf7, 0xc4, 0x74, 0x6c, 0x8b, 0xd1, 0x46, 0x9a, 0x08, 0x77, + 0x29, 0x77, 0x98, 0x66, 0xb5, 0x0a, 0x1b, 0xb7, 0xa1, 0x22, 0x02, 0x3a, 0x81, 0x1b, 0x78, 0xe4, + 0x9d, 0x60, 0x26, 0x99, 0x23, 0x1c, 0xb6, 0xb4, 0xed, 0x9f, 0x22, 0x00, 0x76, 0x4c, 0xe1, 0x79, + 0xbe, 0x85, 0x1c, 0x40, 0x7b, 0x38, 0xdc, 0xf1, 0x46, 0x63, 0xcf, 0x15, 0xdb, 0xa7, 0x01, 0xda, + 0x94, 0x75, 0x9f, 0xff, 0xa4, 0x3b, 0xf2, 0xd5, 0x77, 0x5e, 0x55, 0xf6, 0x4f, 0x74, 0xd6, 0xaf, + 0xa1, 0x11, 0xc5, 0xf6, 0xd4, 0x1e, 0xe1, 0xa7, 0x76, 0xff, 0x45, 0x74, 0xb6, 0x91, 0xfd, 0xe4, + 0x23, 0xd1, 0x55, 0xe0, 0xbb, 0xa3, 0xc4, 0x77, 0x14, 0xfa, 0xb6, 0x3b, 0x14, 0x5e, 0x5a, 0xbf, + 0x86, 0x5e, 0x26, 0x1e, 0x9c, 0x08, 0x84, 0xdb, 0x79, 0xde, 0x98, 0x5c, 0x0e, 0xa5, 0x03, 0x0b, + 0x89, 0x37, 0x78, 0x68, 0x43, 0x7d, 0x01, 0x58, 0xf5, 0x5e, 0xb0, 0xf3, 0x46, 0xae, 0xbe, 0x11, + 0x36, 0x1b, 0x9a, 0xf2, 0x3b, 0x33, 0xf4, 0x4b, 0x59, 0x13, 0xa4, 0x9e, 0x20, 0x74, 0x36, 0xf2, + 0x74, 0x8d, 0x50, 0x7d, 0xc0, 0x14, 0x74, 0x16, 0x2a, 0xe5, 0x73, 0x8d, 0xce, 0x79, 0x01, 0x52, + 0xbf, 0x86, 0x7e, 0x44, 0x62, 0x59, 0xe2, 0xa1, 0x04, 0x7a, 0x53, 0xed, 0x7f, 0xd5, 0xef, 0x29, + 0x66, 0x61, 0xf8, 0x20, 0x69, 0x5e, 0xd9, 0xd4, 0xa7, 0x9e, 0x4e, 0xe5, 0xa7, 0x3e, 0x36, 0xfd, + 0x79, 0xd4, 0x5f, 0x18, 0xc3, 0x84, 0x9a, 0x4d, 0xf2, 0xb0, 0xec, 0x2d, 0x15, 0x8a, 0xcc, 0xd7, + 0x1a, 0x9d, 0xcd, 0xbc, 0xdd, 0xe3, 0xda, 0x25, 0x3f, 0x08, 0x50, 0x33, 0x4d, 0xf9, 0x88, 0x41, + 0xad, 0x5d, 0xea, 0xf7, 0x05, 0xfa, 0x35, 0xf4, 0x54, 0x72, 0xaf, 0xe8, 0xb5, 0x2c, 0xe1, 0xc8, + 0x47, 0xe8, 0xb3, 0xf8, 0xf6, 0x9b, 0x80, 0x98, 0xed, 0xb8, 0x03, 0x7b, 0x38, 0xf1, 0x4d, 0xa6, + 0x58, 0x59, 0xee, 0x26, 0xdd, 0x55, 0xa0, 0xf9, 0xda, 0x05, 0x46, 0x44, 0x4b, 0xea, 0x02, 0xec, + 0xe1, 0xf0, 0x09, 0x0e, 0x7d, 0xbb, 0x1f, 0x24, 0x57, 0x34, 0xf5, 0xa8, 0xbc, 0x83, 0x40, 0xf5, + 0xfa, 0xcc, 0x7e, 0x11, 0x82, 0x1e, 0xd4, 0xf6, 0x48, 0xe6, 0x4d, 0xb3, 0x89, 0x00, 0x65, 0x8e, + 0x14, 0x3d, 0x04, 0x8a, 0xf5, 0xd9, 0x1d, 0xe3, 0xee, 0x2c, 0xf1, 0x38, 0x02, 0x65, 0x0a, 0x36, + 0xfd, 0x64, 0x43, 0xed, 0xce, 0x32, 0x5e, 0x5b, 0xb0, 0x15, 0xed, 0x1c, 
0xe3, 0xfe, 0x8b, 0x77, + 0xb1, 0xe9, 0x84, 0xc7, 0x19, 0x2b, 0x8a, 0xf5, 0x38, 0x7f, 0x45, 0x52, 0xc7, 0x08, 0x07, 0x86, + 0xa5, 0x1d, 0x7a, 0xf2, 0x28, 0x97, 0x2c, 0x5b, 0xea, 0x29, 0xd2, 0x3d, 0x73, 0xaa, 0x9e, 0x09, + 0x8b, 0xbb, 0xbe, 0x37, 0x96, 0x91, 0xbc, 0xa5, 0x44, 0x92, 0xea, 0x97, 0x13, 0xc5, 0x0f, 0xa0, + 0x2e, 0x2a, 0x43, 0x9a, 0xcb, 0xaa, 0xb9, 0x10, 0xef, 0x92, 0x73, 0xe2, 0x0f, 0x61, 0x21, 0x51, + 0x72, 0xaa, 0x85, 0xae, 0xae, 0x4b, 0x67, 0xcd, 0x7e, 0x0a, 0x88, 0xbe, 0x78, 0x91, 0x5e, 0xdb, + 0x65, 0x64, 0x1c, 0xe9, 0x8e, 0x02, 0xc9, 0x56, 0xee, 0xfe, 0x91, 0xe4, 0x7f, 0x0b, 0x56, 0x94, + 0x65, 0x5d, 0xd2, 0x21, 0xf0, 0xfb, 0x5d, 0xe7, 0xd4, 0x9e, 0x49, 0x87, 0x70, 0xee, 0x08, 0x81, + 0x7f, 0xfb, 0x93, 0x26, 0x54, 0x69, 0xe6, 0x45, 0xa5, 0xf5, 0x8b, 0xc4, 0xeb, 0xd3, 0x4d, 0xbc, + 0x3e, 0x84, 0x85, 0xc4, 0x2b, 0x12, 0xb5, 0xd2, 0xaa, 0x9f, 0x9a, 0xe4, 0xc8, 0x1f, 0xe4, 0x77, + 0x1c, 0xea, 0x50, 0xa8, 0x7c, 0xeb, 0x31, 0x6b, 0xee, 0xe7, 0xec, 0x01, 0x56, 0x74, 0x86, 0xf9, + 0x7a, 0xe6, 0x2e, 0xa8, 0x7c, 0xf7, 0xed, 0x8b, 0xcf, 0x4b, 0x3e, 0xfb, 0xbc, 0xed, 0x43, 0x58, + 0x48, 0xdc, 0x40, 0x56, 0x4b, 0x55, 0x7d, 0x4d, 0x79, 0xd6, 0xec, 0x9f, 0x63, 0x82, 0x63, 0xc1, + 0x92, 0xe2, 0x72, 0x28, 0xda, 0xcc, 0xda, 0x5e, 0x54, 0xdf, 0x22, 0x9d, 0xbd, 0xa0, 0x86, 0x64, + 0x4a, 0xc9, 0x98, 0x30, 0x25, 0x32, 0xf9, 0x94, 0xbe, 0xf3, 0x66, 0xbe, 0x77, 0xf7, 0xd1, 0x82, + 0x8e, 0x60, 0x8e, 0xdd, 0x4b, 0x46, 0xaf, 0xa8, 0xcf, 0xe2, 0x62, 0x77, 0x96, 0x3b, 0xb3, 0x6e, + 0x36, 0x07, 0x13, 0x27, 0x0c, 0xe8, 0xa4, 0x65, 0xea, 0x21, 0x91, 0xf2, 0x42, 0x7d, 0xfc, 0x32, + 0x71, 0x67, 0xf6, 0xfd, 0x61, 0x31, 0xe9, 0xff, 0xef, 0x2c, 0xf0, 0x23, 0x58, 0x52, 0x9c, 0xd0, + 0xa3, 0xac, 0x6c, 0x3f, 0xe3, 0x6e, 0x40, 0x67, 0x2b, 0x77, 0xff, 0x08, 0xf3, 0x0f, 0xa1, 0x95, + 0xdc, 0xb6, 0x47, 0x6f, 0x64, 0xe9, 0xb3, 0x0a, 0xe7, 0xf9, 0xca, 0xfc, 0xf0, 0xeb, 0x1f, 0x6c, + 0x0f, 0xed, 0xf0, 0x78, 0xd2, 0x23, 0x2d, 0x5b, 0xac, 0xeb, 0x5b, 0xb6, 0xc7, 0xbf, 0xb6, 0x04, + 0xff, 0xb7, 0xe8, 0xe8, 0x2d, 0x8a, 0x6a, 0xdc, 0xeb, 0xcd, 0xd1, 0xdf, 0x7b, 0xff, 0x17, 0x00, + 0x00, 0xff, 0xff, 0xc2, 0xde, 0x60, 0xae, 0x08, 0x48, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
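The regenerated descriptor bytes above reflect the new resource-group RPCs (CreateResourceGroup, DropResourceGroup, TransferNode, TransferReplica, ListResourceGroups, DescribeResourceGroup) added to the QueryCoord service; the generated client and server surface they expose follows in the next hunks. As a rough, illustrative sketch only — not part of this change — the snippet below shows how that regenerated client could be driven directly. The coordinator address, the import paths of the generated packages, and the direct grpc.Dial are assumptions for illustration; in Milvus the proxy reaches QueryCoord through service discovery instead.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/milvus-io/milvus-proto/go-api/milvuspb"
	"github.com/milvus-io/milvus/internal/proto/querypb"
)

func main() {
	// Assumed QueryCoord address; a real deployment resolves this via session discovery.
	conn, err := grpc.Dial("localhost:19531", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := querypb.NewQueryCoordClient(conn)
	ctx := context.Background()

	// Create a resource group, then list groups to confirm the call went through.
	status, err := client.CreateResourceGroup(ctx, &milvuspb.CreateResourceGroupRequest{
		ResourceGroup: "rg1",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("CreateResourceGroup: %v", status.GetErrorCode())

	resp, err := client.ListResourceGroups(ctx, &milvuspb.ListResourceGroupsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("ListResourceGroups: %v", resp.GetStatus().GetErrorCode())
}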
@@ -4019,6 +4381,12 @@ type QueryCoordClient interface { GetReplicas(ctx context.Context, in *milvuspb.GetReplicasRequest, opts ...grpc.CallOption) (*milvuspb.GetReplicasResponse, error) GetShardLeaders(ctx context.Context, in *GetShardLeadersRequest, opts ...grpc.CallOption) (*GetShardLeadersResponse, error) CheckHealth(ctx context.Context, in *milvuspb.CheckHealthRequest, opts ...grpc.CallOption) (*milvuspb.CheckHealthResponse, error) + CreateResourceGroup(ctx context.Context, in *milvuspb.CreateResourceGroupRequest, opts ...grpc.CallOption) (*commonpb.Status, error) + DropResourceGroup(ctx context.Context, in *milvuspb.DropResourceGroupRequest, opts ...grpc.CallOption) (*commonpb.Status, error) + TransferNode(ctx context.Context, in *milvuspb.TransferNodeRequest, opts ...grpc.CallOption) (*commonpb.Status, error) + TransferReplica(ctx context.Context, in *TransferReplicaRequest, opts ...grpc.CallOption) (*commonpb.Status, error) + ListResourceGroups(ctx context.Context, in *milvuspb.ListResourceGroupsRequest, opts ...grpc.CallOption) (*milvuspb.ListResourceGroupsResponse, error) + DescribeResourceGroup(ctx context.Context, in *DescribeResourceGroupRequest, opts ...grpc.CallOption) (*DescribeResourceGroupResponse, error) } type queryCoordClient struct { @@ -4182,6 +4550,60 @@ func (c *queryCoordClient) CheckHealth(ctx context.Context, in *milvuspb.CheckHe return out, nil } +func (c *queryCoordClient) CreateResourceGroup(ctx context.Context, in *milvuspb.CreateResourceGroupRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { + out := new(commonpb.Status) + err := c.cc.Invoke(ctx, "/milvus.proto.query.QueryCoord/CreateResourceGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryCoordClient) DropResourceGroup(ctx context.Context, in *milvuspb.DropResourceGroupRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { + out := new(commonpb.Status) + err := c.cc.Invoke(ctx, "/milvus.proto.query.QueryCoord/DropResourceGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryCoordClient) TransferNode(ctx context.Context, in *milvuspb.TransferNodeRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { + out := new(commonpb.Status) + err := c.cc.Invoke(ctx, "/milvus.proto.query.QueryCoord/TransferNode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryCoordClient) TransferReplica(ctx context.Context, in *TransferReplicaRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { + out := new(commonpb.Status) + err := c.cc.Invoke(ctx, "/milvus.proto.query.QueryCoord/TransferReplica", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryCoordClient) ListResourceGroups(ctx context.Context, in *milvuspb.ListResourceGroupsRequest, opts ...grpc.CallOption) (*milvuspb.ListResourceGroupsResponse, error) { + out := new(milvuspb.ListResourceGroupsResponse) + err := c.cc.Invoke(ctx, "/milvus.proto.query.QueryCoord/ListResourceGroups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryCoordClient) DescribeResourceGroup(ctx context.Context, in *DescribeResourceGroupRequest, opts ...grpc.CallOption) (*DescribeResourceGroupResponse, error) { + out := new(DescribeResourceGroupResponse) + err := c.cc.Invoke(ctx, "/milvus.proto.query.QueryCoord/DescribeResourceGroup", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // QueryCoordServer is the server API for QueryCoord service. type QueryCoordServer interface { GetComponentStates(context.Context, *milvuspb.GetComponentStatesRequest) (*milvuspb.ComponentStates, error) @@ -4203,6 +4625,12 @@ type QueryCoordServer interface { GetReplicas(context.Context, *milvuspb.GetReplicasRequest) (*milvuspb.GetReplicasResponse, error) GetShardLeaders(context.Context, *GetShardLeadersRequest) (*GetShardLeadersResponse, error) CheckHealth(context.Context, *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error) + CreateResourceGroup(context.Context, *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) + DropResourceGroup(context.Context, *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) + TransferNode(context.Context, *milvuspb.TransferNodeRequest) (*commonpb.Status, error) + TransferReplica(context.Context, *TransferReplicaRequest) (*commonpb.Status, error) + ListResourceGroups(context.Context, *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) + DescribeResourceGroup(context.Context, *DescribeResourceGroupRequest) (*DescribeResourceGroupResponse, error) } // UnimplementedQueryCoordServer can be embedded to have forward compatible implementations. @@ -4260,6 +4688,24 @@ func (*UnimplementedQueryCoordServer) GetShardLeaders(ctx context.Context, req * func (*UnimplementedQueryCoordServer) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CheckHealth not implemented") } +func (*UnimplementedQueryCoordServer) CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateResourceGroup not implemented") +} +func (*UnimplementedQueryCoordServer) DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) { + return nil, status.Errorf(codes.Unimplemented, "method DropResourceGroup not implemented") +} +func (*UnimplementedQueryCoordServer) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error) { + return nil, status.Errorf(codes.Unimplemented, "method TransferNode not implemented") +} +func (*UnimplementedQueryCoordServer) TransferReplica(ctx context.Context, req *TransferReplicaRequest) (*commonpb.Status, error) { + return nil, status.Errorf(codes.Unimplemented, "method TransferReplica not implemented") +} +func (*UnimplementedQueryCoordServer) ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListResourceGroups not implemented") +} +func (*UnimplementedQueryCoordServer) DescribeResourceGroup(ctx context.Context, req *DescribeResourceGroupRequest) (*DescribeResourceGroupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DescribeResourceGroup not implemented") +} func RegisterQueryCoordServer(s *grpc.Server, srv QueryCoordServer) { s.RegisterService(&_QueryCoord_serviceDesc, srv) @@ -4571,6 +5017,114 @@ func _QueryCoord_CheckHealth_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _QueryCoord_CreateResourceGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(milvuspb.CreateResourceGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryCoordServer).CreateResourceGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/milvus.proto.query.QueryCoord/CreateResourceGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryCoordServer).CreateResourceGroup(ctx, req.(*milvuspb.CreateResourceGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryCoord_DropResourceGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(milvuspb.DropResourceGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryCoordServer).DropResourceGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/milvus.proto.query.QueryCoord/DropResourceGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryCoordServer).DropResourceGroup(ctx, req.(*milvuspb.DropResourceGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryCoord_TransferNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(milvuspb.TransferNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryCoordServer).TransferNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/milvus.proto.query.QueryCoord/TransferNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryCoordServer).TransferNode(ctx, req.(*milvuspb.TransferNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryCoord_TransferReplica_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TransferReplicaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryCoordServer).TransferReplica(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/milvus.proto.query.QueryCoord/TransferReplica", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryCoordServer).TransferReplica(ctx, req.(*TransferReplicaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryCoord_ListResourceGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(milvuspb.ListResourceGroupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryCoordServer).ListResourceGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/milvus.proto.query.QueryCoord/ListResourceGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryCoordServer).ListResourceGroups(ctx, req.(*milvuspb.ListResourceGroupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryCoord_DescribeResourceGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { + in := new(DescribeResourceGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryCoordServer).DescribeResourceGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/milvus.proto.query.QueryCoord/DescribeResourceGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryCoordServer).DescribeResourceGroup(ctx, req.(*DescribeResourceGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _QueryCoord_serviceDesc = grpc.ServiceDesc{ ServiceName: "milvus.proto.query.QueryCoord", HandlerType: (*QueryCoordServer)(nil), @@ -4643,6 +5197,30 @@ var _QueryCoord_serviceDesc = grpc.ServiceDesc{ MethodName: "CheckHealth", Handler: _QueryCoord_CheckHealth_Handler, }, + { + MethodName: "CreateResourceGroup", + Handler: _QueryCoord_CreateResourceGroup_Handler, + }, + { + MethodName: "DropResourceGroup", + Handler: _QueryCoord_DropResourceGroup_Handler, + }, + { + MethodName: "TransferNode", + Handler: _QueryCoord_TransferNode_Handler, + }, + { + MethodName: "TransferReplica", + Handler: _QueryCoord_TransferReplica_Handler, + }, + { + MethodName: "ListResourceGroups", + Handler: _QueryCoord_ListResourceGroups_Handler, + }, + { + MethodName: "DescribeResourceGroup", + Handler: _QueryCoord_DescribeResourceGroup_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "query_coord.proto", diff --git a/internal/proxy/impl.go b/internal/proxy/impl.go index 357d247bac..bc5e27e447 100644 --- a/internal/proxy/impl.go +++ b/internal/proxy/impl.go @@ -3452,6 +3452,10 @@ func (node *Proxy) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasReq commonpbutil.WithSourceID(paramtable.GetNodeID()), ) + if req.GetCollectionName() != "" { + req.CollectionID, _ = globalMetaCache.GetCollectionID(ctx, req.GetCollectionName()) + } + resp, err := node.queryCoord.GetReplicas(ctx, req) if err != nil { log.Error("Failed to get replicas from Query Coordinator", @@ -3758,7 +3762,6 @@ func (node *Proxy) UpdateCredentialCache(ctx context.Context, request *proxypb.U }, nil } -// func (node *Proxy) CreateCredential(ctx context.Context, req *milvuspb.CreateCredentialRequest) (*commonpb.Status, error) { ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-CreateCredential") defer sp.End() @@ -3823,7 +3826,6 @@ func (node *Proxy) CreateCredential(ctx context.Context, req *milvuspb.CreateCre return result, err } -// func (node *Proxy) UpdateCredential(ctx context.Context, req *milvuspb.UpdateCredentialRequest) (*commonpb.Status, error) { ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-UpdateCredential") defer sp.End() @@ -3897,7 +3899,6 @@ func (node *Proxy) UpdateCredential(ctx context.Context, req *milvuspb.UpdateCre return result, err } -// func (node *Proxy) DeleteCredential(ctx context.Context, req *milvuspb.DeleteCredentialRequest) (*commonpb.Status, error) { ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-DeleteCredential") defer sp.End() @@ -4449,42 +4450,391 @@ func (node *Proxy) RenameCollection(ctx context.Context, req *milvuspb.RenameCol } func (node *Proxy) CreateResourceGroup(ctx context.Context, request *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, nil + if !node.checkHealthy() { + return unhealthyStatus(), nil + } + + ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-CreateResourceGroup") + defer sp.End() + method := 
"CreateResourceGroup" + tr := timerecord.NewTimeRecorder(method) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.TotalLabel).Inc() + t := &CreateResourceGroupTask{ + ctx: ctx, + Condition: NewTaskCondition(ctx), + CreateResourceGroupRequest: request, + queryCoord: node.queryCoord, + } + + log := log.Ctx(ctx).With( + zap.String("role", typeutil.ProxyRole), + ) + + log.Debug("CreateResourceGroup received") + + if err := node.sched.ddQueue.Enqueue(t); err != nil { + log.Warn("CreateResourceGroup failed to enqueue", + zap.Error(err)) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.AbandonLabel).Inc() + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, nil + } + + log.Debug("CreateResourceGroup enqueued", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + if err := t.WaitToFinish(); err != nil { + log.Warn("CreateResourceGroup failed to WaitToFinish", + zap.Error(err), + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.FailLabel).Inc() + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, nil + } + + log.Debug("CreateResourceGroup done", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.SuccessLabel).Inc() + metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds())) + return t.result, nil } func (node *Proxy) DropResourceGroup(ctx context.Context, request *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, nil + if !node.checkHealthy() { + return unhealthyStatus(), nil + } + + ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-DropResourceGroup") + defer sp.End() + method := "DropResourceGroup" + tr := timerecord.NewTimeRecorder(method) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.TotalLabel).Inc() + t := &DropResourceGroupTask{ + ctx: ctx, + Condition: NewTaskCondition(ctx), + DropResourceGroupRequest: request, + queryCoord: node.queryCoord, + } + + log := log.Ctx(ctx).With( + zap.String("role", typeutil.ProxyRole), + ) + + log.Debug("DropResourceGroup received") + + if err := node.sched.ddQueue.Enqueue(t); err != nil { + log.Warn("DropResourceGroup failed to enqueue", + zap.Error(err)) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.AbandonLabel).Inc() + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, nil + } + + log.Debug("DropResourceGroup enqueued", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + if err := t.WaitToFinish(); err != nil { + log.Warn("DropResourceGroup failed to WaitToFinish", + zap.Error(err), + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.FailLabel).Inc() + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, 
nil + } + + log.Debug("DropResourceGroup done", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.SuccessLabel).Inc() + metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds())) + return t.result, nil } func (node *Proxy) TransferNode(ctx context.Context, request *milvuspb.TransferNodeRequest) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, nil + if !node.checkHealthy() { + return unhealthyStatus(), nil + } + + ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-TransferNode") + defer sp.End() + method := "TransferNode" + tr := timerecord.NewTimeRecorder(method) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.TotalLabel).Inc() + t := &TransferNodeTask{ + ctx: ctx, + Condition: NewTaskCondition(ctx), + TransferNodeRequest: request, + queryCoord: node.queryCoord, + } + + log := log.Ctx(ctx).With( + zap.String("role", typeutil.ProxyRole), + ) + + log.Debug("TransferNode received") + + if err := node.sched.ddQueue.Enqueue(t); err != nil { + log.Warn("TransferNode failed to enqueue", + zap.Error(err)) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.AbandonLabel).Inc() + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, nil + } + + log.Debug("TransferNode enqueued", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + if err := t.WaitToFinish(); err != nil { + log.Warn("TransferNode failed to WaitToFinish", + zap.Error(err), + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.FailLabel).Inc() + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, nil + } + + log.Debug("TransferNode done", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.SuccessLabel).Inc() + metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds())) + return t.result, nil } func (node *Proxy) TransferReplica(ctx context.Context, request *milvuspb.TransferReplicaRequest) (*commonpb.Status, error) { + if !node.checkHealthy() { + return unhealthyStatus(), nil + } - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, nil + ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-TransferReplica") + defer sp.End() + method := "TransferReplica" + tr := timerecord.NewTimeRecorder(method) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.TotalLabel).Inc() + t := &TransferReplicaTask{ + ctx: ctx, + Condition: NewTaskCondition(ctx), + TransferReplicaRequest: request, + queryCoord: node.queryCoord, + } + + log := log.Ctx(ctx).With( + zap.String("role", typeutil.ProxyRole), + ) + + log.Debug("TransferReplica received") + + if err := node.sched.ddQueue.Enqueue(t); err != nil { + log.Warn("TransferReplica failed to enqueue", + zap.Error(err)) + + 
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.AbandonLabel).Inc() + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, nil + } + + log.Debug("TransferReplica enqueued", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + if err := t.WaitToFinish(); err != nil { + log.Warn("TransferReplica failed to WaitToFinish", + zap.Error(err), + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.FailLabel).Inc() + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, nil + } + + log.Debug("TransferReplica done", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.SuccessLabel).Inc() + metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds())) + return t.result, nil } -func (node *Proxy) ListResourceGroup(ctx context.Context, request *milvuspb.ListResourceGroupRequest) (*milvuspb.ListResourceGroupResponse, error) { - return &milvuspb.ListResourceGroupResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, - }, nil +func (node *Proxy) ListResourceGroups(ctx context.Context, request *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) { + if !node.checkHealthy() { + return &milvuspb.ListResourceGroupsResponse{ + Status: unhealthyStatus(), + }, nil + } + + ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-ListResourceGroups") + defer sp.End() + method := "ListResourceGroups" + tr := timerecord.NewTimeRecorder(method) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.TotalLabel).Inc() + t := &ListResourceGroupsTask{ + ctx: ctx, + Condition: NewTaskCondition(ctx), + ListResourceGroupsRequest: request, + queryCoord: node.queryCoord, + } + + log := log.Ctx(ctx).With( + zap.String("role", typeutil.ProxyRole), + ) + + log.Debug("ListResourceGroups received") + + if err := node.sched.ddQueue.Enqueue(t); err != nil { + log.Warn("ListResourceGroups failed to enqueue", + zap.Error(err)) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.AbandonLabel).Inc() + return &milvuspb.ListResourceGroupsResponse{ + Status: &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, + }, nil + } + + log.Debug("ListResourceGroups enqueued", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + if err := t.WaitToFinish(); err != nil { + log.Warn("ListResourceGroups failed to WaitToFinish", + zap.Error(err), + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.FailLabel).Inc() + return &milvuspb.ListResourceGroupsResponse{ + Status: &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, + }, nil + } + + log.Debug("ListResourceGroups done", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), 
method, + metrics.SuccessLabel).Inc() + metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds())) + return t.result, nil } func (node *Proxy) DescribeResourceGroup(ctx context.Context, request *milvuspb.DescribeResourceGroupRequest) (*milvuspb.DescribeResourceGroupResponse, error) { - return &milvuspb.DescribeResourceGroupResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, - }, nil + if !node.checkHealthy() { + return &milvuspb.DescribeResourceGroupResponse{ + Status: unhealthyStatus(), + }, nil + } + + ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-DescribeResourceGroup") + defer sp.End() + method := "DescribeResourceGroup" + tr := timerecord.NewTimeRecorder(method) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.TotalLabel).Inc() + t := &DescribeResourceGroupTask{ + ctx: ctx, + Condition: NewTaskCondition(ctx), + DescribeResourceGroupRequest: request, + queryCoord: node.queryCoord, + } + + log := log.Ctx(ctx).With( + zap.String("role", typeutil.ProxyRole), + ) + + log.Debug("DescribeResourceGroup received") + + if err := node.sched.ddQueue.Enqueue(t); err != nil { + log.Warn("DescribeResourceGroup failed to enqueue", + zap.Error(err)) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.AbandonLabel).Inc() + return &milvuspb.DescribeResourceGroupResponse{ + Status: &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, + }, nil + } + + log.Debug("DescribeResourceGroup enqueued", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + if err := t.WaitToFinish(); err != nil { + log.Warn("DescribeResourceGroup failed to WaitToFinish", + zap.Error(err), + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.FailLabel).Inc() + return &milvuspb.DescribeResourceGroupResponse{ + Status: &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + Reason: err.Error(), + }, + }, nil + } + + log.Debug("DescribeResourceGroup done", + zap.Uint64("BeginTS", t.BeginTs()), + zap.Uint64("EndTS", t.EndTs())) + + metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, + metrics.SuccessLabel).Inc() + metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds())) + return t.result, nil } diff --git a/internal/proxy/impl_test.go b/internal/proxy/impl_test.go index bf71d270b1..442b4a93ab 100644 --- a/internal/proxy/impl_test.go +++ b/internal/proxy/impl_test.go @@ -29,6 +29,7 @@ import ( "github.com/milvus-io/milvus/internal/log" "github.com/milvus-io/milvus/internal/mocks" "github.com/milvus-io/milvus/internal/proto/proxypb" + "github.com/milvus-io/milvus/internal/util/dependency" "github.com/milvus-io/milvus/internal/util/paramtable" "github.com/milvus-io/milvus/internal/util/sessionutil" ) @@ -198,3 +199,75 @@ func TestProxyRenameCollection(t *testing.T) { assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) }) } + +func TestProxy_ResourceGroup(t *testing.T) { + factory := dependency.NewDefaultFactory(true) + ctx := context.Background() + + node, err := NewProxy(ctx, factory) + assert.NoError(t, err) + node.multiRateLimiter = 
NewMultiRateLimiter() + node.stateCode.Store(commonpb.StateCode_Healthy) + + qc := NewQueryCoordMock() + node.SetQueryCoordClient(qc) + + tsoAllocatorIns := newMockTsoAllocator() + node.sched, err = newTaskScheduler(node.ctx, tsoAllocatorIns, node.factory) + assert.NoError(t, err) + node.sched.Start() + defer node.sched.Close() + + rc := &MockRootCoordClientInterface{} + mgr := newShardClientMgr() + InitMetaCache(ctx, rc, qc, mgr) + + t.Run("create resource group", func(t *testing.T) { + resp, err := node.CreateResourceGroup(ctx, &milvuspb.CreateResourceGroupRequest{ + ResourceGroup: "rg", + }) + assert.NoError(t, err) + assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_Success) + }) + + t.Run("drop resource group", func(t *testing.T) { + resp, err := node.DropResourceGroup(ctx, &milvuspb.DropResourceGroupRequest{ + ResourceGroup: "rg", + }) + assert.NoError(t, err) + assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_Success) + }) + + t.Run("transfer node", func(t *testing.T) { + resp, err := node.TransferNode(ctx, &milvuspb.TransferNodeRequest{ + SourceResourceGroup: "rg1", + TargetResourceGroup: "rg2", + NumNode: 1, + }) + assert.NoError(t, err) + assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_Success) + }) + + t.Run("transfer replica", func(t *testing.T) { + resp, err := node.TransferReplica(ctx, &milvuspb.TransferReplicaRequest{ + SourceResourceGroup: "rg1", + TargetResourceGroup: "rg2", + NumReplica: 1, + CollectionName: "collection1", + }) + assert.NoError(t, err) + assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_Success) + }) + + t.Run("list resource group", func(t *testing.T) { + resp, err := node.ListResourceGroups(ctx, &milvuspb.ListResourceGroupsRequest{}) + assert.NoError(t, err) + assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_Success) + }) + + t.Run("describe resource group", func(t *testing.T) { + resp, err := node.DescribeResourceGroup(ctx, &milvuspb.DescribeResourceGroupRequest{}) + assert.NoError(t, err) + assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_Success) + }) +} diff --git a/internal/proxy/meta_cache.go b/internal/proxy/meta_cache.go index 4b8f272667..3a1b08bcba 100644 --- a/internal/proxy/meta_cache.go +++ b/internal/proxy/meta_cache.go @@ -51,6 +51,8 @@ import ( type Cache interface { // GetCollectionID get collection's id by name. GetCollectionID(ctx context.Context, collectionName string) (typeutil.UniqueID, error) + // GetCollectionName get collection's name by id + GetCollectionName(ctx context.Context, collectionID int64) (string, error) // GetCollectionInfo get collection's information by name, such as collection id, schema, and etc. GetCollectionInfo(ctx context.Context, collectionName string) (*collectionInfo, error) // GetPartitionID get partition's identifier of specific collection. 
@@ -196,7 +198,7 @@ func (m *MetaCache) GetCollectionID(ctx context.Context, collectionName string) metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GeCollectionID", metrics.CacheMissLabel).Inc() tr := timerecord.NewTimeRecorder("UpdateCache") m.mu.RUnlock() - coll, err := m.describeCollection(ctx, collectionName) + coll, err := m.describeCollection(ctx, collectionName, 0) if err != nil { return 0, err } @@ -213,6 +215,37 @@ func (m *MetaCache) GetCollectionID(ctx context.Context, collectionName string) return collInfo.collID, nil } +// GetCollectionName returns the corresponding collection name for provided collection id +func (m *MetaCache) GetCollectionName(ctx context.Context, collectionID int64) (string, error) { + m.mu.RLock() + var collInfo *collectionInfo + for _, coll := range m.collInfo { + if coll.collID == collectionID { + collInfo = coll + break + } + } + + if collInfo == nil || !collInfo.isCollectionCached() { + metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GeCollectionName", metrics.CacheMissLabel).Inc() + tr := timerecord.NewTimeRecorder("UpdateCache") + m.mu.RUnlock() + coll, err := m.describeCollection(ctx, "", collectionID) + if err != nil { + return "", err + } + m.mu.Lock() + defer m.mu.Unlock() + m.updateCollection(coll, coll.Schema.Name) + metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds())) + return coll.Schema.Name, nil + } + defer m.mu.RUnlock() + metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GeCollectionName", metrics.CacheHitLabel).Inc() + + return collInfo.schema.Name, nil +} + // GetCollectionInfo returns the collection information related to provided collection name // If the information is not found, proxy will try to fetch information for other source (RootCoord for now) func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string) (*collectionInfo, error) { @@ -224,7 +257,7 @@ func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string if !ok || !collInfo.isCollectionCached() { tr := timerecord.NewTimeRecorder("UpdateCache") metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetCollectionInfo", metrics.CacheMissLabel).Inc() - coll, err := m.describeCollection(ctx, collectionName) + coll, err := m.describeCollection(ctx, collectionName, 0) if err != nil { return nil, err } @@ -281,7 +314,7 @@ func (m *MetaCache) GetCollectionSchema(ctx context.Context, collectionName stri metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetCollectionSchema", metrics.CacheMissLabel).Inc() tr := timerecord.NewTimeRecorder("UpdateCache") m.mu.RUnlock() - coll, err := m.describeCollection(ctx, collectionName) + coll, err := m.describeCollection(ctx, collectionName, 0) if err != nil { log.Warn("Failed to load collection from rootcoord ", zap.String("collection name ", collectionName), @@ -294,7 +327,7 @@ func (m *MetaCache) GetCollectionSchema(ctx context.Context, collectionName stri collInfo = m.collInfo[collectionName] metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds())) log.Debug("Reload collection from root coordinator ", - zap.String("collection name ", collectionName), + zap.String("collection name", collectionName), 
zap.Any("time (milliseconds) take ", tr.ElapseSpan().Milliseconds())) return collInfo.schema, nil } @@ -424,12 +457,13 @@ func (m *MetaCache) GetPartitionInfo(ctx context.Context, collectionName string, } // Get the collection information from rootcoord. -func (m *MetaCache) describeCollection(ctx context.Context, collectionName string) (*milvuspb.DescribeCollectionResponse, error) { +func (m *MetaCache) describeCollection(ctx context.Context, collectionName string, collectionID int64) (*milvuspb.DescribeCollectionResponse, error) { req := &milvuspb.DescribeCollectionRequest{ Base: commonpbutil.NewMsgBase( commonpbutil.WithMsgType(commonpb.MsgType_DescribeCollection), ), CollectionName: collectionName, + CollectionID: collectionID, } coll, err := m.rootCoord.DescribeCollection(ctx, req) if err != nil { diff --git a/internal/proxy/meta_cache_test.go b/internal/proxy/meta_cache_test.go index b8b83d4908..f32e2b7919 100644 --- a/internal/proxy/meta_cache_test.go +++ b/internal/proxy/meta_cache_test.go @@ -127,7 +127,7 @@ func (m *MockRootCoordClientInterface) DescribeCollection(ctx context.Context, i return nil, errors.New("mocked error") } m.IncAccessCount() - if in.CollectionName == "collection1" { + if in.CollectionName == "collection1" || in.CollectionID == 1 { return &milvuspb.DescribeCollectionResponse{ Status: &commonpb.Status{ ErrorCode: commonpb.ErrorCode_Success, @@ -135,10 +135,11 @@ func (m *MockRootCoordClientInterface) DescribeCollection(ctx context.Context, i CollectionID: typeutil.UniqueID(1), Schema: &schemapb.CollectionSchema{ AutoID: true, + Name: "collection1", }, }, nil } - if in.CollectionName == "collection2" { + if in.CollectionName == "collection2" || in.CollectionID == 2 { return &milvuspb.DescribeCollectionResponse{ Status: &commonpb.Status{ ErrorCode: commonpb.ErrorCode_Success, @@ -146,6 +147,7 @@ func (m *MockRootCoordClientInterface) DescribeCollection(ctx context.Context, i CollectionID: typeutil.UniqueID(2), Schema: &schemapb.CollectionSchema{ AutoID: true, + Name: "collection2", }, }, nil } @@ -230,7 +232,7 @@ func (m *MockQueryCoordClientInterface) ShowCollections(ctx context.Context, req return rsp, nil } -//Simulate the cache path and the +// Simulate the cache path and the func TestMetaCache_GetCollection(t *testing.T) { ctx := context.Background() rootCoord := &MockRootCoordClientInterface{} @@ -251,6 +253,7 @@ func TestMetaCache_GetCollection(t *testing.T) { assert.Equal(t, schema, &schemapb.CollectionSchema{ AutoID: true, Fields: []*schemapb.FieldSchema{}, + Name: "collection1", }) id, err = globalMetaCache.GetCollectionID(ctx, "collection2") assert.Equal(t, rootCoord.GetAccessCount(), 2) @@ -262,6 +265,7 @@ func TestMetaCache_GetCollection(t *testing.T) { assert.Equal(t, schema, &schemapb.CollectionSchema{ AutoID: true, Fields: []*schemapb.FieldSchema{}, + Name: "collection2", }) // test to get from cache, this should trigger root request @@ -275,10 +279,61 @@ func TestMetaCache_GetCollection(t *testing.T) { assert.Equal(t, schema, &schemapb.CollectionSchema{ AutoID: true, Fields: []*schemapb.FieldSchema{}, + Name: "collection1", }) } +func TestMetaCache_GetCollectionName(t *testing.T) { + ctx := context.Background() + rootCoord := &MockRootCoordClientInterface{} + queryCoord := &MockQueryCoordClientInterface{} + mgr := newShardClientMgr() + err := InitMetaCache(ctx, rootCoord, queryCoord, mgr) + assert.Nil(t, err) + + collection, err := globalMetaCache.GetCollectionName(ctx, 1) + assert.Nil(t, err) + assert.Equal(t, collection, "collection1") + 
assert.Equal(t, rootCoord.GetAccessCount(), 1)
+
+ // shouldn't access the remote root coord.
+ schema, err := globalMetaCache.GetCollectionSchema(ctx, "collection1")
+ assert.Equal(t, rootCoord.GetAccessCount(), 1)
+ assert.Nil(t, err)
+ assert.Equal(t, schema, &schemapb.CollectionSchema{
+ AutoID: true,
+ Fields: []*schemapb.FieldSchema{},
+ Name: "collection1",
+ })
+ collection, err = globalMetaCache.GetCollectionName(ctx, 1)
+ assert.Equal(t, rootCoord.GetAccessCount(), 1)
+ assert.Nil(t, err)
+ assert.Equal(t, collection, "collection1")
+ schema, err = globalMetaCache.GetCollectionSchema(ctx, "collection2")
+ assert.Equal(t, rootCoord.GetAccessCount(), 2)
+ assert.Nil(t, err)
+ assert.Equal(t, schema, &schemapb.CollectionSchema{
+ AutoID: true,
+ Fields: []*schemapb.FieldSchema{},
+ Name: "collection2",
+ })
+
+ // test to get from cache, this shouldn't trigger a root coord request
+ collection, err = globalMetaCache.GetCollectionName(ctx, 1)
+ assert.Equal(t, rootCoord.GetAccessCount(), 2)
+ assert.Nil(t, err)
+ assert.Equal(t, collection, "collection1")
+ schema, err = globalMetaCache.GetCollectionSchema(ctx, "collection1")
+ assert.Equal(t, rootCoord.GetAccessCount(), 2)
+ assert.Nil(t, err)
+ assert.Equal(t, schema, &schemapb.CollectionSchema{
+ AutoID: true,
+ Fields: []*schemapb.FieldSchema{},
+ Name: "collection1",
+ })
+}
+
func TestMetaCache_GetCollectionFailure(t *testing.T) {
ctx := context.Background()
rootCoord := &MockRootCoordClientInterface{}
@@ -299,6 +354,7 @@ func TestMetaCache_GetCollectionFailure(t *testing.T) {
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
+ Name: "collection1",
})
rootCoord.Error = true
@@ -307,6 +363,7 @@
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
+ Name: "collection1",
})
}
@@ -367,6 +424,7 @@ func TestMetaCache_ConcurrentTest1(t *testing.T) {
assert.Equal(t, schema, &schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{},
+ Name: "collection1",
})
time.Sleep(10 * time.Millisecond)
}
diff --git a/internal/proxy/mock_cache_test.go b/internal/proxy/mock_cache_test.go
index 5cb7819275..5d8bcb4c02 100644
--- a/internal/proxy/mock_cache_test.go
+++ b/internal/proxy/mock_cache_test.go
@@ -8,6 +8,7 @@ import (
)
type getCollectionIDFunc func(ctx context.Context, collectionName string) (typeutil.UniqueID, error)
+type getCollectionNameFunc func(ctx context.Context, collectionID int64) (string, error)
type getCollectionSchemaFunc func(ctx context.Context, collectionName string) (*schemapb.CollectionSchema, error)
type getCollectionInfoFunc func(ctx context.Context, collectionName string) (*collectionInfo, error)
type getUserRoleFunc func(username string) []string
@@ -16,6 +17,7 @@ type getPartitionIDFunc func(ctx context.Context, collectionName string, partiti
type mockCache struct {
Cache
getIDFunc getCollectionIDFunc
+ getNameFunc getCollectionNameFunc
getSchemaFunc getCollectionSchemaFunc
getInfoFunc getCollectionInfoFunc
getUserRoleFunc getUserRoleFunc
@@ -29,6 +31,13 @@ func (m *mockCache) GetCollectionID(ctx context.Context, collectionName string)
return 0, nil
}
+func (m *mockCache) GetCollectionName(ctx context.Context, collectionID int64) (string, error) {
+ if m.getNameFunc != nil {
+ return m.getNameFunc(ctx, collectionID)
+ }
+ return "", nil
+}
+
func (m *mockCache) GetCollectionSchema(ctx context.Context, collectionName string) (*schemapb.CollectionSchema,
error) { if m.getSchemaFunc != nil { return m.getSchemaFunc(ctx, collectionName) diff --git a/internal/proxy/query_coord_mock_test.go b/internal/proxy/query_coord_mock_test.go index 10d674b197..29f1782bfa 100644 --- a/internal/proxy/query_coord_mock_test.go +++ b/internal/proxy/query_coord_mock_test.go @@ -22,6 +22,7 @@ import ( "sync" "sync/atomic" + "github.com/milvus-io/milvus/internal/querycoordv2/meta" "github.com/milvus-io/milvus/internal/util/funcutil" "github.com/milvus-io/milvus/internal/util/uniquegenerator" @@ -423,6 +424,60 @@ func (coord *QueryCoordMock) GetShardLeaders(ctx context.Context, req *querypb.G }, nil } +func (coord *QueryCoordMock) CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) { + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + Reason: "", + }, nil +} + +func (coord *QueryCoordMock) DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) { + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + Reason: "", + }, nil +} + +func (coord *QueryCoordMock) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error) { + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + Reason: "", + }, nil +} + +func (coord *QueryCoordMock) TransferReplica(ctx context.Context, req *querypb.TransferReplicaRequest) (*commonpb.Status, error) { + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + Reason: "", + }, nil +} + +func (coord *QueryCoordMock) ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) { + return &milvuspb.ListResourceGroupsResponse{ + Status: &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + Reason: "", + }, + ResourceGroups: []string{meta.DefaultResourceGroupName, "rg"}, + }, nil +} + +func (coord *QueryCoordMock) DescribeResourceGroup(ctx context.Context, req *querypb.DescribeResourceGroupRequest) (*querypb.DescribeResourceGroupResponse, error) { + return &querypb.DescribeResourceGroupResponse{ + Status: &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + Reason: "", + }, + ResourceGroup: &querypb.ResourceGroupInfo{ + Name: "rg", + Capacity: 2, + NumAvailableNode: 1, + NumOutgoingNode: map[int64]int32{1: 1}, + NumIncomingNode: map[int64]int32{2: 2}, + }, + }, nil +} + func NewQueryCoordMock(opts ...QueryCoordMockOption) *QueryCoordMock { coord := &QueryCoordMock{ nodeID: UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt()), diff --git a/internal/proxy/task.go b/internal/proxy/task.go index 2d7cb15ce2..af2cc755f9 100644 --- a/internal/proxy/task.go +++ b/internal/proxy/task.go @@ -39,6 +39,7 @@ import ( "github.com/milvus-io/milvus/internal/util/commonpbutil" "github.com/milvus-io/milvus/internal/util/paramtable" "github.com/milvus-io/milvus/internal/util/typeutil" + "github.com/samber/lo" ) const ( @@ -51,27 +52,33 @@ const ( OffsetKey = "offset" LimitKey = "limit" - InsertTaskName = "InsertTask" - CreateCollectionTaskName = "CreateCollectionTask" - DropCollectionTaskName = "DropCollectionTask" - HasCollectionTaskName = "HasCollectionTask" - DescribeCollectionTaskName = "DescribeCollectionTask" - ShowCollectionTaskName = "ShowCollectionTask" - CreatePartitionTaskName = "CreatePartitionTask" - DropPartitionTaskName = "DropPartitionTask" - HasPartitionTaskName = "HasPartitionTask" - ShowPartitionTaskName = "ShowPartitionTask" - FlushTaskName = "FlushTask" - 
LoadCollectionTaskName = "LoadCollectionTask" - ReleaseCollectionTaskName = "ReleaseCollectionTask" - LoadPartitionTaskName = "LoadPartitionsTask" - ReleasePartitionTaskName = "ReleasePartitionsTask" - DeleteTaskName = "DeleteTask" - CreateAliasTaskName = "CreateAliasTask" - DropAliasTaskName = "DropAliasTask" - AlterAliasTaskName = "AlterAliasTask" - AlterCollectionTaskName = "AlterCollectionTask" - UpsertTaskName = "UpsertTask" + InsertTaskName = "InsertTask" + CreateCollectionTaskName = "CreateCollectionTask" + DropCollectionTaskName = "DropCollectionTask" + HasCollectionTaskName = "HasCollectionTask" + DescribeCollectionTaskName = "DescribeCollectionTask" + ShowCollectionTaskName = "ShowCollectionTask" + CreatePartitionTaskName = "CreatePartitionTask" + DropPartitionTaskName = "DropPartitionTask" + HasPartitionTaskName = "HasPartitionTask" + ShowPartitionTaskName = "ShowPartitionTask" + FlushTaskName = "FlushTask" + LoadCollectionTaskName = "LoadCollectionTask" + ReleaseCollectionTaskName = "ReleaseCollectionTask" + LoadPartitionTaskName = "LoadPartitionsTask" + ReleasePartitionTaskName = "ReleasePartitionsTask" + DeleteTaskName = "DeleteTask" + CreateAliasTaskName = "CreateAliasTask" + DropAliasTaskName = "DropAliasTask" + AlterAliasTaskName = "AlterAliasTask" + AlterCollectionTaskName = "AlterCollectionTask" + UpsertTaskName = "UpsertTask" + CreateResourceGroupTaskName = "CreateResourceGroupTask" + DropResourceGroupTaskName = "DropResourceGroupTask" + TransferNodeTaskName = "TransferNodeTask" + TransferReplicaTaskName = "TransferReplicaTask" + ListResourceGroupsTaskName = "ListResourceGroupsTask" + DescribeResourceGroupTaskName = "DescribeResourceGroupTask" // minFloat32 minimum float. minFloat32 = -1 * float32(math.MaxFloat32) @@ -1916,3 +1923,412 @@ func (a *AlterAliasTask) Execute(ctx context.Context) error { func (a *AlterAliasTask) PostExecute(ctx context.Context) error { return nil } + +type CreateResourceGroupTask struct { + Condition + *milvuspb.CreateResourceGroupRequest + ctx context.Context + queryCoord types.QueryCoord + result *commonpb.Status +} + +func (t *CreateResourceGroupTask) TraceCtx() context.Context { + return t.ctx +} + +func (t *CreateResourceGroupTask) ID() UniqueID { + return t.Base.MsgID +} + +func (t *CreateResourceGroupTask) SetID(uid UniqueID) { + t.Base.MsgID = uid +} + +func (t *CreateResourceGroupTask) Name() string { + return CreateResourceGroupTaskName +} + +func (t *CreateResourceGroupTask) Type() commonpb.MsgType { + return t.Base.MsgType +} + +func (t *CreateResourceGroupTask) BeginTs() Timestamp { + return t.Base.Timestamp +} + +func (t *CreateResourceGroupTask) EndTs() Timestamp { + return t.Base.Timestamp +} + +func (t *CreateResourceGroupTask) SetTs(ts Timestamp) { + t.Base.Timestamp = ts +} + +func (t *CreateResourceGroupTask) OnEnqueue() error { + t.Base = commonpbutil.NewMsgBase() + return nil +} + +func (t *CreateResourceGroupTask) PreExecute(ctx context.Context) error { + t.Base.MsgType = commonpb.MsgType_CreateResourceGroup + t.Base.SourceID = paramtable.GetNodeID() + + return nil +} + +func (t *CreateResourceGroupTask) Execute(ctx context.Context) error { + var err error + t.result, err = t.queryCoord.CreateResourceGroup(ctx, t.CreateResourceGroupRequest) + return err +} + +func (t *CreateResourceGroupTask) PostExecute(ctx context.Context) error { + return nil +} + +type DropResourceGroupTask struct { + Condition + *milvuspb.DropResourceGroupRequest + ctx context.Context + queryCoord types.QueryCoord + result *commonpb.Status +} + 
+func (t *DropResourceGroupTask) TraceCtx() context.Context { + return t.ctx +} + +func (t *DropResourceGroupTask) ID() UniqueID { + return t.Base.MsgID +} + +func (t *DropResourceGroupTask) SetID(uid UniqueID) { + t.Base.MsgID = uid +} + +func (t *DropResourceGroupTask) Name() string { + return DropResourceGroupTaskName +} + +func (t *DropResourceGroupTask) Type() commonpb.MsgType { + return t.Base.MsgType +} + +func (t *DropResourceGroupTask) BeginTs() Timestamp { + return t.Base.Timestamp +} + +func (t *DropResourceGroupTask) EndTs() Timestamp { + return t.Base.Timestamp +} + +func (t *DropResourceGroupTask) SetTs(ts Timestamp) { + t.Base.Timestamp = ts +} + +func (t *DropResourceGroupTask) OnEnqueue() error { + t.Base = commonpbutil.NewMsgBase() + return nil +} + +func (t *DropResourceGroupTask) PreExecute(ctx context.Context) error { + t.Base.MsgType = commonpb.MsgType_DropResourceGroup + t.Base.SourceID = paramtable.GetNodeID() + + return nil +} + +func (t *DropResourceGroupTask) Execute(ctx context.Context) error { + var err error + t.result, err = t.queryCoord.DropResourceGroup(ctx, t.DropResourceGroupRequest) + return err +} + +func (t *DropResourceGroupTask) PostExecute(ctx context.Context) error { + return nil +} + +type DescribeResourceGroupTask struct { + Condition + *milvuspb.DescribeResourceGroupRequest + ctx context.Context + queryCoord types.QueryCoord + result *milvuspb.DescribeResourceGroupResponse +} + +func (t *DescribeResourceGroupTask) TraceCtx() context.Context { + return t.ctx +} + +func (t *DescribeResourceGroupTask) ID() UniqueID { + return t.Base.MsgID +} + +func (t *DescribeResourceGroupTask) SetID(uid UniqueID) { + t.Base.MsgID = uid +} + +func (t *DescribeResourceGroupTask) Name() string { + return DescribeResourceGroupTaskName +} + +func (t *DescribeResourceGroupTask) Type() commonpb.MsgType { + return t.Base.MsgType +} + +func (t *DescribeResourceGroupTask) BeginTs() Timestamp { + return t.Base.Timestamp +} + +func (t *DescribeResourceGroupTask) EndTs() Timestamp { + return t.Base.Timestamp +} + +func (t *DescribeResourceGroupTask) SetTs(ts Timestamp) { + t.Base.Timestamp = ts +} + +func (t *DescribeResourceGroupTask) OnEnqueue() error { + t.Base = commonpbutil.NewMsgBase() + return nil +} + +func (t *DescribeResourceGroupTask) PreExecute(ctx context.Context) error { + t.Base.MsgType = commonpb.MsgType_DescribeResourceGroup + t.Base.SourceID = paramtable.GetNodeID() + + return nil +} + +func (t *DescribeResourceGroupTask) Execute(ctx context.Context) error { + var err error + resp, err := t.queryCoord.DescribeResourceGroup(ctx, &querypb.DescribeResourceGroupRequest{ + ResourceGroup: t.ResourceGroup, + }) + rgInfo := resp.GetResourceGroup() + + getCollectionNameFunc := func(value int32, key int64) string { + name, err := globalMetaCache.GetCollectionName(ctx, key) + if err != nil { + // unreachable logic path + return "unavailable_collection" + } + return name + } + + loadReplicas := lo.MapKeys(rgInfo.NumLoadedReplica, getCollectionNameFunc) + outgoingNodes := lo.MapKeys(rgInfo.NumOutgoingNode, getCollectionNameFunc) + incomingNodes := lo.MapKeys(rgInfo.NumIncomingNode, getCollectionNameFunc) + + t.result = &milvuspb.DescribeResourceGroupResponse{ + Status: resp.Status, + ResourceGroup: &milvuspb.ResourceGroup{ + Name: rgInfo.GetName(), + Capacity: rgInfo.GetCapacity(), + NumAvailableNode: rgInfo.NumAvailableNode, + NumLoadedReplica: loadReplicas, + NumOutgoingNode: outgoingNodes, + NumIncomingNode: incomingNodes, + }, + } + return err +} + +func (t 
*DescribeResourceGroupTask) PostExecute(ctx context.Context) error { + return nil +} + +type TransferNodeTask struct { + Condition + *milvuspb.TransferNodeRequest + ctx context.Context + queryCoord types.QueryCoord + result *commonpb.Status +} + +func (t *TransferNodeTask) TraceCtx() context.Context { + return t.ctx +} + +func (t *TransferNodeTask) ID() UniqueID { + return t.Base.MsgID +} + +func (t *TransferNodeTask) SetID(uid UniqueID) { + t.Base.MsgID = uid +} + +func (t *TransferNodeTask) Name() string { + return TransferNodeTaskName +} + +func (t *TransferNodeTask) Type() commonpb.MsgType { + return t.Base.MsgType +} + +func (t *TransferNodeTask) BeginTs() Timestamp { + return t.Base.Timestamp +} + +func (t *TransferNodeTask) EndTs() Timestamp { + return t.Base.Timestamp +} + +func (t *TransferNodeTask) SetTs(ts Timestamp) { + t.Base.Timestamp = ts +} + +func (t *TransferNodeTask) OnEnqueue() error { + t.Base = commonpbutil.NewMsgBase() + return nil +} + +func (t *TransferNodeTask) PreExecute(ctx context.Context) error { + t.Base.MsgType = commonpb.MsgType_TransferNode + t.Base.SourceID = paramtable.GetNodeID() + + return nil +} + +func (t *TransferNodeTask) Execute(ctx context.Context) error { + var err error + t.result, err = t.queryCoord.TransferNode(ctx, t.TransferNodeRequest) + return err +} + +func (t *TransferNodeTask) PostExecute(ctx context.Context) error { + return nil +} + +type TransferReplicaTask struct { + Condition + *milvuspb.TransferReplicaRequest + ctx context.Context + queryCoord types.QueryCoord + result *commonpb.Status +} + +func (t *TransferReplicaTask) TraceCtx() context.Context { + return t.ctx +} + +func (t *TransferReplicaTask) ID() UniqueID { + return t.Base.MsgID +} + +func (t *TransferReplicaTask) SetID(uid UniqueID) { + t.Base.MsgID = uid +} + +func (t *TransferReplicaTask) Name() string { + return TransferReplicaTaskName +} + +func (t *TransferReplicaTask) Type() commonpb.MsgType { + return t.Base.MsgType +} + +func (t *TransferReplicaTask) BeginTs() Timestamp { + return t.Base.Timestamp +} + +func (t *TransferReplicaTask) EndTs() Timestamp { + return t.Base.Timestamp +} + +func (t *TransferReplicaTask) SetTs(ts Timestamp) { + t.Base.Timestamp = ts +} + +func (t *TransferReplicaTask) OnEnqueue() error { + t.Base = commonpbutil.NewMsgBase() + return nil +} + +func (t *TransferReplicaTask) PreExecute(ctx context.Context) error { + t.Base.MsgType = commonpb.MsgType_TransferReplica + t.Base.SourceID = paramtable.GetNodeID() + + return nil +} + +func (t *TransferReplicaTask) Execute(ctx context.Context) error { + var err error + collID, err := globalMetaCache.GetCollectionID(ctx, t.CollectionName) + if err != nil { + return err + } + t.result, err = t.queryCoord.TransferReplica(ctx, &querypb.TransferReplicaRequest{ + SourceResourceGroup: t.SourceResourceGroup, + TargetResourceGroup: t.TargetResourceGroup, + CollectionID: collID, + NumReplica: t.NumReplica, + }) + return err +} + +func (t *TransferReplicaTask) PostExecute(ctx context.Context) error { + return nil +} + +type ListResourceGroupsTask struct { + Condition + *milvuspb.ListResourceGroupsRequest + ctx context.Context + queryCoord types.QueryCoord + result *milvuspb.ListResourceGroupsResponse +} + +func (t *ListResourceGroupsTask) TraceCtx() context.Context { + return t.ctx +} + +func (t *ListResourceGroupsTask) ID() UniqueID { + return t.Base.MsgID +} + +func (t *ListResourceGroupsTask) SetID(uid UniqueID) { + t.Base.MsgID = uid +} + +func (t *ListResourceGroupsTask) Name() string { + return 
ListResourceGroupsTaskName +} + +func (t *ListResourceGroupsTask) Type() commonpb.MsgType { + return t.Base.MsgType +} + +func (t *ListResourceGroupsTask) BeginTs() Timestamp { + return t.Base.Timestamp +} + +func (t *ListResourceGroupsTask) EndTs() Timestamp { + return t.Base.Timestamp +} + +func (t *ListResourceGroupsTask) SetTs(ts Timestamp) { + t.Base.Timestamp = ts +} + +func (t *ListResourceGroupsTask) OnEnqueue() error { + t.Base = commonpbutil.NewMsgBase() + return nil +} + +func (t *ListResourceGroupsTask) PreExecute(ctx context.Context) error { + t.Base.MsgType = commonpb.MsgType_ListResourceGroups + t.Base.SourceID = paramtable.GetNodeID() + + return nil +} + +func (t *ListResourceGroupsTask) Execute(ctx context.Context) error { + var err error + t.result, err = t.queryCoord.ListResourceGroups(ctx, t.ListResourceGroupsRequest) + return err +} + +func (t *ListResourceGroupsTask) PostExecute(ctx context.Context) error { + return nil +} diff --git a/internal/proxy/task_test.go b/internal/proxy/task_test.go index 4ea647d63e..c6237ae1b5 100644 --- a/internal/proxy/task_test.go +++ b/internal/proxy/task_test.go @@ -28,6 +28,7 @@ import ( "time" "github.com/milvus-io/milvus/internal/proto/indexpb" + "github.com/milvus-io/milvus/internal/querycoordv2/meta" "github.com/golang/protobuf/proto" "github.com/milvus-io/milvus-proto/go-api/commonpb" @@ -2538,3 +2539,248 @@ func Test_loadPartitionTask_Execute(t *testing.T) { assert.Error(t, err) }) } + +func TestCreateResourceGroupTask(t *testing.T) { + rc := NewRootCoordMock() + rc.Start() + defer rc.Stop() + qc := NewQueryCoordMock() + qc.Start() + defer qc.Stop() + ctx := context.Background() + mgr := newShardClientMgr() + InitMetaCache(ctx, rc, qc, mgr) + + createRGReq := &milvuspb.CreateResourceGroupRequest{ + Base: &commonpb.MsgBase{ + MsgID: 1, + Timestamp: 2, + TargetID: 3, + }, + ResourceGroup: "rg", + } + + task := &CreateResourceGroupTask{ + CreateResourceGroupRequest: createRGReq, + ctx: ctx, + queryCoord: qc, + } + task.PreExecute(ctx) + + assert.Equal(t, commonpb.MsgType_CreateResourceGroup, task.Type()) + assert.Equal(t, UniqueID(1), task.ID()) + assert.Equal(t, Timestamp(2), task.BeginTs()) + assert.Equal(t, Timestamp(2), task.EndTs()) + assert.Equal(t, paramtable.GetNodeID(), task.Base.GetSourceID()) + assert.Equal(t, UniqueID(3), task.Base.GetTargetID()) + + err := task.Execute(ctx) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, task.result.ErrorCode) +} + +func TestDropResourceGroupTask(t *testing.T) { + rc := NewRootCoordMock() + rc.Start() + defer rc.Stop() + qc := NewQueryCoordMock() + qc.Start() + defer qc.Stop() + ctx := context.Background() + mgr := newShardClientMgr() + InitMetaCache(ctx, rc, qc, mgr) + + dropRGReq := &milvuspb.DropResourceGroupRequest{ + Base: &commonpb.MsgBase{ + MsgID: 1, + Timestamp: 2, + TargetID: 3, + }, + ResourceGroup: "rg", + } + + task := &DropResourceGroupTask{ + DropResourceGroupRequest: dropRGReq, + ctx: ctx, + queryCoord: qc, + } + task.PreExecute(ctx) + + assert.Equal(t, commonpb.MsgType_DropResourceGroup, task.Type()) + assert.Equal(t, UniqueID(1), task.ID()) + assert.Equal(t, Timestamp(2), task.BeginTs()) + assert.Equal(t, Timestamp(2), task.EndTs()) + assert.Equal(t, paramtable.GetNodeID(), task.Base.GetSourceID()) + assert.Equal(t, UniqueID(3), task.Base.GetTargetID()) + + err := task.Execute(ctx) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, task.result.ErrorCode) +} + +func TestTransferNodeTask(t *testing.T) { + rc := NewRootCoordMock() + 
rc.Start() + defer rc.Stop() + qc := NewQueryCoordMock() + qc.Start() + defer qc.Stop() + ctx := context.Background() + mgr := newShardClientMgr() + InitMetaCache(ctx, rc, qc, mgr) + + req := &milvuspb.TransferNodeRequest{ + Base: &commonpb.MsgBase{ + MsgID: 1, + Timestamp: 2, + TargetID: 3, + }, + SourceResourceGroup: "rg1", + TargetResourceGroup: "rg2", + NumNode: 1, + } + + task := &TransferNodeTask{ + TransferNodeRequest: req, + ctx: ctx, + queryCoord: qc, + } + task.PreExecute(ctx) + + assert.Equal(t, commonpb.MsgType_TransferNode, task.Type()) + assert.Equal(t, UniqueID(1), task.ID()) + assert.Equal(t, Timestamp(2), task.BeginTs()) + assert.Equal(t, Timestamp(2), task.EndTs()) + assert.Equal(t, paramtable.GetNodeID(), task.Base.GetSourceID()) + assert.Equal(t, UniqueID(3), task.Base.GetTargetID()) + + err := task.Execute(ctx) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, task.result.ErrorCode) +} + +func TestTransferReplicaTask(t *testing.T) { + rc := &MockRootCoordClientInterface{} + qc := NewQueryCoordMock() + qc.Start() + defer qc.Stop() + ctx := context.Background() + mgr := newShardClientMgr() + InitMetaCache(ctx, rc, qc, mgr) + // make it avoid remote call on rc + globalMetaCache.GetCollectionSchema(context.Background(), "collection1") + + req := &milvuspb.TransferReplicaRequest{ + Base: &commonpb.MsgBase{ + MsgID: 1, + Timestamp: 2, + TargetID: 3, + }, + CollectionName: "collection1", + SourceResourceGroup: "rg1", + TargetResourceGroup: "rg2", + NumReplica: 1, + } + + task := &TransferReplicaTask{ + TransferReplicaRequest: req, + ctx: ctx, + queryCoord: qc, + } + task.PreExecute(ctx) + + assert.Equal(t, commonpb.MsgType_TransferReplica, task.Type()) + assert.Equal(t, UniqueID(1), task.ID()) + assert.Equal(t, Timestamp(2), task.BeginTs()) + assert.Equal(t, Timestamp(2), task.EndTs()) + assert.Equal(t, paramtable.GetNodeID(), task.Base.GetSourceID()) + assert.Equal(t, UniqueID(3), task.Base.GetTargetID()) + + err := task.Execute(ctx) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, task.result.ErrorCode) +} + +func TestListResourceGroupsTask(t *testing.T) { + rc := &MockRootCoordClientInterface{} + qc := NewQueryCoordMock() + qc.Start() + defer qc.Stop() + ctx := context.Background() + mgr := newShardClientMgr() + InitMetaCache(ctx, rc, qc, mgr) + + req := &milvuspb.ListResourceGroupsRequest{ + Base: &commonpb.MsgBase{ + MsgID: 1, + Timestamp: 2, + TargetID: 3, + }, + } + + task := &ListResourceGroupsTask{ + ListResourceGroupsRequest: req, + ctx: ctx, + queryCoord: qc, + } + task.PreExecute(ctx) + + assert.Equal(t, commonpb.MsgType_ListResourceGroups, task.Type()) + assert.Equal(t, UniqueID(1), task.ID()) + assert.Equal(t, Timestamp(2), task.BeginTs()) + assert.Equal(t, Timestamp(2), task.EndTs()) + assert.Equal(t, paramtable.GetNodeID(), task.Base.GetSourceID()) + assert.Equal(t, UniqueID(3), task.Base.GetTargetID()) + + err := task.Execute(ctx) + assert.Nil(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode) + groups := task.result.GetResourceGroups() + assert.Contains(t, groups, meta.DefaultResourceGroupName) + assert.Contains(t, groups, "rg") +} + +func TestDescribeResourceGroupTask(t *testing.T) { + rc := &MockRootCoordClientInterface{} + qc := NewQueryCoordMock() + qc.Start() + defer qc.Stop() + ctx := context.Background() + mgr := newShardClientMgr() + InitMetaCache(ctx, rc, qc, mgr) + // make it avoid remote call on rc + globalMetaCache.GetCollectionSchema(context.Background(), "collection1") + 
globalMetaCache.GetCollectionSchema(context.Background(), "collection2")
+
+ req := &milvuspb.DescribeResourceGroupRequest{
+ Base: &commonpb.MsgBase{
+ MsgID: 1,
+ Timestamp: 2,
+ TargetID: 3,
+ },
+ ResourceGroup: "rg",
+ }
+
+ task := &DescribeResourceGroupTask{
+ DescribeResourceGroupRequest: req,
+ ctx: ctx,
+ queryCoord: qc,
+ }
+ task.PreExecute(ctx)
+
+ assert.Equal(t, commonpb.MsgType_DescribeResourceGroup, task.Type())
+ assert.Equal(t, UniqueID(1), task.ID())
+ assert.Equal(t, Timestamp(2), task.BeginTs())
+ assert.Equal(t, Timestamp(2), task.EndTs())
+ assert.Equal(t, paramtable.GetNodeID(), task.Base.GetSourceID())
+ assert.Equal(t, UniqueID(3), task.Base.GetTargetID())
+
+ err := task.Execute(ctx)
+ assert.Nil(t, err)
+ assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
+ groupInfo := task.result.GetResourceGroup()
+ outgoingNodeNum := groupInfo.GetNumOutgoingNode()
+ incomingNodeNum := groupInfo.GetNumIncomingNode()
+ assert.NotNil(t, outgoingNodeNum["collection1"])
+ assert.NotNil(t, incomingNodeNum["collection2"])
+}
diff --git a/internal/querycoordv2/balance/rowcount_based_balancer.go b/internal/querycoordv2/balance/rowcount_based_balancer.go
index c2ec67909d..94ea9773d9 100644
--- a/internal/querycoordv2/balance/rowcount_based_balancer.go
+++ b/internal/querycoordv2/balance/rowcount_based_balancer.go
@@ -104,7 +104,7 @@
}
func (b *RowCountBasedBalancer) balanceReplica(replica *meta.Replica) ([]SegmentAssignPlan, []ChannelAssignPlan) {
- nodes := replica.Nodes.Collect()
+ nodes := replica.GetNodes()
if len(nodes) == 0 {
return nil, nil
}
@@ -112,6 +112,8 @@ func (b *RowCountBasedBalancer) balanceReplica(replica *meta.Replica) ([]Segment
nodesSegments := make(map[int64][]*meta.Segment)
stoppingNodesSegments := make(map[int64][]*meta.Segment)
+ outboundNodes := b.meta.ResourceManager.CheckOutboundNodes(replica)
+
totalCnt := 0
for _, nid := range nodes {
segments := b.dist.SegmentDistManager.GetByCollectionAndNode(replica.GetCollectionID(), nid)
@@ -125,6 +127,14 @@ func (b *RowCountBasedBalancer) balanceReplica(replica *meta.Replica) ([]Segment
continue
} else if isStopping {
stoppingNodesSegments[nid] = segments
+ } else if outboundNodes.Contain(nid) {
+ // the node is stopping or has been transferred to another resource group
+ log.RatedInfo(10, "meet outbound node, try to move out all segment/channel",
+ zap.Int64("collectionID", replica.GetCollectionID()),
+ zap.Int64("replicaID", replica.GetID()),
+ zap.Int64("node", nid),
+ )
+ stoppingNodesSegments[nid] = segments
} else {
nodesSegments[nid] = segments
}
@@ -224,7 +234,7 @@ outer:
node.setPriority(node.getPriority() + int(s.GetNumOfRows()))
queue.push(node)
}
- return plans, b.getChannelPlan(replica, stoppingNodesSegments)
+ return plans, b.getChannelPlan(replica, lo.Keys(nodesSegments), lo.Keys(stoppingNodesSegments))
}
func (b *RowCountBasedBalancer) handleStoppingNodes(replica *meta.Replica, nodeSegments map[int64][]*meta.Segment) ([]SegmentAssignPlan, []ChannelAssignPlan) {
@@ -271,17 +281,11 @@ func (b *RowCountBasedBalancer) collectionStoppingSegments(stoppingNodesSegments
return segments, removeRowCnt
}
-func (b *RowCountBasedBalancer) getChannelPlan(replica *meta.Replica, stoppingNodesSegments map[int64][]*meta.Segment) []ChannelAssignPlan {
- // maybe it will have some strategies to balance the channel in the future
- // but now, only balance the channel for the stopping nodes.
- return b.getChannelPlanForStoppingNodes(replica, stoppingNodesSegments) -} - -func (b *RowCountBasedBalancer) getChannelPlanForStoppingNodes(replica *meta.Replica, stoppingNodesSegments map[int64][]*meta.Segment) []ChannelAssignPlan { +func (b *RowCountBasedBalancer) getChannelPlan(replica *meta.Replica, onlineNodes []int64, offlineNodes []int64) []ChannelAssignPlan { channelPlans := make([]ChannelAssignPlan, 0) - for nodeID := range stoppingNodesSegments { + for _, nodeID := range offlineNodes { dmChannels := b.dist.ChannelDistManager.GetByCollectionAndNode(replica.GetCollectionID(), nodeID) - plans := b.AssignChannel(dmChannels, replica.Replica.GetNodes()) + plans := b.AssignChannel(dmChannels, onlineNodes) for i := range plans { plans[i].From = nodeID plans[i].ReplicaID = replica.ID diff --git a/internal/querycoordv2/balance/rowcount_based_balancer_test.go b/internal/querycoordv2/balance/rowcount_based_balancer_test.go index c61e9ff881..7367b49488 100644 --- a/internal/querycoordv2/balance/rowcount_based_balancer_test.go +++ b/internal/querycoordv2/balance/rowcount_based_balancer_test.go @@ -62,11 +62,11 @@ func (suite *RowCountBasedBalancerTestSuite) SetupTest() { store := meta.NewMetaStore(suite.kv) idAllocator := RandomIncrementIDAllocator() - testMeta := meta.NewMeta(idAllocator, store) + nodeManager := session.NewNodeManager() + testMeta := meta.NewMeta(idAllocator, store, nodeManager) testTarget := meta.NewTargetManager(suite.broker, testMeta) distManager := meta.NewDistributionManager() - nodeManager := session.NewNodeManager() suite.mockScheduler = task.NewMockScheduler(suite.T()) suite.balancer = NewRowCountBasedBalancer(suite.mockScheduler, nodeManager, distManager, testMeta, testTarget) } @@ -272,8 +272,10 @@ func (suite *RowCountBasedBalancerTestSuite) TestBalance() { for i := range c.nodes { nodeInfo := session.NewNodeInfo(c.nodes[i], "127.0.0.1:0") nodeInfo.UpdateStats(session.WithSegmentCnt(c.segmentCnts[i])) + nodeInfo.UpdateStats(session.WithChannelCnt(len(c.distributionChannels[c.nodes[i]]))) nodeInfo.SetState(c.states[i]) suite.balancer.nodeManager.Add(nodeInfo) + suite.balancer.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, c.nodes[i]) } segmentPlans, channelPlans := balancer.Balance() suite.ElementsMatch(c.expectChannelPlans, channelPlans) @@ -283,6 +285,111 @@ func (suite *RowCountBasedBalancerTestSuite) TestBalance() { } +func (suite *RowCountBasedBalancerTestSuite) TestBalanceOutboundNodes() { + cases := []struct { + name string + nodes []int64 + notExistedNodes []int64 + segmentCnts []int + states []session.State + shouldMock bool + distributions map[int64][]*meta.Segment + distributionChannels map[int64][]*meta.DmChannel + expectPlans []SegmentAssignPlan + expectChannelPlans []ChannelAssignPlan + }{ + { + name: "balance out bound nodes", + nodes: []int64{1, 2, 3}, + segmentCnts: []int{1, 2, 2}, + states: []session.State{session.NodeStateNormal, session.NodeStateNormal, session.NodeStateNormal}, + shouldMock: true, + distributions: map[int64][]*meta.Segment{ + 1: {{SegmentInfo: &datapb.SegmentInfo{ID: 1, CollectionID: 1, NumOfRows: 10}, Node: 1}}, + 2: { + {SegmentInfo: &datapb.SegmentInfo{ID: 2, CollectionID: 1, NumOfRows: 20}, Node: 2}, + {SegmentInfo: &datapb.SegmentInfo{ID: 3, CollectionID: 1, NumOfRows: 30}, Node: 2}, + }, + 3: { + {SegmentInfo: &datapb.SegmentInfo{ID: 4, CollectionID: 1, NumOfRows: 10}, Node: 3}, + {SegmentInfo: &datapb.SegmentInfo{ID: 5, CollectionID: 1, NumOfRows: 10}, Node: 3}, + }, + }, + distributionChannels: 
map[int64][]*meta.DmChannel{ + 2: { + {VchannelInfo: &datapb.VchannelInfo{CollectionID: 1, ChannelName: "v2"}, Node: 2}, + }, + 3: { + {VchannelInfo: &datapb.VchannelInfo{CollectionID: 1, ChannelName: "v3"}, Node: 3}, + }, + }, + expectPlans: []SegmentAssignPlan{ + {Segment: &meta.Segment{SegmentInfo: &datapb.SegmentInfo{ID: 4, CollectionID: 1, NumOfRows: 10}, Node: 3}, From: 3, To: 1, ReplicaID: 1, Weight: weightHigh}, + {Segment: &meta.Segment{SegmentInfo: &datapb.SegmentInfo{ID: 5, CollectionID: 1, NumOfRows: 10}, Node: 3}, From: 3, To: 1, ReplicaID: 1, Weight: weightHigh}, + }, + expectChannelPlans: []ChannelAssignPlan{ + {Channel: &meta.DmChannel{VchannelInfo: &datapb.VchannelInfo{CollectionID: 1, ChannelName: "v3"}, Node: 3}, From: 3, To: 1, ReplicaID: 1, Weight: weightHigh}, + }, + }, + } + + suite.mockScheduler.Mock.On("GetNodeChannelDelta", mock.Anything).Return(0) + for _, c := range cases { + suite.Run(c.name, func() { + suite.SetupSuite() + defer suite.TearDownTest() + balancer := suite.balancer + collection := utils.CreateTestCollection(1, 1) + segments := []*datapb.SegmentBinlogs{ + { + SegmentID: 1, + }, + { + SegmentID: 2, + }, + { + SegmentID: 3, + }, + { + SegmentID: 4, + }, + { + SegmentID: 5, + }, + } + suite.broker.EXPECT().GetRecoveryInfo(mock.Anything, int64(1), int64(1)).Return( + nil, segments, nil) + balancer.targetMgr.UpdateCollectionNextTargetWithPartitions(int64(1), int64(1)) + balancer.targetMgr.UpdateCollectionCurrentTarget(1, 1) + collection.LoadPercentage = 100 + collection.Status = querypb.LoadStatus_Loaded + balancer.meta.CollectionManager.PutCollection(collection) + balancer.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, append(c.nodes, c.notExistedNodes...))) + for node, s := range c.distributions { + balancer.dist.SegmentDistManager.Update(node, s...) + } + for node, v := range c.distributionChannels { + balancer.dist.ChannelDistManager.Update(node, v...) 
+ } + for i := range c.nodes { + nodeInfo := session.NewNodeInfo(c.nodes[i], "127.0.0.1:0") + nodeInfo.UpdateStats(session.WithSegmentCnt(c.segmentCnts[i])) + nodeInfo.UpdateStats(session.WithChannelCnt(len(c.distributionChannels[c.nodes[i]]))) + nodeInfo.SetState(c.states[i]) + suite.balancer.nodeManager.Add(nodeInfo) + } + // make node-3 outbound + err := balancer.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, 1) + suite.NoError(err) + err = balancer.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, 2) + suite.NoError(err) + segmentPlans, channelPlans := balancer.Balance() + suite.ElementsMatch(c.expectChannelPlans, channelPlans) + suite.ElementsMatch(c.expectPlans, segmentPlans) + }) + } +} + func (suite *RowCountBasedBalancerTestSuite) TestBalanceOnLoadingCollection() { cases := []struct { name string diff --git a/internal/querycoordv2/checkers/channel_checker.go b/internal/querycoordv2/checkers/channel_checker.go index 4c0458c81f..eb639916a1 100644 --- a/internal/querycoordv2/checkers/channel_checker.go +++ b/internal/querycoordv2/checkers/channel_checker.go @@ -27,6 +27,7 @@ import ( "github.com/milvus-io/milvus/internal/querycoordv2/task" "github.com/milvus-io/milvus/internal/querycoordv2/utils" "github.com/milvus-io/milvus/internal/util/typeutil" + "github.com/samber/lo" "go.uber.org/zap" ) @@ -134,7 +135,7 @@ func (c *ChannelChecker) getDmChannelDiff(targetMgr *meta.TargetManager, func (c *ChannelChecker) getChannelDist(distMgr *meta.DistributionManager, replica *meta.Replica) []*meta.DmChannel { dist := make([]*meta.DmChannel, 0) - for _, nodeID := range replica.Nodes.Collect() { + for _, nodeID := range replica.GetNodes() { dist = append(dist, distMgr.ChannelDistManager.GetByCollectionAndNode(replica.GetCollectionID(), nodeID)...) } return dist @@ -170,7 +171,11 @@ func (c *ChannelChecker) findRepeatedChannels(distMgr *meta.DistributionManager, } func (c *ChannelChecker) createChannelLoadTask(ctx context.Context, channels []*meta.DmChannel, replica *meta.Replica) []task.Task { - plans := c.balancer.AssignChannel(channels, replica.Replica.GetNodes()) + outboundNodes := c.meta.ResourceManager.CheckOutboundNodes(replica) + availableNodes := lo.Filter(replica.Replica.GetNodes(), func(node int64, _ int) bool { + return !outboundNodes.Contain(node) + }) + plans := c.balancer.AssignChannel(channels, availableNodes) for i := range plans { plans[i].ReplicaID = replica.GetID() } diff --git a/internal/querycoordv2/checkers/channel_checker_test.go b/internal/querycoordv2/checkers/channel_checker_test.go index 30cf5fbc6d..54321ee774 100644 --- a/internal/querycoordv2/checkers/channel_checker_test.go +++ b/internal/querycoordv2/checkers/channel_checker_test.go @@ -28,6 +28,7 @@ import ( "github.com/milvus-io/milvus/internal/querycoordv2/balance" "github.com/milvus-io/milvus/internal/querycoordv2/meta" . 
"github.com/milvus-io/milvus/internal/querycoordv2/params" + "github.com/milvus-io/milvus/internal/querycoordv2/session" "github.com/milvus-io/milvus/internal/querycoordv2/task" "github.com/milvus-io/milvus/internal/querycoordv2/utils" "github.com/milvus-io/milvus/internal/util/etcd" @@ -39,6 +40,8 @@ type ChannelCheckerTestSuite struct { checker *ChannelChecker meta *meta.Meta broker *meta.MockBroker + + nodeMgr *session.NodeManager } func (suite *ChannelCheckerTestSuite) SetupSuite() { @@ -62,7 +65,8 @@ func (suite *ChannelCheckerTestSuite) SetupTest() { // meta store := meta.NewMetaStore(suite.kv) idAllocator := RandomIncrementIDAllocator() - suite.meta = meta.NewMeta(idAllocator, store) + suite.nodeMgr = session.NewNodeManager() + suite.meta = meta.NewMeta(idAllocator, store, suite.nodeMgr) suite.broker = meta.NewMockBroker(suite.T()) targetManager := meta.NewTargetManager(suite.broker, suite.meta) @@ -98,6 +102,8 @@ func (suite *ChannelCheckerTestSuite) TestLoadChannel() { checker := suite.checker checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1)) checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1})) + suite.nodeMgr.Add(session.NewNodeInfo(1, "localhost")) + checker.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, 1) channels := []*datapb.VchannelInfo{ { diff --git a/internal/querycoordv2/checkers/segment_checker.go b/internal/querycoordv2/checkers/segment_checker.go index 6808d2fde3..5ade090ee9 100644 --- a/internal/querycoordv2/checkers/segment_checker.go +++ b/internal/querycoordv2/checkers/segment_checker.go @@ -29,6 +29,7 @@ import ( "github.com/milvus-io/milvus/internal/querycoordv2/task" "github.com/milvus-io/milvus/internal/querycoordv2/utils" "github.com/milvus-io/milvus/internal/util/typeutil" + "github.com/samber/lo" "go.uber.org/zap" ) @@ -144,7 +145,7 @@ func (c *SegmentChecker) getStreamingSegmentDiff(targetMgr *meta.TargetManager, func (c *SegmentChecker) getStreamingSegmentsDist(distMgr *meta.DistributionManager, replica *meta.Replica) map[int64]*meta.Segment { segments := make(map[int64]*meta.Segment, 0) - for _, node := range replica.Nodes.Collect() { + for _, node := range replica.GetNodes() { segmentsOnNodes := distMgr.LeaderViewManager.GetGrowingSegmentDistByCollectionAndNode(replica.CollectionID, node) for k, v := range segmentsOnNodes { segments[k] = v @@ -196,7 +197,7 @@ func (c *SegmentChecker) getHistoricalSegmentDiff(targetMgr *meta.TargetManager, func (c *SegmentChecker) getHistoricalSegmentsDist(distMgr *meta.DistributionManager, replica *meta.Replica) []*meta.Segment { ret := make([]*meta.Segment, 0) - for _, node := range replica.Nodes.Collect() { + for _, node := range replica.GetNodes() { ret = append(ret, distMgr.SegmentDistManager.GetByCollectionAndNode(replica.CollectionID, node)...) 
} return ret @@ -266,7 +267,11 @@ func (c *SegmentChecker) createSegmentLoadTasks(ctx context.Context, segments [] } packedSegments = append(packedSegments, &meta.Segment{SegmentInfo: s}) } - plans := c.balancer.AssignSegment(packedSegments, replica.Replica.GetNodes()) + outboundNodes := c.meta.ResourceManager.CheckOutboundNodes(replica) + availableNodes := lo.Filter(replica.Replica.GetNodes(), func(node int64, _ int) bool { + return !outboundNodes.Contain(node) + }) + plans := c.balancer.AssignSegment(packedSegments, availableNodes) for i := range plans { plans[i].ReplicaID = replica.GetID() } diff --git a/internal/querycoordv2/checkers/segment_checker_test.go b/internal/querycoordv2/checkers/segment_checker_test.go index dfd5b7bbef..f06af1fe21 100644 --- a/internal/querycoordv2/checkers/segment_checker_test.go +++ b/internal/querycoordv2/checkers/segment_checker_test.go @@ -30,6 +30,7 @@ import ( "github.com/milvus-io/milvus/internal/querycoordv2/balance" "github.com/milvus-io/milvus/internal/querycoordv2/meta" . "github.com/milvus-io/milvus/internal/querycoordv2/params" + "github.com/milvus-io/milvus/internal/querycoordv2/session" "github.com/milvus-io/milvus/internal/querycoordv2/task" "github.com/milvus-io/milvus/internal/querycoordv2/utils" "github.com/milvus-io/milvus/internal/util/etcd" @@ -41,6 +42,7 @@ type SegmentCheckerTestSuite struct { checker *SegmentChecker meta *meta.Meta broker *meta.MockBroker + nodeMgr *session.NodeManager } func (suite *SegmentCheckerTestSuite) SetupSuite() { @@ -64,7 +66,8 @@ func (suite *SegmentCheckerTestSuite) SetupTest() { // meta store := meta.NewMetaStore(suite.kv) idAllocator := RandomIncrementIDAllocator() - suite.meta = meta.NewMeta(idAllocator, store) + suite.nodeMgr = session.NewNodeManager() + suite.meta = meta.NewMeta(idAllocator, store, suite.nodeMgr) distManager := meta.NewDistributionManager() suite.broker = meta.NewMockBroker(suite.T()) targetManager := meta.NewTargetManager(suite.broker, suite.meta) @@ -100,6 +103,10 @@ func (suite *SegmentCheckerTestSuite) TestLoadSegments() { // set meta checker.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1)) checker.meta.ReplicaManager.Put(utils.CreateTestReplica(1, 1, []int64{1, 2})) + suite.nodeMgr.Add(session.NewNodeInfo(1, "localhost")) + suite.nodeMgr.Add(session.NewNodeInfo(2, "localhost")) + checker.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, 1) + checker.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, 2) // set target segments := []*datapb.SegmentBinlogs{ diff --git a/internal/querycoordv2/dist/dist_controller_test.go b/internal/querycoordv2/dist/dist_controller_test.go index 4762a844cd..c08afb9018 100644 --- a/internal/querycoordv2/dist/dist_controller_test.go +++ b/internal/querycoordv2/dist/dist_controller_test.go @@ -65,7 +65,7 @@ func (suite *DistControllerTestSuite) SetupTest() { // meta store := meta.NewMetaStore(suite.kv) idAllocator := RandomIncrementIDAllocator() - suite.meta = meta.NewMeta(idAllocator, store) + suite.meta = meta.NewMeta(idAllocator, store, session.NewNodeManager()) suite.mockCluster = session.NewMockCluster(suite.T()) nodeManager := session.NewNodeManager() diff --git a/internal/querycoordv2/handlers.go b/internal/querycoordv2/handlers.go index 6d0ef1e749..9eb43040c6 100644 --- a/internal/querycoordv2/handlers.go +++ b/internal/querycoordv2/handlers.go @@ -48,7 +48,7 @@ import ( func (s *Server) checkAnyReplicaAvailable(collectionID int64) bool { for _, replica := range 
s.meta.ReplicaManager.GetByCollection(collectionID) { isAvailable := true - for node := range replica.Nodes { + for _, node := range replica.GetNodes() { if s.nodeMgr.Get(node) == nil { isAvailable = false break @@ -94,7 +94,11 @@ func (s *Server) balanceSegments(ctx context.Context, req *querypb.LoadBalanceRe srcNode := req.GetSourceNodeIDs()[0] dstNodeSet := typeutil.NewUniqueSet(req.GetDstNodeIDs()...) if dstNodeSet.Len() == 0 { - dstNodeSet.Insert(replica.GetNodes()...) + outboundNodes := s.meta.ResourceManager.CheckOutboundNodes(replica) + availableNodes := lo.Filter(replica.Replica.GetNodes(), func(node int64, _ int) bool { + return !outboundNodes.Contain(node) + }) + dstNodeSet.Insert(availableNodes...) } dstNodeSet.Remove(srcNode) @@ -302,7 +306,13 @@ func (s *Server) tryGetNodesMetrics(ctx context.Context, req *milvuspb.GetMetric } func (s *Server) fillReplicaInfo(replica *meta.Replica, withShardNodes bool) (*milvuspb.ReplicaInfo, error) { - info := utils.Replica2ReplicaInfo(replica.Replica) + info := &milvuspb.ReplicaInfo{ + ReplicaID: replica.GetID(), + CollectionID: replica.GetCollectionID(), + NodeIds: replica.GetNodes(), + ResourceGroupName: replica.GetResourceGroup(), + NumOutboundNode: s.meta.GetOutgoingNodeNumByReplica(replica), + } channels := s.targetMgr.GetDmChannelsByCollection(replica.GetCollectionID(), meta.CurrentTarget) if len(channels) == 0 { @@ -335,7 +345,7 @@ func (s *Server) fillReplicaInfo(replica *meta.Replica, withShardNodes bool) (*m } if withShardNodes { shardNodes := lo.FilterMap(segments, func(segment *meta.Segment, _ int) (int64, bool) { - if replica.Nodes.Contain(segment.Node) { + if replica.Contains(segment.Node) { return segment.Node, true } return 0, false diff --git a/internal/querycoordv2/job/job.go b/internal/querycoordv2/job/job.go index 643431f8f7..f8ce198e51 100644 --- a/internal/querycoordv2/job/job.go +++ b/internal/querycoordv2/job/job.go @@ -197,10 +197,11 @@ func (job *LoadCollectionJob) Execute() error { } // Create replicas - replicas, err := utils.SpawnReplicas(job.meta.ReplicaManager, - job.nodeMgr, + replicas, err := utils.SpawnReplicasWithRG(job.meta, req.GetCollectionID(), - req.GetReplicaNumber()) + req.GetResourceGroups(), + req.GetReplicaNumber(), + ) if err != nil { msg := "failed to spawn replica for collection" log.Error(msg, zap.Error(err)) @@ -209,7 +210,8 @@ func (job *LoadCollectionJob) Execute() error { for _, replica := range replicas { log.Info("replica created", zap.Int64("replicaID", replica.GetID()), - zap.Int64s("nodes", replica.GetNodes())) + zap.Int64s("nodes", replica.GetNodes()), + zap.String("resourceGroup", replica.GetResourceGroup())) } // Fetch channels and segments from DataCoord @@ -411,10 +413,11 @@ func (job *LoadPartitionJob) Execute() error { } // Create replicas - replicas, err := utils.SpawnReplicas(job.meta.ReplicaManager, - job.nodeMgr, + replicas, err := utils.SpawnReplicasWithRG(job.meta, req.GetCollectionID(), - req.GetReplicaNumber()) + req.GetResourceGroups(), + req.GetReplicaNumber(), + ) if err != nil { msg := "failed to spawn replica for collection" log.Error(msg, zap.Error(err)) @@ -423,7 +426,8 @@ func (job *LoadPartitionJob) Execute() error { for _, replica := range replicas { log.Info("replica created", zap.Int64("replicaID", replica.GetID()), - zap.Int64s("nodes", replica.GetNodes())) + zap.Int64s("nodes", replica.GetNodes()), + zap.String("resourceGroup", replica.GetResourceGroup())) } // It's safe here to call UpdateCollectionNextTargetWithPartitions, as the collection not existing 
diff --git a/internal/querycoordv2/job/job_test.go b/internal/querycoordv2/job/job_test.go index c4905caf0f..94d5a870cc 100644 --- a/internal/querycoordv2/job/job_test.go +++ b/internal/querycoordv2/job/job_test.go @@ -131,19 +131,29 @@ func (suite *JobSuite) SetupTest() { suite.store = meta.NewMetaStore(suite.kv) suite.dist = meta.NewDistributionManager() - suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), suite.store) + suite.nodeMgr = session.NewNodeManager() + suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), suite.store, suite.nodeMgr) suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta) suite.targetObserver = observers.NewTargetObserver(suite.meta, suite.targetMgr, suite.dist, suite.broker, ) - suite.nodeMgr = session.NewNodeManager() - suite.nodeMgr.Add(&session.NodeInfo{}) suite.scheduler = NewScheduler() suite.scheduler.Start(context.Background()) meta.GlobalFailedLoadCache = meta.NewFailedLoadCache() + + suite.nodeMgr.Add(session.NewNodeInfo(1000, "localhost")) + suite.nodeMgr.Add(session.NewNodeInfo(2000, "localhost")) + suite.nodeMgr.Add(session.NewNodeInfo(3000, "localhost")) + + err = suite.meta.AssignNode(meta.DefaultResourceGroupName, 1000) + suite.NoError(err) + err = suite.meta.AssignNode(meta.DefaultResourceGroupName, 2000) + suite.NoError(err) + err = suite.meta.AssignNode(meta.DefaultResourceGroupName, 3000) + suite.NoError(err) } func (suite *JobSuite) TearDownTest() { @@ -265,6 +275,48 @@ func (suite *JobSuite) TestLoadCollection() { err := job.Wait() suite.ErrorIs(err, ErrLoadParameterMismatched) } + + suite.meta.ResourceManager.AddResourceGroup("rg1") + suite.meta.ResourceManager.AddResourceGroup("rg2") + suite.meta.ResourceManager.AddResourceGroup("rg3") + + // Load with 3 replica on 1 rg + req := &querypb.LoadCollectionRequest{ + CollectionID: 1001, + ReplicaNumber: 3, + ResourceGroups: []string{"rg1"}, + } + job := NewLoadCollectionJob( + ctx, + req, + suite.dist, + suite.meta, + suite.targetMgr, + suite.broker, + suite.nodeMgr, + ) + suite.scheduler.Add(job) + err := job.Wait() + suite.ErrorContains(err, meta.ErrNodeNotEnough.Error()) + + // Load with 3 replica on 3 rg + req = &querypb.LoadCollectionRequest{ + CollectionID: 1002, + ReplicaNumber: 3, + ResourceGroups: []string{"rg1", "rg2", "rg3"}, + } + job = NewLoadCollectionJob( + ctx, + req, + suite.dist, + suite.meta, + suite.targetMgr, + suite.broker, + suite.nodeMgr, + ) + suite.scheduler.Add(job) + err = job.Wait() + suite.ErrorContains(err, meta.ErrNodeNotEnough.Error()) } func (suite *JobSuite) TestLoadCollectionWithReplicas() { @@ -278,7 +330,7 @@ func (suite *JobSuite) TestLoadCollectionWithReplicas() { // Load with 3 replica req := &querypb.LoadCollectionRequest{ CollectionID: collection, - ReplicaNumber: 3, + ReplicaNumber: 5, } job := NewLoadCollectionJob( ctx, @@ -482,6 +534,50 @@ func (suite *JobSuite) TestLoadPartition() { err := job.Wait() suite.ErrorIs(err, ErrLoadParameterMismatched) } + + suite.meta.ResourceManager.AddResourceGroup("rg1") + suite.meta.ResourceManager.AddResourceGroup("rg2") + suite.meta.ResourceManager.AddResourceGroup("rg3") + + // test load 3 replica in 1 rg, should pass rg check + req := &querypb.LoadPartitionsRequest{ + CollectionID: 100, + PartitionIDs: []int64{1001}, + ReplicaNumber: 3, + ResourceGroups: []string{"rg1"}, + } + job := NewLoadPartitionJob( + ctx, + req, + suite.dist, + suite.meta, + suite.targetMgr, + suite.broker, + suite.nodeMgr, + ) + suite.scheduler.Add(job) + err := job.Wait() + suite.Contains(err.Error(), 
meta.ErrNodeNotEnough.Error()) + + // test load 3 replica in 3 rg, should pass rg check + req = &querypb.LoadPartitionsRequest{ + CollectionID: 102, + PartitionIDs: []int64{1001}, + ReplicaNumber: 3, + ResourceGroups: []string{"rg1", "rg2", "rg3"}, + } + job = NewLoadPartitionJob( + ctx, + req, + suite.dist, + suite.meta, + suite.targetMgr, + suite.broker, + suite.nodeMgr, + ) + suite.scheduler.Add(job) + err = job.Wait() + suite.Contains(err.Error(), meta.ErrNodeNotEnough.Error()) } func (suite *JobSuite) TestLoadPartitionWithReplicas() { @@ -496,7 +592,7 @@ func (suite *JobSuite) TestLoadPartitionWithReplicas() { req := &querypb.LoadPartitionsRequest{ CollectionID: collection, PartitionIDs: suite.partitions[collection], - ReplicaNumber: 3, + ReplicaNumber: 5, } job := NewLoadPartitionJob( ctx, @@ -707,7 +803,16 @@ func (suite *JobSuite) TestReleasePartition() { func (suite *JobSuite) TestLoadCollectionStoreFailed() { // Store collection failed store := meta.NewMockStore(suite.T()) - suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), store) + suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), store, suite.nodeMgr) + + store.EXPECT().SaveResourceGroup(mock.Anything, mock.Anything).Return(nil) + err := suite.meta.AssignNode(meta.DefaultResourceGroupName, 1000) + suite.NoError(err) + err = suite.meta.AssignNode(meta.DefaultResourceGroupName, 2000) + suite.NoError(err) + err = suite.meta.AssignNode(meta.DefaultResourceGroupName, 3000) + suite.NoError(err) + for _, collection := range suite.collections { if suite.loadTypes[collection] != querypb.LoadType_LoadCollection { continue @@ -743,8 +848,17 @@ func (suite *JobSuite) TestLoadCollectionStoreFailed() { func (suite *JobSuite) TestLoadPartitionStoreFailed() { // Store partition failed store := meta.NewMockStore(suite.T()) - suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), store) - err := errors.New("failed to store collection") + suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), store, suite.nodeMgr) + + store.EXPECT().SaveResourceGroup(mock.Anything, mock.Anything).Return(nil) + err := suite.meta.AssignNode(meta.DefaultResourceGroupName, 1000) + suite.NoError(err) + err = suite.meta.AssignNode(meta.DefaultResourceGroupName, 2000) + suite.NoError(err) + err = suite.meta.AssignNode(meta.DefaultResourceGroupName, 3000) + suite.NoError(err) + + err = errors.New("failed to store collection") for _, collection := range suite.collections { if suite.loadTypes[collection] != querypb.LoadType_LoadPartition { continue @@ -775,7 +889,7 @@ func (suite *JobSuite) TestLoadPartitionStoreFailed() { func (suite *JobSuite) TestLoadCreateReplicaFailed() { // Store replica failed - suite.meta = meta.NewMeta(ErrorIDAllocator(), suite.store) + suite.meta = meta.NewMeta(ErrorIDAllocator(), suite.store, session.NewNodeManager()) for _, collection := range suite.collections { req := &querypb.LoadCollectionRequest{ CollectionID: collection, diff --git a/internal/querycoordv2/meta/channel_dist_manager.go b/internal/querycoordv2/meta/channel_dist_manager.go index be9fed0f50..babef21935 100644 --- a/internal/querycoordv2/meta/channel_dist_manager.go +++ b/internal/querycoordv2/meta/channel_dist_manager.go @@ -91,7 +91,7 @@ func (m *ChannelDistManager) GetShardLeader(replica *Replica, shard string) (int m.rwmutex.RLock() defer m.rwmutex.RUnlock() - for node := range replica.Nodes { + for _, node := range replica.GetNodes() { channels := m.channels[node] for _, dmc := range channels { if dmc.ChannelName == shard { @@ -108,7 +108,7 @@ func (m 
*ChannelDistManager) GetShardLeadersByReplica(replica *Replica) map[stri defer m.rwmutex.RUnlock() ret := make(map[string]int64) - for node := range replica.Nodes { + for _, node := range replica.GetNodes() { channels := m.channels[node] for _, dmc := range channels { if dmc.GetCollectionID() == replica.GetCollectionID() { diff --git a/internal/querycoordv2/meta/channel_dist_manager_test.go b/internal/querycoordv2/meta/channel_dist_manager_test.go index b46525a3f6..6a108584d9 100644 --- a/internal/querycoordv2/meta/channel_dist_manager_test.go +++ b/internal/querycoordv2/meta/channel_dist_manager_test.go @@ -100,18 +100,18 @@ func (suite *ChannelDistManagerSuite) TestGetBy() { func (suite *ChannelDistManagerSuite) TestGetShardLeader() { replicas := []*Replica{ - { - Replica: &querypb.Replica{ + NewReplica( + &querypb.Replica{ CollectionID: suite.collection, }, - Nodes: typeutil.NewUniqueSet(suite.nodes[0], suite.nodes[2]), - }, - { - Replica: &querypb.Replica{ + typeutil.NewUniqueSet(suite.nodes[0], suite.nodes[2]), + ), + NewReplica( + &querypb.Replica{ CollectionID: suite.collection, }, - Nodes: typeutil.NewUniqueSet(suite.nodes[1]), - }, + typeutil.NewUniqueSet(suite.nodes[1]), + ), } // Test on replica 0 diff --git a/internal/querycoordv2/meta/meta.go b/internal/querycoordv2/meta/meta.go index 96798618a2..b1b99c8466 100644 --- a/internal/querycoordv2/meta/meta.go +++ b/internal/querycoordv2/meta/meta.go @@ -16,17 +16,22 @@ package meta +import "github.com/milvus-io/milvus/internal/querycoordv2/session" + type Meta struct { *CollectionManager *ReplicaManager + *ResourceManager } func NewMeta( idAllocator func() (int64, error), store Store, + nodeMgr *session.NodeManager, ) *Meta { return &Meta{ NewCollectionManager(store), NewReplicaManager(idAllocator, store), + NewResourceManager(store, nodeMgr), } } diff --git a/internal/querycoordv2/meta/mock_store.go b/internal/querycoordv2/meta/mock_store.go index 7dd9632897..07eacc0b63 100644 --- a/internal/querycoordv2/meta/mock_store.go +++ b/internal/querycoordv2/meta/mock_store.go @@ -155,6 +155,51 @@ func (_c *MockStore_GetReplicas_Call) Return(_a0 []*querypb.Replica, _a1 error) return _c } +// GetResourceGroups provides a mock function with given fields: +func (_m *MockStore) GetResourceGroups() ([]*querypb.ResourceGroup, error) { + ret := _m.Called() + + var r0 []*querypb.ResourceGroup + if rf, ok := ret.Get(0).(func() []*querypb.ResourceGroup); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*querypb.ResourceGroup) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockStore_GetResourceGroups_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetResourceGroups' +type MockStore_GetResourceGroups_Call struct { + *mock.Call +} + +// GetResourceGroups is a helper method to define mock.On call +func (_e *MockStore_Expecter) GetResourceGroups() *MockStore_GetResourceGroups_Call { + return &MockStore_GetResourceGroups_Call{Call: _e.mock.On("GetResourceGroups")} +} + +func (_c *MockStore_GetResourceGroups_Call) Run(run func()) *MockStore_GetResourceGroups_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockStore_GetResourceGroups_Call) Return(_a0 []*querypb.ResourceGroup, _a1 error) *MockStore_GetResourceGroups_Call { + _c.Call.Return(_a0, _a1) + return _c +} + // ReleaseCollection provides a mock function with given fields: id func (_m 
*MockStore) ReleaseCollection(id int64) error { ret := _m.Called(id) @@ -319,6 +364,43 @@ func (_c *MockStore_ReleaseReplicas_Call) Return(_a0 error) *MockStore_ReleaseRe return _c } +// RemoveResourceGroup provides a mock function with given fields: rgName +func (_m *MockStore) RemoveResourceGroup(rgName string) error { + ret := _m.Called(rgName) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(rgName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockStore_RemoveResourceGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveResourceGroup' +type MockStore_RemoveResourceGroup_Call struct { + *mock.Call +} + +// RemoveResourceGroup is a helper method to define mock.On call +// - rgName string +func (_e *MockStore_Expecter) RemoveResourceGroup(rgName interface{}) *MockStore_RemoveResourceGroup_Call { + return &MockStore_RemoveResourceGroup_Call{Call: _e.mock.On("RemoveResourceGroup", rgName)} +} + +func (_c *MockStore_RemoveResourceGroup_Call) Run(run func(rgName string)) *MockStore_RemoveResourceGroup_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockStore_RemoveResourceGroup_Call) Return(_a0 error) *MockStore_RemoveResourceGroup_Call { + _c.Call.Return(_a0) + return _c +} + // SaveCollection provides a mock function with given fields: info func (_m *MockStore) SaveCollection(info *querypb.CollectionLoadInfo) error { ret := _m.Called(info) @@ -443,6 +525,56 @@ func (_c *MockStore_SaveReplica_Call) Return(_a0 error) *MockStore_SaveReplica_C return _c } +// SaveResourceGroup provides a mock function with given fields: rgs +func (_m *MockStore) SaveResourceGroup(rgs ...*querypb.ResourceGroup) error { + _va := make([]interface{}, len(rgs)) + for _i := range rgs { + _va[_i] = rgs[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 error + if rf, ok := ret.Get(0).(func(...*querypb.ResourceGroup) error); ok { + r0 = rf(rgs...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockStore_SaveResourceGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveResourceGroup' +type MockStore_SaveResourceGroup_Call struct { + *mock.Call +} + +// SaveResourceGroup is a helper method to define mock.On call +// - rgs ...*querypb.ResourceGroup +func (_e *MockStore_Expecter) SaveResourceGroup(rgs ...interface{}) *MockStore_SaveResourceGroup_Call { + return &MockStore_SaveResourceGroup_Call{Call: _e.mock.On("SaveResourceGroup", + append([]interface{}{}, rgs...)...)} +} + +func (_c *MockStore_SaveResourceGroup_Call) Run(run func(rgs ...*querypb.ResourceGroup)) *MockStore_SaveResourceGroup_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]*querypb.ResourceGroup, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(*querypb.ResourceGroup) + } + } + run(variadicArgs...) 
+ }) + return _c +} + +func (_c *MockStore_SaveResourceGroup_Call) Return(_a0 error) *MockStore_SaveResourceGroup_Call { + _c.Call.Return(_a0) + return _c +} + type mockConstructorTestingTNewMockStore interface { mock.TestingT Cleanup(func()) diff --git a/internal/querycoordv2/meta/replica_manager.go b/internal/querycoordv2/meta/replica_manager.go index 48c9d37572..36cce16c65 100644 --- a/internal/querycoordv2/meta/replica_manager.go +++ b/internal/querycoordv2/meta/replica_manager.go @@ -30,23 +30,66 @@ import ( type Replica struct { *querypb.Replica - Nodes UniqueSet // a helper field for manipulating replica's Nodes slice field + nodes UniqueSet // a helper field for manipulating replica's Nodes slice field + rwmutex sync.RWMutex +} + +func NewReplica(replica *querypb.Replica, nodes UniqueSet) *Replica { + return &Replica{ + Replica: replica, + nodes: nodes, + } } func (replica *Replica) AddNode(nodes ...int64) { - replica.Nodes.Insert(nodes...) - replica.Replica.Nodes = replica.Nodes.Collect() + replica.rwmutex.Lock() + defer replica.rwmutex.Unlock() + replica.nodes.Insert(nodes...) + replica.Replica.Nodes = replica.nodes.Collect() +} + +func (replica *Replica) GetNodes() []int64 { + replica.rwmutex.RLock() + defer replica.rwmutex.RUnlock() + if replica != nil { + return replica.nodes.Collect() + } + return nil +} + +func (replica *Replica) Len() int { + replica.rwmutex.RLock() + defer replica.rwmutex.RUnlock() + if replica != nil { + return replica.nodes.Len() + } + + return 0 +} + +func (replica *Replica) Contains(node int64) bool { + replica.rwmutex.RLock() + defer replica.rwmutex.RUnlock() + if replica != nil { + return replica.nodes.Contain(node) + } + + return false } func (replica *Replica) RemoveNode(nodes ...int64) { - replica.Nodes.Remove(nodes...) - replica.Replica.Nodes = replica.Nodes.Collect() + replica.rwmutex.Lock() + defer replica.rwmutex.Unlock() + replica.nodes.Remove(nodes...) + replica.Replica.Nodes = replica.nodes.Collect() } func (replica *Replica) Clone() *Replica { + replica.rwmutex.RLock() + defer replica.rwmutex.RUnlock() return &Replica{ Replica: proto.Clone(replica.Replica).(*querypb.Replica), - Nodes: NewUniqueSet(replica.Replica.Nodes...), + nodes: NewUniqueSet(replica.Replica.Nodes...), } } @@ -75,10 +118,14 @@ func (m *ReplicaManager) Recover(collections []int64) error { collectionSet := typeutil.NewUniqueSet(collections...) for _, replica := range replicas { + if len(replica.GetResourceGroup()) == 0 { + replica.ResourceGroup = DefaultResourceGroupName + } + if collectionSet.Contain(replica.GetCollectionID()) { m.replicas[replica.GetID()] = &Replica{ Replica: replica, - Nodes: NewUniqueSet(replica.GetNodes()...), + nodes: NewUniqueSet(replica.GetNodes()...), } log.Info("recover replica", zap.Int64("collectionID", replica.GetCollectionID()), @@ -109,13 +156,13 @@ func (m *ReplicaManager) Get(id UniqueID) *Replica { // Spawn spawns replicas of the given number, for given collection, // this doesn't store these replicas and assign nodes to them. -func (m *ReplicaManager) Spawn(collection int64, replicaNumber int32) ([]*Replica, error) { +func (m *ReplicaManager) Spawn(collection int64, replicaNumber int32, rgName string) ([]*Replica, error) { var ( replicas = make([]*Replica, replicaNumber) err error ) for i := range replicas { - replicas[i], err = m.spawn(collection) + replicas[i], err = m.spawn(collection, rgName) if err != nil { return nil, err } @@ -130,17 +177,18 @@ func (m *ReplicaManager) Put(replicas ...*Replica) error { return m.put(replicas...) 
} -func (m *ReplicaManager) spawn(collectionID UniqueID) (*Replica, error) { +func (m *ReplicaManager) spawn(collectionID UniqueID, rgName string) (*Replica, error) { id, err := m.idAllocator() if err != nil { return nil, err } return &Replica{ Replica: &querypb.Replica{ - ID: id, - CollectionID: collectionID, + ID: id, + CollectionID: collectionID, + ResourceGroup: rgName, }, - Nodes: make(UniqueSet), + nodes: make(UniqueSet), }, nil } @@ -192,7 +240,7 @@ func (m *ReplicaManager) GetByCollectionAndNode(collectionID, nodeID UniqueID) * defer m.rwmutex.RUnlock() for _, replica := range m.replicas { - if replica.CollectionID == collectionID && replica.Nodes.Contain(nodeID) { + if replica.CollectionID == collectionID && replica.nodes.Contain(nodeID) { return replica } } @@ -200,6 +248,34 @@ func (m *ReplicaManager) GetByCollectionAndNode(collectionID, nodeID UniqueID) * return nil } +func (m *ReplicaManager) GetByCollectionAndRG(collectionID int64, rgName string) []*Replica { + m.rwmutex.RLock() + defer m.rwmutex.RUnlock() + + ret := make([]*Replica, 0) + for _, replica := range m.replicas { + if replica.GetCollectionID() == collectionID && replica.GetResourceGroup() == rgName { + ret = append(ret, replica) + } + } + + return ret +} + +func (m *ReplicaManager) GetByResourceGroup(rgName string) []*Replica { + m.rwmutex.RLock() + defer m.rwmutex.RUnlock() + + ret := make([]*Replica, 0) + for _, replica := range m.replicas { + if replica.GetResourceGroup() == rgName { + ret = append(ret, replica) + } + } + + return ret +} + func (m *ReplicaManager) AddNode(replicaID UniqueID, nodes ...UniqueID) error { m.rwmutex.Lock() defer m.rwmutex.Unlock() @@ -227,3 +303,17 @@ func (m *ReplicaManager) RemoveNode(replicaID UniqueID, nodes ...UniqueID) error replica.RemoveNode(nodes...) return m.put(replica) } + +func (m *ReplicaManager) GetResourceGroupByCollection(collection UniqueID) typeutil.Set[string] { + m.rwmutex.Lock() + defer m.rwmutex.Unlock() + + ret := typeutil.NewSet[string]() + for _, r := range m.replicas { + if r.GetCollectionID() == collection { + ret.Insert(r.GetResourceGroup()) + } + } + + return ret +} diff --git a/internal/querycoordv2/meta/replica_manager_test.go b/internal/querycoordv2/meta/replica_manager_test.go index b065866f4b..df35dbd3bb 100644 --- a/internal/querycoordv2/meta/replica_manager_test.go +++ b/internal/querycoordv2/meta/replica_manager_test.go @@ -76,14 +76,14 @@ func (suite *ReplicaManagerSuite) TestSpawn() { mgr := suite.mgr for i, collection := range suite.collections { - replicas, err := mgr.Spawn(collection, suite.replicaNumbers[i]) + replicas, err := mgr.Spawn(collection, suite.replicaNumbers[i], DefaultResourceGroupName) suite.NoError(err) suite.Len(replicas, int(suite.replicaNumbers[i])) } mgr.idAllocator = ErrorIDAllocator() for i, collection := range suite.collections { - _, err := mgr.Spawn(collection, suite.replicaNumbers[i]) + _, err := mgr.Spawn(collection, suite.replicaNumbers[i], DefaultResourceGroupName) suite.Error(err) } } @@ -98,8 +98,8 @@ func (suite *ReplicaManagerSuite) TestGet() { for _, replica := range replicas { suite.Equal(collection, replica.GetCollectionID()) suite.Equal(replica, mgr.Get(replica.GetID())) - suite.Equal(replica.Replica.Nodes, replica.Nodes.Collect()) - replicaNodes[replica.GetID()] = replica.Replica.Nodes + suite.Equal(replica.Replica.GetNodes(), replica.GetNodes()) + replicaNodes[replica.GetID()] = replica.Replica.GetNodes() nodes = append(nodes, replica.Replica.Nodes...) 
} suite.Len(nodes, int(suite.replicaNumbers[i])) @@ -137,9 +137,9 @@ func (suite *ReplicaManagerSuite) TestRecover() { suite.NotNil(replica) suite.EqualValues(1000, replica.CollectionID) suite.EqualValues([]int64{1, 2, 3}, replica.Replica.Nodes) - suite.Len(replica.Nodes, len(replica.Replica.GetNodes())) + suite.Len(replica.GetNodes(), len(replica.Replica.GetNodes())) for _, node := range replica.Replica.GetNodes() { - suite.True(replica.Nodes.Contain(node)) + suite.True(replica.Contains(node)) } } @@ -175,7 +175,7 @@ func (suite *ReplicaManagerSuite) TestNodeManipulate() { suite.NoError(err) replica = mgr.GetByCollectionAndNode(collection, newNode) - suite.Contains(replica.Nodes, newNode) + suite.Contains(replica.GetNodes(), newNode) suite.Contains(replica.Replica.GetNodes(), newNode) err = mgr.RemoveNode(replica.GetID(), firstNode) @@ -192,7 +192,7 @@ func (suite *ReplicaManagerSuite) TestNodeManipulate() { suite.Nil(replica) replica = mgr.GetByCollectionAndNode(collection, newNode) - suite.Contains(replica.Nodes, newNode) + suite.Contains(replica.GetNodes(), newNode) suite.Contains(replica.Replica.GetNodes(), newNode) } } @@ -201,7 +201,7 @@ func (suite *ReplicaManagerSuite) spawnAndPutAll() { mgr := suite.mgr for i, collection := range suite.collections { - replicas, err := mgr.Spawn(collection, suite.replicaNumbers[i]) + replicas, err := mgr.Spawn(collection, suite.replicaNumbers[i], DefaultResourceGroupName) suite.NoError(err) suite.Len(replicas, int(suite.replicaNumbers[i])) for j, replica := range replicas { @@ -212,6 +212,27 @@ func (suite *ReplicaManagerSuite) spawnAndPutAll() { } } +func (suite *ReplicaManagerSuite) TestResourceGroup() { + mgr := NewReplicaManager(suite.idAllocator, suite.store) + replica1, err := mgr.spawn(int64(1000), DefaultResourceGroupName) + replica1.AddNode(1) + suite.NoError(err) + mgr.Put(replica1) + + replica2, err := mgr.spawn(int64(2000), DefaultResourceGroupName) + replica2.AddNode(1) + suite.NoError(err) + mgr.Put(replica2) + + replicas := mgr.GetByResourceGroup(DefaultResourceGroupName) + suite.Len(replicas, 2) + replicas = mgr.GetByCollectionAndRG(int64(1000), DefaultResourceGroupName) + suite.Len(replicas, 1) + rgNames := mgr.GetResourceGroupByCollection(int64(1000)) + suite.Len(rgNames, 1) + suite.True(rgNames.Contain(DefaultResourceGroupName)) +} + func (suite *ReplicaManagerSuite) clearMemory() { suite.mgr.replicas = make(map[int64]*Replica) } diff --git a/internal/querycoordv2/meta/resource_manager.go b/internal/querycoordv2/meta/resource_manager.go new file mode 100644 index 0000000000..f6dd8a3861 --- /dev/null +++ b/internal/querycoordv2/meta/resource_manager.go @@ -0,0 +1,632 @@ +// Licensed to the LF AI & Data foundation under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
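+
+// ResourceManager maintains the mapping from resource-group name to query nodes.
+// A node belongs to at most one group; nodes that come up unassigned are placed
+// into DefaultResourceGroupName by HandleNodeUp, and the default group is created
+// with an effectively unbounded capacity (1,000,000). Each group records both its
+// member nodes and a capacity, so LackOfNodes (capacity minus current members)
+// tells the recovery path how many nodes the group is still owed after failures.
+// AssignNode, UnassignNode and TransferNode persist the change through
+// Store.SaveResourceGroup before touching the in-memory state, and
+// AutoRecoverResourceGroup refills a group from the default group one node at a time.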
+ +package meta + +import ( + "errors" + "sync" + + "github.com/milvus-io/milvus/internal/log" + "github.com/milvus-io/milvus/internal/proto/querypb" + "github.com/milvus-io/milvus/internal/querycoordv2/session" + "github.com/milvus-io/milvus/internal/util/typeutil" + . "github.com/milvus-io/milvus/internal/util/typeutil" + "github.com/samber/lo" + "go.uber.org/zap" +) + +var ( + ErrNodeAlreadyAssign = errors.New("node already assign to other resource group") + ErrRGIsFull = errors.New("resource group is full") + ErrRGIsEmpty = errors.New("resource group is empty") + ErrRGNotExist = errors.New("resource group doesn't exist") + ErrRGAlreadyExist = errors.New("resource group already exist") + ErrRGAssignNodeFailed = errors.New("failed to assign node to resource group") + ErrRGUnAssignNodeFailed = errors.New("failed to unassign node from resource group") + ErrSaveResourceGroupToStore = errors.New("failed to save resource group to store") + ErrRemoveResourceGroupFromStore = errors.New("failed to remove resource group from store") + ErrRecoverResourceGroupToStore = errors.New("failed to recover resource group to store") + ErrNodeNotAssignToRG = errors.New("node hasn't been assign to any resource group") + ErrRGNameIsEmpty = errors.New("resource group name couldn't be empty") + ErrDeleteDefaultRG = errors.New("delete default rg is not permitted") + ErrDeleteNonEmptyRG = errors.New("delete non-empty rg is not permitted") + ErrNodeNotExist = errors.New("node does not exist") + ErrNodeStopped = errors.New("node has been stopped") + ErrRGLimit = errors.New("resource group num reach limit 1024") + ErrNodeNotEnough = errors.New("nodes not enough") +) + +var DefaultResourceGroupName = "__default_resource_group" + +type ResourceGroup struct { + nodes UniqueSet + capacity int +} + +func NewResourceGroup(capacity int) *ResourceGroup { + rg := &ResourceGroup{ + nodes: typeutil.NewUniqueSet(), + capacity: capacity, + } + + return rg +} + +// assign node to resource group +func (rg *ResourceGroup) assignNode(id int64) error { + if rg.containsNode(id) { + return ErrNodeAlreadyAssign + } + + rg.nodes.Insert(id) + rg.capacity++ + + return nil +} + +// unassign node from resource group +func (rg *ResourceGroup) unassignNode(id int64) error { + if !rg.containsNode(id) { + // remove non exist node should be tolerable + return nil + } + + rg.nodes.Remove(id) + rg.capacity-- + + return nil +} + +func (rg *ResourceGroup) handleNodeUp(id int64) error { + if rg.LackOfNodes() == 0 { + return ErrRGIsFull + } + + if rg.containsNode(id) { + return ErrNodeAlreadyAssign + } + + rg.nodes.Insert(id) + return nil +} + +func (rg *ResourceGroup) handleNodeDown(id int64) error { + if !rg.containsNode(id) { + // remove non exist node should be tolerable + return nil + } + + rg.nodes.Remove(id) + return nil +} + +func (rg *ResourceGroup) LackOfNodes() int { + return rg.capacity - len(rg.nodes) +} + +func (rg *ResourceGroup) containsNode(id int64) bool { + return rg.nodes.Contain(id) +} + +func (rg *ResourceGroup) GetNodes() []int64 { + return rg.nodes.Collect() +} + +func (rg *ResourceGroup) GetCapacity() int { + return rg.capacity +} + +type ResourceManager struct { + groups map[string]*ResourceGroup + store Store + nodeMgr *session.NodeManager + + rwmutex sync.RWMutex +} + +func NewResourceManager(store Store, nodeMgr *session.NodeManager) *ResourceManager { + groupMap := make(map[string]*ResourceGroup) + groupMap[DefaultResourceGroupName] = NewResourceGroup(1000000) + return &ResourceManager{ + groups: groupMap, + store: store, + 
nodeMgr: nodeMgr, + } +} + +func (rm *ResourceManager) AddResourceGroup(rgName string) error { + rm.rwmutex.Lock() + defer rm.rwmutex.Unlock() + if len(rgName) == 0 { + return ErrRGNameIsEmpty + } + + if rm.groups[rgName] != nil { + return ErrRGAlreadyExist + } + + if len(rm.groups) >= 1024 { + return ErrRGLimit + } + + err := rm.store.SaveResourceGroup(&querypb.ResourceGroup{ + Name: rgName, + Capacity: 0, + }) + if err != nil { + log.Info("failed to add resource group", + zap.String("rgName", rgName), + zap.Error(err), + ) + return err + } + rm.groups[rgName] = NewResourceGroup(0) + + log.Info("add resource group", + zap.String("rgName", rgName), + ) + return nil +} + +func (rm *ResourceManager) RemoveResourceGroup(rgName string) error { + rm.rwmutex.Lock() + defer rm.rwmutex.Unlock() + if rgName == DefaultResourceGroupName { + return ErrDeleteDefaultRG + } + + if rm.groups[rgName] == nil { + // delete a non-exist rg should be tolerable + return nil + } + + if rm.groups[rgName].GetCapacity() != 0 { + return ErrDeleteNonEmptyRG + } + + err := rm.store.RemoveResourceGroup(rgName) + if err != nil { + log.Info("failed to remove resource group", + zap.String("rgName", rgName), + zap.Error(err), + ) + return err + } + delete(rm.groups, rgName) + + log.Info("remove resource group", + zap.String("rgName", rgName), + ) + return nil +} + +func (rm *ResourceManager) AssignNode(rgName string, node int64) error { + rm.rwmutex.Lock() + defer rm.rwmutex.Unlock() + return rm.assignNode(rgName, node) +} + +func (rm *ResourceManager) assignNode(rgName string, node int64) error { + if rm.groups[rgName] == nil { + return ErrRGNotExist + } + + if rm.nodeMgr.Get(node) == nil { + return ErrNodeNotExist + } + + if ok, _ := rm.nodeMgr.IsStoppingNode(node); ok { + return ErrNodeStopped + } + + rm.checkRGNodeStatus(rgName) + if rm.checkNodeAssigned(node) { + return ErrNodeAlreadyAssign + } + + newNodes := rm.groups[rgName].GetNodes() + newNodes = append(newNodes, node) + err := rm.store.SaveResourceGroup(&querypb.ResourceGroup{ + Name: rgName, + Capacity: int32(rm.groups[rgName].GetCapacity()) + 1, + Nodes: newNodes, + }) + if err != nil { + log.Info("failed to add node to resource group", + zap.String("rgName", rgName), + zap.Int64("node", node), + zap.Error(err), + ) + return err + } + + err = rm.groups[rgName].assignNode(node) + if err != nil { + return err + } + + log.Info("add node to resource group", + zap.String("rgName", rgName), + zap.Int64("node", node), + ) + + return nil +} + +func (rm *ResourceManager) checkNodeAssigned(node int64) bool { + for _, group := range rm.groups { + if group.containsNode(node) { + return true + } + } + + return false +} + +func (rm *ResourceManager) UnassignNode(rgName string, node int64) error { + rm.rwmutex.Lock() + defer rm.rwmutex.Unlock() + + return rm.unassignNode(rgName, node) +} + +func (rm *ResourceManager) unassignNode(rgName string, node int64) error { + if rm.groups[rgName] == nil { + return ErrRGNotExist + } + + if rm.nodeMgr.Get(node) == nil { + // remove non exist node should be tolerable + return nil + } + + newNodes := make([]int64, 0) + for nid := range rm.groups[rgName].nodes { + if nid != node { + newNodes = append(newNodes, nid) + } + } + + err := rm.store.SaveResourceGroup(&querypb.ResourceGroup{ + Name: rgName, + Capacity: int32(rm.groups[rgName].GetCapacity()) - 1, + Nodes: newNodes, + }) + if err != nil { + log.Info("remove node from resource group", + zap.String("rgName", rgName), + zap.Int64("node", node), + zap.Error(err), + ) + return err + } + + 
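+ // prune members that are no longer registered with the NodeManager before the in-memory group is updated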
rm.checkRGNodeStatus(rgName) + err = rm.groups[rgName].unassignNode(node) + if err != nil { + return err + } + + log.Info("remove node from resource group", + zap.String("rgName", rgName), + zap.Int64("node", node), + ) + + return nil +} + +func (rm *ResourceManager) GetNodes(rgName string) ([]int64, error) { + rm.rwmutex.RLock() + defer rm.rwmutex.RUnlock() + if rm.groups[rgName] == nil { + return nil, ErrRGNotExist + } + + rm.checkRGNodeStatus(rgName) + + return rm.groups[rgName].GetNodes(), nil +} + +// return all outbound node +func (rm *ResourceManager) CheckOutboundNodes(replica *Replica) typeutil.UniqueSet { + rm.rwmutex.RLock() + defer rm.rwmutex.RUnlock() + + if rm.groups[replica.GetResourceGroup()] == nil { + return typeutil.NewUniqueSet() + } + rg := rm.groups[replica.GetResourceGroup()] + + ret := typeutil.NewUniqueSet() + for _, node := range replica.GetNodes() { + if !rg.containsNode(node) { + ret.Insert(node) + } + } + + return ret +} + +// return outgoing node num on each rg from this replica +func (rm *ResourceManager) GetOutgoingNodeNumByReplica(replica *Replica) map[string]int32 { + rm.rwmutex.RLock() + defer rm.rwmutex.RUnlock() + + if rm.groups[replica.GetResourceGroup()] == nil { + return nil + } + + rg := rm.groups[replica.GetResourceGroup()] + ret := make(map[string]int32) + for _, node := range replica.GetNodes() { + if !rg.containsNode(node) { + rgName, err := rm.findResourceGroupByNode(node) + if err == nil { + ret[rgName]++ + } + } + } + + return ret +} + +func (rm *ResourceManager) ContainsNode(rgName string, node int64) bool { + rm.rwmutex.RLock() + defer rm.rwmutex.RUnlock() + if rm.groups[rgName] == nil { + return false + } + + rm.checkRGNodeStatus(rgName) + return rm.groups[rgName].containsNode(node) +} + +func (rm *ResourceManager) ContainResourceGroup(rgName string) bool { + rm.rwmutex.RLock() + defer rm.rwmutex.RUnlock() + return rm.groups[rgName] != nil +} + +func (rm *ResourceManager) GetResourceGroup(rgName string) (*ResourceGroup, error) { + rm.rwmutex.RLock() + defer rm.rwmutex.RUnlock() + + if rm.groups[rgName] == nil { + return nil, ErrRGNotExist + } + + rm.checkRGNodeStatus(rgName) + return rm.groups[rgName], nil +} + +func (rm *ResourceManager) ListResourceGroups() []string { + rm.rwmutex.RLock() + defer rm.rwmutex.RUnlock() + + return lo.Keys(rm.groups) +} + +func (rm *ResourceManager) FindResourceGroupByNode(node int64) (string, error) { + rm.rwmutex.RLock() + defer rm.rwmutex.RUnlock() + + return rm.findResourceGroupByNode(node) +} + +func (rm *ResourceManager) findResourceGroupByNode(node int64) (string, error) { + for name, group := range rm.groups { + if group.containsNode(node) { + return name, nil + } + } + + return "", ErrNodeNotAssignToRG +} + +func (rm *ResourceManager) HandleNodeUp(node int64) (string, error) { + rm.rwmutex.Lock() + defer rm.rwmutex.Unlock() + + if rm.nodeMgr.Get(node) == nil { + return "", ErrNodeNotExist + } + + if ok, _ := rm.nodeMgr.IsStoppingNode(node); ok { + return "", ErrNodeStopped + } + + // if node already assign to rg + rgName, err := rm.findResourceGroupByNode(node) + if err == nil { + log.Info("HandleNodeUp: node already assign to resource group", + zap.String("rgName", rgName), + zap.Int64("node", node), + ) + return rgName, nil + } + + // add new node to default rg + rm.groups[DefaultResourceGroupName].handleNodeUp(node) + log.Info("HandleNodeUp: assign node to default resource group", + zap.String("rgName", DefaultResourceGroupName), + zap.Int64("node", node), + ) + return DefaultResourceGroupName, 
nil +} + +func (rm *ResourceManager) HandleNodeDown(node int64) (string, error) { + rm.rwmutex.Lock() + defer rm.rwmutex.Unlock() + + if rm.nodeMgr.Get(node) == nil { + return "", ErrNodeNotExist + } + + rgName, err := rm.findResourceGroupByNode(node) + if err == nil { + log.Info("HandleNodeDown: remove node from resource group", + zap.String("rgName", rgName), + zap.Int64("node", node), + ) + return rgName, rm.groups[rgName].handleNodeDown(node) + } + + return "", ErrNodeNotAssignToRG +} + +func (rm *ResourceManager) TransferNode(from, to string) error { + rm.rwmutex.Lock() + defer rm.rwmutex.Unlock() + + if rm.groups[from] == nil || rm.groups[to] == nil { + return ErrRGNotExist + } + + if len(rm.groups[from].nodes) == 0 { + return ErrRGIsEmpty + } + + rm.checkRGNodeStatus(from) + rm.checkRGNodeStatus(to) + + //todo: a better way to choose a node with least balance cost + node := rm.groups[from].GetNodes()[0] + if err := rm.transferNodeInStore(from, to, node); err != nil { + return err + } + + err := rm.groups[from].unassignNode(node) + if err != nil { + // interrupt transfer, unreachable logic path + return err + } + + err = rm.groups[to].assignNode(node) + if err != nil { + // interrupt transfer, unreachable logic path + return err + } + + return nil +} + +func (rm *ResourceManager) transferNodeInStore(from string, to string, node int64) error { + fromNodeList := make([]int64, 0) + for nid := range rm.groups[from].nodes { + if nid != node { + fromNodeList = append(fromNodeList, nid) + } + } + toNodeList := rm.groups[to].GetNodes() + toNodeList = append(toNodeList, node) + + fromRG := &querypb.ResourceGroup{ + Name: from, + Capacity: int32(rm.groups[from].GetCapacity()) - 1, + Nodes: fromNodeList, + } + + toRG := &querypb.ResourceGroup{ + Name: to, + Capacity: int32(rm.groups[to].GetCapacity()) + 1, + Nodes: toNodeList, + } + + return rm.store.SaveResourceGroup(fromRG, toRG) +} + +// auto recover rg, return recover used node num +func (rm *ResourceManager) AutoRecoverResourceGroup(rgName string) (int, error) { + rm.rwmutex.Lock() + defer rm.rwmutex.Unlock() + + if rm.groups[rgName] == nil { + return 0, ErrRGNotExist + } + + rm.checkRGNodeStatus(rgName) + lackNodesNum := rm.groups[rgName].LackOfNodes() + nodesInDefault := rm.groups[DefaultResourceGroupName].GetNodes() + for i := 0; i < len(nodesInDefault) && i < lackNodesNum; i++ { + //todo: a better way to choose a node with least balance cost + node := nodesInDefault[i] + err := rm.unassignNode(DefaultResourceGroupName, node) + if err != nil { + // interrupt transfer, unreachable logic path + return i + 1, err + } + + err = rm.groups[rgName].handleNodeUp(node) + if err != nil { + // roll back, unreachable logic path + rm.assignNode(DefaultResourceGroupName, node) + } + } + + return lackNodesNum, nil +} + +func (rm *ResourceManager) Recover() error { + rm.rwmutex.Lock() + defer rm.rwmutex.Unlock() + rgs, err := rm.store.GetResourceGroups() + if err != nil { + return ErrRecoverResourceGroupToStore + } + + for _, rg := range rgs { + rm.groups[rg.GetName()] = NewResourceGroup(0) + for _, node := range rg.GetNodes() { + rm.groups[rg.GetName()].assignNode(node) + } + rm.checkRGNodeStatus(rg.GetName()) + log.Info("Recover resource group", + zap.String("rgName", rg.GetName()), + zap.Int64s("nodes", rg.GetNodes()), + zap.Int32("capacity", rg.GetCapacity()), + ) + } + + return nil +} + +// every operation which involves nodes access, should check nodes status first +func (rm *ResourceManager) checkRGNodeStatus(rgName string) { + for _, node := 
range rm.groups[rgName].GetNodes() { + if rm.nodeMgr.Get(node) == nil { + log.Info("found node down, remove it", + zap.String("rgName", rgName), + zap.Int64("nodeID", node), + ) + + rm.groups[rgName].handleNodeDown(node) + } + } +} + +// return lack of nodes num +func (rm *ResourceManager) CheckLackOfNode(rgName string) int { + rm.rwmutex.Lock() + defer rm.rwmutex.Unlock() + if rm.groups[rgName] == nil { + return 0 + } + + rm.checkRGNodeStatus(rgName) + + return rm.groups[rgName].LackOfNodes() +} diff --git a/internal/querycoordv2/meta/resource_manager_test.go b/internal/querycoordv2/meta/resource_manager_test.go new file mode 100644 index 0000000000..e48b152efc --- /dev/null +++ b/internal/querycoordv2/meta/resource_manager_test.go @@ -0,0 +1,294 @@ +// Licensed to the LF AI & Data foundation under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package meta + +import ( + "testing" + + etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" + "github.com/milvus-io/milvus/internal/proto/querypb" + . "github.com/milvus-io/milvus/internal/querycoordv2/params" + "github.com/milvus-io/milvus/internal/querycoordv2/session" + "github.com/milvus-io/milvus/internal/util/etcd" + "github.com/milvus-io/milvus/internal/util/typeutil" + "github.com/stretchr/testify/suite" +) + +type ResourceManagerSuite struct { + suite.Suite + + kv *etcdkv.EtcdKV + manager *ResourceManager +} + +func (suite *ResourceManagerSuite) SetupSuite() { + Params.Init() +} + +func (suite *ResourceManagerSuite) SetupTest() { + config := GenerateEtcdConfig() + cli, err := etcd.GetEtcdClient( + config.UseEmbedEtcd.GetAsBool(), + config.EtcdUseSSL.GetAsBool(), + config.Endpoints.GetAsStrings(), + config.EtcdTLSCert.GetValue(), + config.EtcdTLSKey.GetValue(), + config.EtcdTLSCACert.GetValue(), + config.EtcdTLSMinVersion.GetValue()) + suite.Require().NoError(err) + suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath.GetValue()) + + store := NewMetaStore(suite.kv) + suite.manager = NewResourceManager(store, session.NewNodeManager()) +} + +func (suite *ResourceManagerSuite) TestManipulateResourceGroup() { + // test add rg + err := suite.manager.AddResourceGroup("rg1") + suite.NoError(err) + suite.True(suite.manager.ContainResourceGroup("rg1")) + suite.Len(suite.manager.ListResourceGroups(), 2) + + // test add duplicate rg + err = suite.manager.AddResourceGroup("rg1") + suite.ErrorIs(err, ErrRGAlreadyExist) + // test delete rg + err = suite.manager.RemoveResourceGroup("rg1") + suite.NoError(err) + + // test delete rg which doesn't exist + err = suite.manager.RemoveResourceGroup("rg1") + suite.NoError(err) + // test delete default rg + err = suite.manager.RemoveResourceGroup(DefaultResourceGroupName) + suite.ErrorIs(ErrDeleteDefaultRG, err) +} + +func (suite *ResourceManagerSuite) TestManipulateNode() { + 
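+ // AssignNode validates candidates against the NodeManager (unknown nodes fail with ErrNodeNotExist, stopping nodes with ErrNodeStopped), so register the node before assigning it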
suite.manager.nodeMgr.Add(session.NewNodeInfo(1, "localhost")) + err := suite.manager.AddResourceGroup("rg1") + suite.NoError(err) + // test add node to rg + err = suite.manager.AssignNode("rg1", 1) + suite.NoError(err) + + // test add non-exist node to rg + err = suite.manager.AssignNode("rg1", 2) + suite.ErrorIs(err, ErrNodeNotExist) + + // test add node to non-exist rg + err = suite.manager.AssignNode("rg2", 1) + suite.ErrorIs(err, ErrRGNotExist) + + // test remove node from rg + err = suite.manager.UnassignNode("rg1", 1) + suite.NoError(err) + + // test remove non-exist node from rg + err = suite.manager.UnassignNode("rg1", 2) + suite.NoError(err) + + // test remove node from non-exist rg + err = suite.manager.UnassignNode("rg2", 1) + suite.ErrorIs(err, ErrRGNotExist) + + // add node which already assign to rg to another rg + err = suite.manager.AddResourceGroup("rg2") + suite.NoError(err) + err = suite.manager.AssignNode("rg1", 1) + suite.NoError(err) + err = suite.manager.AssignNode("rg2", 1) + println(err.Error()) + suite.ErrorIs(err, ErrNodeAlreadyAssign) + + // transfer node between rgs + err = suite.manager.TransferNode("rg1", "rg2") + suite.NoError(err) + + // transfer meet non exist rg + err = suite.manager.TransferNode("rgggg", "rg2") + suite.ErrorIs(err, ErrRGNotExist) +} + +func (suite *ResourceManagerSuite) TestHandleNodeUp() { + suite.manager.nodeMgr.Add(session.NewNodeInfo(1, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(2, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(3, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(100, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(101, "localhost")) + err := suite.manager.AddResourceGroup("rg1") + suite.NoError(err) + + suite.manager.AssignNode("rg1", 1) + suite.manager.AssignNode("rg1", 2) + suite.manager.AssignNode("rg1", 3) + + // test query node id not change, expect assign back to origin rg + rg, err := suite.manager.GetResourceGroup("rg1") + suite.NoError(err) + suite.Equal(rg.GetCapacity(), 3) + suite.Equal(len(rg.GetNodes()), 3) + suite.manager.HandleNodeUp(1) + suite.Equal(rg.GetCapacity(), 3) + suite.Equal(len(rg.GetNodes()), 3) + + suite.manager.HandleNodeDown(2) + rg, err = suite.manager.GetResourceGroup("rg1") + suite.NoError(err) + suite.Equal(rg.GetCapacity(), 3) + suite.Equal(len(rg.GetNodes()), 2) + suite.NoError(err) + defaultRG, err := suite.manager.GetResourceGroup(DefaultResourceGroupName) + suite.NoError(err) + oldNodesNum := len(defaultRG.GetNodes()) + suite.manager.HandleNodeUp(101) + rg, err = suite.manager.GetResourceGroup("rg1") + suite.NoError(err) + suite.Equal(rg.GetCapacity(), 3) + suite.Equal(len(rg.GetNodes()), 2) + suite.False(suite.manager.ContainsNode("rg1", 101)) + nodes, err := suite.manager.GetNodes(DefaultResourceGroupName) + suite.NoError(err) + suite.Equal(len(nodes), oldNodesNum+1) +} + +func (suite *ResourceManagerSuite) TestRecover() { + suite.manager.nodeMgr.Add(session.NewNodeInfo(1, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(2, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(3, "localhost")) + err := suite.manager.AddResourceGroup("rg") + suite.NoError(err) + + suite.manager.AssignNode("rg", 1) + suite.manager.AssignNode("rg", 2) + suite.manager.AssignNode("rg", 3) + + suite.manager.UnassignNode("rg", 3) + + // clear resource manager in hack way + delete(suite.manager.groups, "rg") + delete(suite.manager.groups, DefaultResourceGroupName) + suite.manager.Recover() + + rg, err := 
suite.manager.GetResourceGroup("rg") + suite.NoError(err) + suite.Equal(2, rg.GetCapacity()) + suite.True(suite.manager.ContainsNode("rg", 1)) + suite.True(suite.manager.ContainsNode("rg", 2)) + suite.False(suite.manager.ContainsNode("rg", 3)) +} + +func (suite *ResourceManagerSuite) TestCheckOutboundNodes() { + suite.manager.nodeMgr.Add(session.NewNodeInfo(1, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(2, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(3, "localhost")) + err := suite.manager.AddResourceGroup("rg") + suite.NoError(err) + suite.manager.AssignNode("rg", 1) + suite.manager.AssignNode("rg", 2) + suite.manager.AssignNode("rg", 3) + + replica := NewReplica( + &querypb.Replica{ + ID: 1, + CollectionID: 1, + Nodes: []int64{1, 2, 3, 4}, + ResourceGroup: "rg", + }, + typeutil.NewUniqueSet(1, 2, 3, 4), + ) + + outboundNodes := suite.manager.CheckOutboundNodes(replica) + suite.Len(outboundNodes, 1) + suite.True(outboundNodes.Contain(4)) +} + +func (suite *ResourceManagerSuite) TestCheckResourceGroup() { + suite.manager.nodeMgr.Add(session.NewNodeInfo(1, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(2, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(3, "localhost")) + err := suite.manager.AddResourceGroup("rg") + suite.NoError(err) + suite.manager.AssignNode("rg", 1) + suite.manager.AssignNode("rg", 2) + suite.manager.AssignNode("rg", 3) + + suite.manager.HandleNodeDown(1) + lackNodes := suite.manager.CheckLackOfNode("rg") + suite.Equal(lackNodes, 1) + + suite.manager.nodeMgr.Remove(2) + suite.manager.checkRGNodeStatus("rg") + lackNodes = suite.manager.CheckLackOfNode("rg") + suite.Equal(lackNodes, 2) + + rg, err := suite.manager.FindResourceGroupByNode(3) + suite.NoError(err) + suite.Equal(rg, "rg") +} + +func (suite *ResourceManagerSuite) TestGetOutboundNode() { + suite.manager.nodeMgr.Add(session.NewNodeInfo(1, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(2, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(3, "localhost")) + suite.manager.AddResourceGroup("rg") + suite.manager.AddResourceGroup("rg1") + suite.manager.AssignNode("rg", 1) + suite.manager.AssignNode("rg", 2) + suite.manager.AssignNode("rg1", 3) + + replica := NewReplica( + &querypb.Replica{ + ID: 1, + CollectionID: 100, + ResourceGroup: "rg", + Nodes: []int64{1, 2, 3}, + }, + typeutil.NewUniqueSet(1, 2, 3), + ) + + outgoingNodes := suite.manager.GetOutgoingNodeNumByReplica(replica) + suite.NotNil(outgoingNodes) + suite.Len(outgoingNodes, 1) + suite.NotNil(outgoingNodes["rg1"]) + suite.Equal(outgoingNodes["rg1"], int32(1)) +} + +func (suite *ResourceManagerSuite) TestAutoRecover() { + suite.manager.nodeMgr.Add(session.NewNodeInfo(1, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(2, "localhost")) + suite.manager.nodeMgr.Add(session.NewNodeInfo(3, "localhost")) + err := suite.manager.AddResourceGroup("rg") + suite.NoError(err) + suite.manager.AssignNode(DefaultResourceGroupName, 1) + suite.manager.AssignNode(DefaultResourceGroupName, 2) + suite.manager.AssignNode("rg", 3) + + suite.manager.HandleNodeDown(3) + lackNodes := suite.manager.CheckLackOfNode("rg") + suite.Equal(lackNodes, 1) + suite.manager.AutoRecoverResourceGroup("rg") + lackNodes = suite.manager.CheckLackOfNode("rg") + suite.Equal(lackNodes, 0) +} + +func (suite *ResourceManagerSuite) TearDownSuite() { + suite.kv.Close() +} + +func TestResourceManager(t *testing.T) { + suite.Run(t, new(ResourceManagerSuite)) +} diff --git 
a/internal/querycoordv2/meta/segment_dist_manager.go b/internal/querycoordv2/meta/segment_dist_manager.go index 4ce3667366..a12d45911a 100644 --- a/internal/querycoordv2/meta/segment_dist_manager.go +++ b/internal/querycoordv2/meta/segment_dist_manager.go @@ -150,7 +150,7 @@ func (m *SegmentDistManager) GetByShardWithReplica(shard string, replica *Replic ret := make([]*Segment, 0) for nodeID, segments := range m.segments { - if !replica.Nodes.Contain(nodeID) { + if !replica.Contains(nodeID) { continue } for _, segment := range segments { diff --git a/internal/querycoordv2/meta/store.go b/internal/querycoordv2/meta/store.go index b4aef602ba..84436b29dd 100644 --- a/internal/querycoordv2/meta/store.go +++ b/internal/querycoordv2/meta/store.go @@ -28,7 +28,6 @@ import ( "github.com/milvus-io/milvus/internal/kv" "github.com/milvus-io/milvus/internal/metastore" "github.com/milvus-io/milvus/internal/proto/querypb" - "github.com/milvus-io/milvus/internal/util" ) var ( @@ -41,6 +40,7 @@ const ( ReplicaPrefix = "querycoord-replica" CollectionMetaPrefixV1 = "queryCoord-collectionMeta" ReplicaMetaPrefixV1 = "queryCoord-ReplicaMeta" + ResourceGroupPrefix = "queryCoord-ResourceGroup" ) type WatchStoreChan = clientv3.WatchChan @@ -91,6 +91,26 @@ func (s metaStore) SaveReplica(replica *querypb.Replica) error { return s.cli.Save(key, string(value)) } +func (s metaStore) SaveResourceGroup(rgs ...*querypb.ResourceGroup) error { + ret := make(map[string]string) + for _, rg := range rgs { + key := encodeResourceGroupKey(rg.GetName()) + value, err := proto.Marshal(rg) + if err != nil { + return err + } + + ret[key] = string(value) + } + + return s.cli.MultiSave(ret) +} + +func (s metaStore) RemoveResourceGroup(rgName string) error { + key := encodeResourceGroupKey(rgName) + return s.cli.Remove(key) +} + func (s metaStore) GetCollections() ([]*querypb.CollectionLoadInfo, error) { _, values, err := s.cli.LoadWithPrefix(CollectionLoadInfoPrefix) if err != nil { @@ -171,6 +191,25 @@ func (s metaStore) getReplicasFromV1() ([]*querypb.Replica, error) { return ret, nil } +func (s metaStore) GetResourceGroups() ([]*querypb.ResourceGroup, error) { + _, rgs, err := s.cli.LoadWithPrefix(ResourceGroupPrefix) + if err != nil { + return nil, err + } + + ret := make([]*querypb.ResourceGroup, 0, len(rgs)) + for _, value := range rgs { + rg := &querypb.ResourceGroup{} + err := proto.Unmarshal([]byte(value), rg) + if err != nil { + return nil, err + } + + ret = append(ret, rg) + } + return ret, nil +} + func (s metaStore) ReleaseCollection(id int64) error { k := encodeCollectionLoadInfoKey(id) return s.cli.Remove(k) @@ -209,6 +248,6 @@ func encodeCollectionReplicaKey(collection int64) string { return fmt.Sprintf("%s/%d", ReplicaPrefix, collection) } -func encodeHandoffEventKey(collection, partition, segment int64) string { - return fmt.Sprintf("%s/%d/%d/%d", util.HandoffSegmentPrefix, collection, partition, segment) +func encodeResourceGroupKey(rgName string) string { + return fmt.Sprintf("%s/%s", ResourceGroupPrefix, rgName) } diff --git a/internal/querycoordv2/meta/store_test.go b/internal/querycoordv2/meta/store_test.go index 0601461cd6..bfcb547a2c 100644 --- a/internal/querycoordv2/meta/store_test.go +++ b/internal/querycoordv2/meta/store_test.go @@ -17,22 +17,151 @@ package meta import ( + "sort" "testing" + "github.com/milvus-io/milvus/internal/kv" + etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" + "github.com/milvus-io/milvus/internal/proto/querypb" + . 
"github.com/milvus-io/milvus/internal/querycoordv2/params" + "github.com/milvus-io/milvus/internal/util/etcd" "github.com/stretchr/testify/suite" ) type StoreTestSuite struct { suite.Suite + + kv kv.MetaKv store metaStore } -func (suite *StoreTestSuite) SetupTest() { - //kv := memkv.NewMemoryKV() - //suite.store = NewMetaStore(kv) +func (suite *StoreTestSuite) SetupSuite() { + Params.Init() } -func (suite *StoreTestSuite) TearDownTest() {} +func (suite *StoreTestSuite) SetupTest() { + config := GenerateEtcdConfig() + cli, err := etcd.GetEtcdClient( + config.UseEmbedEtcd.GetAsBool(), + config.EtcdUseSSL.GetAsBool(), + config.Endpoints.GetAsStrings(), + config.EtcdTLSCert.GetValue(), + config.EtcdTLSKey.GetValue(), + config.EtcdTLSCACert.GetValue(), + config.EtcdTLSMinVersion.GetValue()) + suite.Require().NoError(err) + suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath.GetValue()) + suite.store = NewMetaStore(suite.kv) +} + +func (suite *StoreTestSuite) TearDownTest() { + if suite.kv != nil { + suite.kv.Close() + } +} + +func (suite *StoreTestSuite) TestCollection() { + suite.store.SaveCollection(&querypb.CollectionLoadInfo{ + CollectionID: 1, + }) + + suite.store.SaveCollection(&querypb.CollectionLoadInfo{ + CollectionID: 2, + }) + + suite.store.SaveCollection(&querypb.CollectionLoadInfo{ + CollectionID: 3, + }) + + suite.store.ReleaseCollection(1) + suite.store.ReleaseCollection(2) + + collections, err := suite.store.GetCollections() + suite.NoError(err) + suite.Len(collections, 1) +} + +func (suite *StoreTestSuite) TestPartition() { + suite.store.SavePartition(&querypb.PartitionLoadInfo{ + PartitionID: 1, + }) + + suite.store.SavePartition(&querypb.PartitionLoadInfo{ + PartitionID: 2, + }) + + suite.store.SavePartition(&querypb.PartitionLoadInfo{ + PartitionID: 3, + }) + + suite.store.ReleasePartition(1) + suite.store.ReleasePartition(2) + + partitions, err := suite.store.GetPartitions() + suite.NoError(err) + suite.Len(partitions, 1) +} + +func (suite *StoreTestSuite) TestReplica() { + suite.store.SaveReplica(&querypb.Replica{ + CollectionID: 1, + ID: 1, + }) + + suite.store.SaveReplica(&querypb.Replica{ + CollectionID: 1, + ID: 2, + }) + + suite.store.SaveReplica(&querypb.Replica{ + CollectionID: 1, + ID: 3, + }) + + suite.store.ReleaseReplica(1, 1) + suite.store.ReleaseReplica(1, 2) + + replicas, err := suite.store.GetReplicas() + suite.NoError(err) + suite.Len(replicas, 1) +} + +func (suite *StoreTestSuite) TestResourceGroup() { + suite.store.SaveResourceGroup(&querypb.ResourceGroup{ + Name: "rg1", + Capacity: 3, + Nodes: []int64{1, 2, 3}, + }) + suite.store.SaveResourceGroup(&querypb.ResourceGroup{ + Name: "rg2", + Capacity: 3, + Nodes: []int64{4, 5}, + }) + + suite.store.SaveResourceGroup(&querypb.ResourceGroup{ + Name: "rg3", + Capacity: 0, + Nodes: []int64{}, + }) + + suite.store.RemoveResourceGroup("rg3") + + groups, err := suite.store.GetResourceGroups() + suite.NoError(err) + suite.Len(groups, 2) + + sort.Slice(groups, func(i, j int) bool { + return groups[i].GetName() < groups[j].GetName() + }) + + suite.Equal("rg1", groups[0].GetName()) + suite.Equal(int32(3), groups[0].GetCapacity()) + suite.Equal([]int64{1, 2, 3}, groups[0].GetNodes()) + + suite.Equal("rg2", groups[1].GetName()) + suite.Equal(int32(3), groups[1].GetCapacity()) + suite.Equal([]int64{4, 5}, groups[1].GetNodes()) +} func (suite *StoreTestSuite) TestLoadRelease() { // TODO(sunby): add ut diff --git a/internal/querycoordv2/meta/target_manager_test.go b/internal/querycoordv2/meta/target_manager_test.go index 
ab96b0869a..ff13d6f5d3 100644 --- a/internal/querycoordv2/meta/target_manager_test.go +++ b/internal/querycoordv2/meta/target_manager_test.go @@ -27,6 +27,7 @@ import ( "github.com/milvus-io/milvus/internal/proto/datapb" "github.com/milvus-io/milvus/internal/proto/querypb" . "github.com/milvus-io/milvus/internal/querycoordv2/params" + "github.com/milvus-io/milvus/internal/querycoordv2/session" "github.com/milvus-io/milvus/internal/util/etcd" "github.com/milvus-io/milvus/internal/util/typeutil" ) @@ -101,7 +102,7 @@ func (suite *TargetManagerSuite) SetupTest() { // meta store := NewMetaStore(suite.kv) idAllocator := RandomIncrementIDAllocator() - suite.meta = NewMeta(idAllocator, store) + suite.meta = NewMeta(idAllocator, store, session.NewNodeManager()) suite.broker = NewMockBroker(suite.T()) suite.mgr = NewTargetManager(suite.broker, suite.meta) diff --git a/internal/querycoordv2/observers/collection_observer_test.go b/internal/querycoordv2/observers/collection_observer_test.go index b74ecf45e5..fec8699ec1 100644 --- a/internal/querycoordv2/observers/collection_observer_test.go +++ b/internal/querycoordv2/observers/collection_observer_test.go @@ -32,6 +32,7 @@ import ( "github.com/milvus-io/milvus/internal/proto/querypb" "github.com/milvus-io/milvus/internal/querycoordv2/meta" . "github.com/milvus-io/milvus/internal/querycoordv2/params" + "github.com/milvus-io/milvus/internal/querycoordv2/session" "github.com/milvus-io/milvus/internal/util/etcd" "github.com/milvus-io/milvus/internal/util/paramtable" ) @@ -178,7 +179,7 @@ func (suite *CollectionObserverSuite) SetupTest() { // Dependencies suite.dist = meta.NewDistributionManager() - suite.meta = meta.NewMeta(suite.idAllocator, suite.store) + suite.meta = meta.NewMeta(suite.idAllocator, suite.store, session.NewNodeManager()) suite.broker = meta.NewMockBroker(suite.T()) suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta) suite.targetObserver = NewTargetObserver(suite.meta, @@ -323,7 +324,7 @@ func (suite *CollectionObserverSuite) loadAll() { func (suite *CollectionObserverSuite) load(collection int64) { // Mock meta data - replicas, err := suite.meta.ReplicaManager.Spawn(collection, suite.replicaNumber[collection]) + replicas, err := suite.meta.ReplicaManager.Spawn(collection, suite.replicaNumber[collection], meta.DefaultResourceGroupName) suite.NoError(err) for _, replica := range replicas { replica.AddNode(suite.nodes...) diff --git a/internal/querycoordv2/observers/leader_observer_test.go b/internal/querycoordv2/observers/leader_observer_test.go index b9dd608a7a..eaf49b7b3a 100644 --- a/internal/querycoordv2/observers/leader_observer_test.go +++ b/internal/querycoordv2/observers/leader_observer_test.go @@ -67,7 +67,7 @@ func (suite *LeaderObserverTestSuite) SetupTest() { // meta store := meta.NewMetaStore(suite.kv) idAllocator := RandomIncrementIDAllocator() - suite.meta = meta.NewMeta(idAllocator, store) + suite.meta = meta.NewMeta(idAllocator, store, session.NewNodeManager()) suite.broker = meta.NewMockBroker(suite.T()) suite.mockCluster = session.NewMockCluster(suite.T()) diff --git a/internal/querycoordv2/observers/replica_observer.go b/internal/querycoordv2/observers/replica_observer.go new file mode 100644 index 0000000000..1e099319b2 --- /dev/null +++ b/internal/querycoordv2/observers/replica_observer.go @@ -0,0 +1,112 @@ +// Licensed to the LF AI & Data foundation under one +// or more contributor license agreements. 
See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package observers
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"go.uber.org/zap"
+
+	"github.com/milvus-io/milvus/internal/log"
+	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
+	"github.com/milvus-io/milvus/internal/querycoordv2/params"
+)
+
+// check replicas, find outbound nodes, and remove a node from its replica once all of its segments/channels have been moved
+type ReplicaObserver struct {
+	c       chan struct{}
+	wg      sync.WaitGroup
+	meta    *meta.Meta
+	distMgr *meta.DistributionManager
+
+	stopOnce sync.Once
+}
+
+func NewReplicaObserver(meta *meta.Meta, distMgr *meta.DistributionManager) *ReplicaObserver {
+	return &ReplicaObserver{
+		c:       make(chan struct{}),
+		meta:    meta,
+		distMgr: distMgr,
+	}
+}
+
+func (ob *ReplicaObserver) Start(ctx context.Context) {
+	ob.wg.Add(1)
+	go ob.schedule(ctx)
+}
+
+func (ob *ReplicaObserver) Stop() {
+	ob.stopOnce.Do(func() {
+		close(ob.c)
+		ob.wg.Wait()
+	})
+}
+
+func (ob *ReplicaObserver) schedule(ctx context.Context) {
+	defer ob.wg.Done()
+	log.Info("Start check replica loop")
+
+	ticker := time.NewTicker(params.Params.QueryCoordCfg.CheckNodeInReplicaInterval.GetAsDuration(time.Second))
+	for {
+		select {
+		case <-ctx.Done():
+			log.Info("Close replica observer due to context canceled")
+			return
+		case <-ob.c:
+			log.Info("Close replica observer")
+			return
+
+		case <-ticker.C:
+			ob.checkNodesInReplica()
+		}
+	}
+}
+
+func (ob *ReplicaObserver) checkNodesInReplica() {
+	collections := ob.meta.GetAll()
+	for _, collectionID := range collections {
+		replicas := ob.meta.ReplicaManager.GetByCollection(collectionID)
+
+		for _, replica := range replicas {
+			outboundNodes := ob.meta.ResourceManager.CheckOutboundNodes(replica)
+			if len(outboundNodes) > 0 {
+				log.RatedInfo(10, "found outbound nodes in replica",
+					zap.Int64("collectionID", replica.GetCollectionID()),
+					zap.Int64("replicaID", replica.GetID()),
+					zap.Int64s("allOutboundNodes", outboundNodes.Collect()),
+				)
+
+				for node := range outboundNodes {
+					channels := ob.distMgr.ChannelDistManager.GetByCollectionAndNode(collectionID, node)
+					segments := ob.distMgr.SegmentDistManager.GetByCollectionAndNode(collectionID, node)
+
+					if len(channels) == 0 && len(segments) == 0 {
+						replica.RemoveNode(node)
+						log.Info("all segments/channels have been removed from outbound node, remove it from replica",
+							zap.Int64("collectionID", replica.GetCollectionID()),
+							zap.Int64("replicaID", replica.GetID()),
+							zap.Int64("removedNode", node),
+							zap.Int64s("availableNodes", replica.GetNodes()),
+						)
+					}
+				}
+			}
+		}
+	}
+}
diff --git a/internal/querycoordv2/observers/replica_observer_test.go b/internal/querycoordv2/observers/replica_observer_test.go
new file mode 100644
index 0000000000..c32e72a30c
--- /dev/null
+++ b/internal/querycoordv2/observers/replica_observer_test.go
@@ -0,0 +1,134 @@
+// Licensed to the LF AI & Data 
foundation under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package observers + +import ( + "context" + "testing" + "time" + + etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" + "github.com/milvus-io/milvus/internal/querycoordv2/meta" + . "github.com/milvus-io/milvus/internal/querycoordv2/params" + "github.com/milvus-io/milvus/internal/querycoordv2/session" + "github.com/milvus-io/milvus/internal/querycoordv2/utils" + "github.com/milvus-io/milvus/internal/util/etcd" + "github.com/milvus-io/milvus/internal/util/paramtable" + "github.com/stretchr/testify/suite" +) + +type ReplicaObserverSuite struct { + suite.Suite + + kv *etcdkv.EtcdKV + //dependency + meta *meta.Meta + distMgr *meta.DistributionManager + + observer *ReplicaObserver + + collectionID int64 + partitionID int64 +} + +func (suite *ReplicaObserverSuite) SetupSuite() { + paramtable.Init() + paramtable.Get().Save(Params.QueryCoordCfg.CheckNodeInReplicaInterval.Key, "1") +} + +func (suite *ReplicaObserverSuite) SetupTest() { + var err error + config := GenerateEtcdConfig() + cli, err := etcd.GetEtcdClient( + config.UseEmbedEtcd.GetAsBool(), + config.EtcdUseSSL.GetAsBool(), + config.Endpoints.GetAsStrings(), + config.EtcdTLSCert.GetValue(), + config.EtcdTLSKey.GetValue(), + config.EtcdTLSCACert.GetValue(), + config.EtcdTLSMinVersion.GetValue()) + suite.Require().NoError(err) + suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath.GetValue()) + + // meta + store := meta.NewMetaStore(suite.kv) + idAllocator := RandomIncrementIDAllocator() + suite.meta = meta.NewMeta(idAllocator, store, session.NewNodeManager()) + + suite.distMgr = meta.NewDistributionManager() + suite.observer = NewReplicaObserver(suite.meta, suite.distMgr) + suite.observer.Start(context.TODO()) + suite.collectionID = int64(1000) + suite.partitionID = int64(100) + + suite.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, 1) + err = suite.meta.CollectionManager.PutCollection(utils.CreateTestCollection(suite.collectionID, 1)) + suite.NoError(err) + replicas, err := suite.meta.ReplicaManager.Spawn(suite.collectionID, 1, meta.DefaultResourceGroupName) + suite.NoError(err) + err = suite.meta.ReplicaManager.Put(replicas...) 
+ suite.NoError(err) +} + +func (suite *ReplicaObserverSuite) TestCheckNodesInReplica() { + replicas := suite.meta.ReplicaManager.GetByCollection(suite.collectionID) + + suite.distMgr.ChannelDistManager.Update(1, utils.CreateTestChannel(suite.collectionID, 2, 1, "test-insert-channel1")) + suite.distMgr.SegmentDistManager.Update(1, utils.CreateTestSegment(suite.collectionID, suite.partitionID, 1, 100, 1, "test-insert-channel1")) + replicas[0].AddNode(1) + suite.distMgr.ChannelDistManager.Update(100, utils.CreateTestChannel(suite.collectionID, 100, 1, "test-insert-channel2")) + suite.distMgr.SegmentDistManager.Update(100, utils.CreateTestSegment(suite.collectionID, suite.partitionID, 2, 100, 1, "test-insert-channel2")) + replicas[0].AddNode(100) + + suite.Eventually(func() bool { + // node 100 should be kept + replicas := suite.meta.ReplicaManager.GetByCollection(suite.collectionID) + + for _, node := range replicas[0].GetNodes() { + if node == 100 { + return true + } + } + return false + }, 6*time.Second, 2*time.Second) + suite.Len(replicas[0].GetNodes(), 2) + + suite.distMgr.ChannelDistManager.Update(100) + suite.distMgr.SegmentDistManager.Update(100) + + suite.Eventually(func() bool { + // node 100 should be removed + replicas := suite.meta.ReplicaManager.GetByCollection(suite.collectionID) + + for _, node := range replicas[0].GetNodes() { + if node == 100 { + return false + } + } + return true + }, 5*time.Second, 1*time.Second) + suite.Len(replicas[0].GetNodes(), 1) + suite.Equal([]int64{1}, replicas[0].GetNodes()) +} + +func (suite *ReplicaObserverSuite) TearDownSuite() { + suite.kv.Close() + suite.observer.Stop() +} + +func TestReplicaObserver(t *testing.T) { + suite.Run(t, new(ReplicaObserverSuite)) +} diff --git a/internal/querycoordv2/observers/resource_observer.go b/internal/querycoordv2/observers/resource_observer.go new file mode 100644 index 0000000000..7c78241291 --- /dev/null +++ b/internal/querycoordv2/observers/resource_observer.go @@ -0,0 +1,107 @@ +// Licensed to the LF AI & Data foundation under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package observers
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/milvus-io/milvus/internal/log"
+	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
+	"github.com/milvus-io/milvus/internal/querycoordv2/params"
+	"go.uber.org/zap"
+)
+
+// check whether a resource group lacks nodes; if so, try to transfer nodes from the default resource group
+type ResourceObserver struct {
+	c    chan struct{}
+	wg   sync.WaitGroup
+	meta *meta.Meta
+
+	stopOnce sync.Once
+}
+
+func NewResourceObserver(meta *meta.Meta) *ResourceObserver {
+	return &ResourceObserver{
+		c:    make(chan struct{}),
+		meta: meta,
+	}
+}
+
+func (ob *ResourceObserver) Start(ctx context.Context) {
+	ob.wg.Add(1)
+	go ob.schedule(ctx)
+}
+
+func (ob *ResourceObserver) Stop() {
+	ob.stopOnce.Do(func() {
+		close(ob.c)
+		ob.wg.Wait()
+	})
+}
+
+func (ob *ResourceObserver) schedule(ctx context.Context) {
+	defer ob.wg.Done()
+	log.Info("Start check resource group loop")
+
+	ticker := time.NewTicker(params.Params.QueryCoordCfg.CheckResourceGroupInterval.GetAsDuration(time.Second))
+	for {
+		select {
+		case <-ctx.Done():
+			log.Info("Close resource group observer due to context canceled")
+			return
+		case <-ob.c:
+			log.Info("Close resource group observer")
+			return
+
+		case <-ticker.C:
+			ob.checkResourceGroup()
+		}
+	}
+}
+
+func (ob *ResourceObserver) checkResourceGroup() {
+	manager := ob.meta.ResourceManager
+	rgNames := manager.ListResourceGroups()
+
+	enableRGAutoRecover := params.Params.QueryCoordCfg.EnableRGAutoRecover.GetAsBool()
+
+	for _, rgName := range rgNames {
+		if rgName == meta.DefaultResourceGroupName {
+			continue
+		}
+		lackNodeNum := manager.CheckLackOfNode(rgName)
+		if lackNodeNum > 0 {
+			log.Info("found resource group lacking nodes",
+				zap.String("rgName", rgName),
+				zap.Int("lackNodeNum", lackNodeNum),
+			)
+
+			if enableRGAutoRecover {
+				usedNodeNum, err := manager.AutoRecoverResourceGroup(rgName)
+				if err != nil {
+					log.Warn("failed to recover resource group",
+						zap.String("rgName", rgName),
+						zap.Int("lackNodeNum", lackNodeNum-usedNodeNum),
+						zap.Error(err),
+					)
+				}
+			}
+		}
+	}
+}
diff --git a/internal/querycoordv2/observers/resource_observer_test.go b/internal/querycoordv2/observers/resource_observer_test.go
new file mode 100644
index 0000000000..a1d7c37559
--- /dev/null
+++ b/internal/querycoordv2/observers/resource_observer_test.go
@@ -0,0 +1,111 @@
+// Licensed to the LF AI & Data foundation under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package observers
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	etcdKV "github.com/milvus-io/milvus/internal/kv/etcd"
+	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
+	. 
"github.com/milvus-io/milvus/internal/querycoordv2/params" + "github.com/milvus-io/milvus/internal/querycoordv2/session" + "github.com/milvus-io/milvus/internal/util/etcd" + "github.com/milvus-io/milvus/internal/util/paramtable" + "github.com/stretchr/testify/suite" +) + +type ResourceObserverSuite struct { + suite.Suite + + kv *etcdKV.EtcdKV + //dependency + meta *meta.Meta + observer *ResourceObserver + nodeMgr *session.NodeManager + + collectionID int64 + partitionID int64 +} + +func (suite *ResourceObserverSuite) SetupSuite() { + paramtable.Init() + paramtable.Get().Save(Params.QueryCoordCfg.CheckResourceGroupInterval.Key, "3") +} + +func (suite *ResourceObserverSuite) SetupTest() { + var err error + config := GenerateEtcdConfig() + cli, err := etcd.GetEtcdClient( + config.UseEmbedEtcd.GetAsBool(), + config.EtcdUseSSL.GetAsBool(), + config.Endpoints.GetAsStrings(), + config.EtcdTLSCert.GetValue(), + config.EtcdTLSKey.GetValue(), + config.EtcdTLSCACert.GetValue(), + config.EtcdTLSMinVersion.GetValue()) + suite.Require().NoError(err) + suite.kv = etcdKV.NewEtcdKV(cli, config.MetaRootPath.GetValue()) + + // meta + store := meta.NewMetaStore(suite.kv) + idAllocator := RandomIncrementIDAllocator() + suite.nodeMgr = session.NewNodeManager() + suite.meta = meta.NewMeta(idAllocator, store, suite.nodeMgr) + + suite.observer = NewResourceObserver(suite.meta) + suite.observer.Start(context.TODO()) + + for i := 1; i < 10; i++ { + suite.nodeMgr.Add(session.NewNodeInfo(int64(i), "localhost")) + suite.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, int64(i)) + } +} + +func (suite *ResourceObserverSuite) TestCheckNodesInReplica() { + suite.meta.ResourceManager.AddResourceGroup("rg") + suite.nodeMgr.Add(session.NewNodeInfo(int64(100), "localhost")) + suite.nodeMgr.Add(session.NewNodeInfo(int64(101), "localhost")) + suite.nodeMgr.Add(session.NewNodeInfo(int64(102), "localhost")) + suite.meta.ResourceManager.AssignNode("rg", 100) + suite.meta.ResourceManager.AssignNode("rg", 101) + suite.meta.ResourceManager.AssignNode("rg", 102) + suite.meta.ResourceManager.HandleNodeDown(100) + suite.meta.ResourceManager.HandleNodeDown(101) + + //before auto recover rg + suite.Eventually(func() bool { + lackNodesNum := suite.meta.ResourceManager.CheckLackOfNode("rg") + return lackNodesNum == 2 + }, 5*time.Second, 1*time.Second) + + // after auto recover rg + suite.Eventually(func() bool { + lackNodesNum := suite.meta.ResourceManager.CheckLackOfNode("rg") + return lackNodesNum == 0 + }, 5*time.Second, 1*time.Second) + +} + +func (suite *ResourceObserverSuite) TearDownSuite() { + suite.kv.Close() + suite.observer.Stop() +} + +func TestResourceObserver(t *testing.T) { + suite.Run(t, new(ResourceObserverSuite)) +} diff --git a/internal/querycoordv2/observers/target_observer_test.go b/internal/querycoordv2/observers/target_observer_test.go index baab112156..838e4e2242 100644 --- a/internal/querycoordv2/observers/target_observer_test.go +++ b/internal/querycoordv2/observers/target_observer_test.go @@ -29,6 +29,7 @@ import ( "github.com/milvus-io/milvus/internal/proto/querypb" "github.com/milvus-io/milvus/internal/querycoordv2/meta" . 
"github.com/milvus-io/milvus/internal/querycoordv2/params" + "github.com/milvus-io/milvus/internal/querycoordv2/session" "github.com/milvus-io/milvus/internal/querycoordv2/utils" "github.com/milvus-io/milvus/internal/util/etcd" "github.com/milvus-io/milvus/internal/util/paramtable" @@ -74,7 +75,7 @@ func (suite *TargetObserverSuite) SetupTest() { // meta store := meta.NewMetaStore(suite.kv) idAllocator := RandomIncrementIDAllocator() - suite.meta = meta.NewMeta(idAllocator, store) + suite.meta = meta.NewMeta(idAllocator, store, session.NewNodeManager()) suite.broker = meta.NewMockBroker(suite.T()) suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta) @@ -86,7 +87,7 @@ func (suite *TargetObserverSuite) SetupTest() { err = suite.meta.CollectionManager.PutCollection(utils.CreateTestCollection(suite.collectionID, 1)) suite.NoError(err) - replicas, err := suite.meta.ReplicaManager.Spawn(suite.collectionID, 1) + replicas, err := suite.meta.ReplicaManager.Spawn(suite.collectionID, 1, meta.DefaultResourceGroupName) suite.NoError(err) replicas[0].AddNode(2) err = suite.meta.ReplicaManager.Put(replicas...) @@ -212,6 +213,6 @@ func (suite *TargetObserverSuite) TearDownSuite() { suite.observer.Stop() } -func TestTargetManager(t *testing.T) { +func TestTargetObserver(t *testing.T) { suite.Run(t, new(TargetObserverSuite)) } diff --git a/internal/querycoordv2/server.go b/internal/querycoordv2/server.go index d8bff40973..4574c1c68b 100644 --- a/internal/querycoordv2/server.go +++ b/internal/querycoordv2/server.go @@ -103,6 +103,8 @@ type Server struct { collectionObserver *observers.CollectionObserver leaderObserver *observers.LeaderObserver targetObserver *observers.TargetObserver + replicaObserver *observers.ReplicaObserver + resourceObserver *observers.ResourceObserver balancer balance.Balance @@ -177,13 +179,13 @@ func (s *Server) Init() error { s.metricsCacheManager = metricsinfo.NewMetricsCacheManager() // Init meta + s.nodeMgr = session.NewNodeManager() err = s.initMeta() if err != nil { return err } // Init session log.Info("init session") - s.nodeMgr = session.NewNodeManager() s.cluster = session.NewCluster(s.nodeMgr, s.queryNodeCreator) // Init schedulers @@ -244,7 +246,7 @@ func (s *Server) initMeta() error { log.Info("init meta") s.store = meta.NewMetaStore(s.kv) - s.meta = meta.NewMeta(s.idAllocator, s.store) + s.meta = meta.NewMeta(s.idAllocator, s.store, s.nodeMgr) log.Info("recover meta...") err := s.meta.CollectionManager.Recover() @@ -262,6 +264,12 @@ func (s *Server) initMeta() error { return err } + err = s.meta.ResourceManager.Recover() + if err != nil { + log.Error("failed to recover resource groups") + return err + } + s.dist = &meta.DistributionManager{ SegmentDistManager: meta.NewSegmentDistManager(), ChannelDistManager: meta.NewChannelDistManager(), @@ -297,6 +305,13 @@ func (s *Server) initObserver() { s.targetMgr, s.targetObserver, ) + + s.replicaObserver = observers.NewReplicaObserver( + s.meta, + s.dist, + ) + + s.resourceObserver = observers.NewResourceObserver(s.meta) } func (s *Server) afterStart() { @@ -360,6 +375,8 @@ func (s *Server) startServerLoop() { s.collectionObserver.Start(s.ctx) s.leaderObserver.Start(s.ctx) s.targetObserver.Start(s.ctx) + s.replicaObserver.Start(s.ctx) + s.resourceObserver.Start(s.ctx) } func (s *Server) Stop() error { @@ -403,6 +420,12 @@ func (s *Server) Stop() error { if s.targetObserver != nil { s.targetObserver.Stop() } + if s.replicaObserver != nil { + s.replicaObserver.Stop() + } + if s.resourceObserver != nil { + 
s.resourceObserver.Stop() + } s.wg.Wait() log.Info("QueryCoord stop successfully") @@ -580,17 +603,33 @@ func (s *Server) handleNodeUp(node int64) { s.taskScheduler.AddExecutor(node) s.distController.StartDistInstance(s.ctx, node) + // need assign to new rg and replica + rgName, err := s.meta.ResourceManager.HandleNodeUp(node) + if err != nil { + log.Warn("HandleNodeUp: failed to assign node to resource group", + zap.Error(err), + ) + return + } + + log.Info("HandleNodeUp: assign node to resource group", + zap.String("resourceGroup", rgName), + ) + for _, collection := range s.meta.CollectionManager.GetAll() { log := log.With(zap.Int64("collectionID", collection)) replica := s.meta.ReplicaManager.GetByCollectionAndNode(collection, node) if replica == nil { - replicas := s.meta.ReplicaManager.GetByCollection(collection) + replicas := s.meta.ReplicaManager.GetByCollectionAndRG(collection, rgName) + if len(replicas) == 0 { + continue + } sort.Slice(replicas, func(i, j int) bool { - return replicas[i].Nodes.Len() < replicas[j].Nodes.Len() + return replicas[i].Len() < replicas[j].Len() }) replica := replicas[0] // TODO(yah01): this may fail, need a component to check whether a node is assigned - err := s.meta.ReplicaManager.AddNode(replica.GetID(), node) + err = s.meta.ReplicaManager.AddNode(replica.GetID(), node) if err != nil { log.Warn("failed to assign node to replicas", zap.Int64("replicaID", replica.GetID()), @@ -608,20 +647,6 @@ func (s *Server) handleNodeDown(node int64) { s.taskScheduler.RemoveExecutor(node) s.distController.Remove(node) - // Refresh the targets, to avoid consuming messages too early from channel - // FIXME(yah01): the leads to miss data, the segments flushed between the two check points - // are missed, it will recover for a while. 
- channels := s.dist.ChannelDistManager.GetByNode(node) - for _, channel := range channels { - _, err := s.targetObserver.UpdateNextTarget(channel.GetCollectionID()) - if err != nil { - msg := "failed to update next targets for collection" - log.Error(msg, - zap.Error(err)) - continue - } - } - // Clear dist s.dist.LeaderViewManager.Update(node) s.dist.ChannelDistManager.Update(node) @@ -647,6 +672,19 @@ func (s *Server) handleNodeDown(node int64) { // Clear tasks s.taskScheduler.RemoveByNode(node) + + rgName, err := s.meta.ResourceManager.HandleNodeDown(node) + if err != nil { + log.Warn("HandleNodeDown: failed to remove node from resource group", + zap.String("resourceGroup", rgName), + zap.Error(err), + ) + return + } + + log.Info("HandleNodeDown: remove node from resource group", + zap.String("resourceGroup", rgName), + ) } // checkReplicas checks whether replica contains offline node, and remove those nodes @@ -657,7 +695,7 @@ func (s *Server) checkReplicas() { for _, replica := range replicas { replica := replica.Clone() toRemove := make([]int64, 0) - for node := range replica.Nodes { + for _, node := range replica.GetNodes() { if s.nodeMgr.Get(node) == nil { toRemove = append(toRemove, node) } diff --git a/internal/querycoordv2/server_test.go b/internal/querycoordv2/server_test.go index b71a7574c4..cb05176af1 100644 --- a/internal/querycoordv2/server_test.go +++ b/internal/querycoordv2/server_test.go @@ -110,6 +110,7 @@ func (suite *ServerSuite) SetupTest() { suite.Require().NoError(err) ok := suite.waitNodeUp(suite.nodes[i], 5*time.Second) suite.Require().True(ok) + suite.server.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, suite.nodes[i].ID) } suite.loadAll() @@ -184,7 +185,6 @@ func (suite *ServerSuite) TestNodeUp() { } return true }, 5*time.Second, time.Second) - } func (suite *ServerSuite) TestNodeUpdate() { diff --git a/internal/querycoordv2/services.go b/internal/querycoordv2/services.go index 803fba2e92..a1f5dddc61 100644 --- a/internal/querycoordv2/services.go +++ b/internal/querycoordv2/services.go @@ -45,6 +45,16 @@ import ( var ( successStatus = utils.WrapStatus(commonpb.ErrorCode_Success, "") + + ErrCreateResourceGroupFailed = errors.New("failed to create resource group") + ErrDropResourceGroupFailed = errors.New("failed to drop resource group") + ErrAddNodeToRGFailed = errors.New("failed to add node to resource group") + ErrRemoveNodeFromRGFailed = errors.New("failed to remove node from resource group") + ErrTransferNodeFailed = errors.New("failed to transfer node between resource group") + ErrTransferReplicaFailed = errors.New("failed to transfer replica between resource group") + ErrListResourceGroupsFailed = errors.New("failed to list resource group") + ErrDescribeResourceGroupFailed = errors.New("failed to describe resource group") + ErrLoadUseWrongRG = errors.New("load operation should use collection's resource group") ) func (s *Server) ShowCollections(ctx context.Context, req *querypb.ShowCollectionsRequest) (*querypb.ShowCollectionsResponse, error) { @@ -218,6 +228,13 @@ func (s *Server) LoadCollection(ctx context.Context, req *querypb.LoadCollection return s.refreshCollection(ctx, req.GetCollectionID()) } + if err := s.checkResourceGroup(req.GetCollectionID(), req.GetResourceGroups()); err != nil { + msg := "failed to load collection" + log.Warn(msg, zap.Error(err)) + metrics.QueryCoordLoadCount.WithLabelValues(metrics.FailLabel).Inc() + return utils.WrapStatus(commonpb.ErrorCode_IllegalArgument, msg, err), nil + } + loadJob := 
job.NewLoadCollectionJob(ctx, req, s.dist, @@ -282,6 +299,8 @@ func (s *Server) ReleaseCollection(ctx context.Context, req *querypb.ReleaseColl func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error) { log := log.Ctx(ctx).With( zap.Int64("collectionID", req.GetCollectionID()), + zap.Int32("replicaNumber", req.GetReplicaNumber()), + zap.Strings("resourceGroups", req.GetResourceGroups()), ) log.Info("received load partitions request", @@ -300,6 +319,14 @@ func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitions // If refresh mode is ON. if req.GetRefresh() { return s.refreshPartitions(ctx, req.GetCollectionID(), req.GetPartitionIDs()) + + } + + if err := s.checkResourceGroup(req.GetCollectionID(), req.GetResourceGroups()); err != nil { + msg := "failed to load partitions" + log.Warn(msg, zap.Error(ErrLoadUseWrongRG)) + metrics.QueryCoordLoadCount.WithLabelValues(metrics.FailLabel).Inc() + return utils.WrapStatus(commonpb.ErrorCode_IllegalArgument, msg, ErrLoadUseWrongRG), nil } loadJob := job.NewLoadPartitionJob(ctx, @@ -323,6 +350,19 @@ func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitions return successStatus, nil } +func (s *Server) checkResourceGroup(collectionID int64, resourceGroups []string) error { + if len(resourceGroups) != 0 { + collectionUsedRG := s.meta.ReplicaManager.GetResourceGroupByCollection(collectionID) + for _, rgName := range resourceGroups { + if !collectionUsedRG.Contain(rgName) { + return ErrLoadUseWrongRG + } + } + } + + return nil +} + func (s *Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) { log := log.Ctx(ctx).With( zap.Int64("collectionID", req.GetCollectionID()), @@ -637,7 +677,7 @@ func (s *Server) LoadBalance(ctx context.Context, req *querypb.LoadBalanceReques fmt.Sprintf("can't balance, because the source node[%d] is invalid", srcNode), err), nil } for _, dstNode := range req.GetDstNodeIDs() { - if !replica.Nodes.Contain(dstNode) { + if !replica.Contains(dstNode) { msg := "destination nodes have to be in the same replica of source node" log.Warn(msg) return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, msg), nil @@ -924,3 +964,204 @@ func (s *Server) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthReque return &milvuspb.CheckHealthResponse{IsHealthy: true, Reasons: errReasons}, nil } + +func (s *Server) CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) { + log := log.Ctx(ctx).With( + zap.String("rgName", req.GetResourceGroup()), + ) + + log.Info("create resource group request received") + if s.status.Load() != commonpb.StateCode_Healthy { + log.Warn(ErrCreateResourceGroupFailed.Error(), zap.Error(ErrNotHealthy)) + return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, ErrCreateResourceGroupFailed.Error(), ErrNotHealthy), nil + } + + err := s.meta.ResourceManager.AddResourceGroup(req.GetResourceGroup()) + if err != nil { + log.Warn(ErrCreateResourceGroupFailed.Error(), zap.Error(err)) + return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, ErrCreateResourceGroupFailed.Error(), err), nil + } + return successStatus, nil +} + +func (s *Server) DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) { + log := log.Ctx(ctx).With( + zap.String("rgName", req.GetResourceGroup()), + ) + + log.Info("drop resource group request received") + if s.status.Load() != 
commonpb.StateCode_Healthy {
+		log.Warn(ErrDropResourceGroupFailed.Error(), zap.Error(ErrNotHealthy))
+		return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, ErrDropResourceGroupFailed.Error(), ErrNotHealthy), nil
+	}
+
+	err := s.meta.ResourceManager.RemoveResourceGroup(req.GetResourceGroup())
+	if err != nil {
+		log.Warn(ErrDropResourceGroupFailed.Error(), zap.Error(err))
+		return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, ErrDropResourceGroupFailed.Error(), err), nil
+	}
+	return successStatus, nil
+}
+
+func (s *Server) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error) {
+	log := log.Ctx(ctx).With(
+		zap.String("source", req.GetSourceResourceGroup()),
+		zap.String("target", req.GetTargetResourceGroup()),
+	)
+
+	log.Info("transfer node between resource group request received")
+	if s.status.Load() != commonpb.StateCode_Healthy {
+		log.Warn(ErrTransferNodeFailed.Error(), zap.Error(ErrNotHealthy))
+		return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, ErrTransferNodeFailed.Error(), ErrNotHealthy), nil
+	}
+
+	if ok := s.meta.ResourceManager.ContainResourceGroup(req.GetSourceResourceGroup()); !ok {
+		return utils.WrapStatus(commonpb.ErrorCode_IllegalArgument,
+			fmt.Sprintf("the source resource group[%s] doesn't exist", req.GetSourceResourceGroup()), meta.ErrRGNotExist), nil
+	}
+
+	if ok := s.meta.ResourceManager.ContainResourceGroup(req.GetTargetResourceGroup()); !ok {
+		return utils.WrapStatus(commonpb.ErrorCode_IllegalArgument,
+			fmt.Sprintf("the target resource group[%s] doesn't exist", req.GetTargetResourceGroup()), meta.ErrRGNotExist), nil
+	}
+
+	err := s.meta.ResourceManager.TransferNode(req.GetSourceResourceGroup(), req.GetTargetResourceGroup())
+	if err != nil {
+		log.Warn(ErrTransferNodeFailed.Error(), zap.Error(err))
+		return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, ErrTransferNodeFailed.Error(), err), nil
+	}
+
+	return successStatus, nil
+}
+
+func (s *Server) TransferReplica(ctx context.Context, req *querypb.TransferReplicaRequest) (*commonpb.Status, error) {
+	log := log.Ctx(ctx).With(
+		zap.String("source", req.GetSourceResourceGroup()),
+		zap.String("target", req.GetTargetResourceGroup()),
+		zap.Int64("collectionID", req.GetCollectionID()),
+	)
+
+	log.Info("transfer replica request received")
+	if s.status.Load() != commonpb.StateCode_Healthy {
+		log.Warn(ErrTransferReplicaFailed.Error(), zap.Error(ErrNotHealthy))
+		return utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, ErrTransferReplicaFailed.Error(), ErrNotHealthy), nil
+	}
+
+	if ok := s.meta.ResourceManager.ContainResourceGroup(req.GetSourceResourceGroup()); !ok {
+		return utils.WrapStatus(commonpb.ErrorCode_IllegalArgument,
+			fmt.Sprintf("the source resource group[%s] doesn't exist", req.GetSourceResourceGroup()), meta.ErrRGNotExist), nil
+	}
+
+	if ok := s.meta.ResourceManager.ContainResourceGroup(req.GetTargetResourceGroup()); !ok {
+		return utils.WrapStatus(commonpb.ErrorCode_IllegalArgument,
+			fmt.Sprintf("the target resource group[%s] doesn't exist", req.GetTargetResourceGroup()), meta.ErrRGNotExist), nil
+	}
+
+	// for now, we don't support transferring replicas of the same collection to the same resource group
+	replicas := s.meta.ReplicaManager.GetByCollectionAndRG(req.GetCollectionID(), req.GetSourceResourceGroup())
+	if len(replicas) < int(req.GetNumReplica()) {
+		return utils.WrapStatus(commonpb.ErrorCode_IllegalArgument,
+			fmt.Sprintf("found [%d] replicas of collection[%d] in source resource group[%s]",
+				len(replicas), 
req.GetCollectionID(), req.GetSourceResourceGroup())), nil + } + + err := s.transferReplica(req.GetTargetResourceGroup(), replicas[:req.GetNumReplica()]) + if err != nil { + return utils.WrapStatus(commonpb.ErrorCode_IllegalArgument, ErrTransferReplicaFailed.Error(), err), nil + } + + return successStatus, nil +} + +func (s *Server) transferReplica(targetRG string, replicas []*meta.Replica) error { + ret := make([]*meta.Replica, 0) + for _, replica := range replicas { + newReplica := replica.Clone() + newReplica.ResourceGroup = targetRG + + ret = append(ret, newReplica) + } + err := utils.AssignNodesToReplicas(s.meta, targetRG, ret...) + if err != nil { + return err + } + + return s.meta.ReplicaManager.Put(ret...) +} + +func (s *Server) ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) { + log := log.Ctx(ctx) + + log.Info("list resource group request received") + resp := &milvuspb.ListResourceGroupsResponse{ + Status: successStatus, + } + if s.status.Load() != commonpb.StateCode_Healthy { + log.Warn(ErrListResourceGroupsFailed.Error(), zap.Error(ErrNotHealthy)) + resp.Status = utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, ErrListResourceGroupsFailed.Error(), ErrNotHealthy) + return resp, nil + } + + resp.ResourceGroups = s.meta.ResourceManager.ListResourceGroups() + return resp, nil +} + +func (s *Server) DescribeResourceGroup(ctx context.Context, req *querypb.DescribeResourceGroupRequest) (*querypb.DescribeResourceGroupResponse, error) { + log := log.Ctx(ctx).With( + zap.String("rgName", req.GetResourceGroup()), + ) + + log.Info("describe resource group request received") + resp := &querypb.DescribeResourceGroupResponse{ + Status: successStatus, + } + if s.status.Load() != commonpb.StateCode_Healthy { + log.Warn(ErrDescribeResourceGroupFailed.Error(), zap.Error(ErrNotHealthy)) + resp.Status = utils.WrapStatus(commonpb.ErrorCode_UnexpectedError, ErrDescribeResourceGroupFailed.Error(), ErrNotHealthy) + return resp, nil + } + + rg, err := s.meta.ResourceManager.GetResourceGroup(req.GetResourceGroup()) + if err != nil { + resp.Status = utils.WrapStatus(commonpb.ErrorCode_IllegalArgument, ErrDescribeResourceGroupFailed.Error(), err) + return resp, nil + } + + loadedReplicas := make(map[int64]int32) + outgoingNodes := make(map[int64]int32) + replicasInRG := s.meta.GetByResourceGroup(req.GetResourceGroup()) + for _, replica := range replicasInRG { + loadedReplicas[replica.GetCollectionID()]++ + for _, node := range replica.GetNodes() { + if !s.meta.ContainsNode(replica.GetResourceGroup(), node) { + outgoingNodes[replica.GetCollectionID()]++ + } + } + } + incomingNodes := make(map[int64]int32) + collections := s.meta.GetAll() + for _, collection := range collections { + replicas := s.meta.GetByCollection(collection) + + for _, replica := range replicas { + if replica.GetResourceGroup() == req.GetResourceGroup() { + continue + } + for _, node := range replica.GetNodes() { + if s.meta.ContainsNode(req.GetResourceGroup(), node) { + incomingNodes[collection]++ + } + } + } + } + + resp.ResourceGroup = &querypb.ResourceGroupInfo{ + Name: req.GetResourceGroup(), + Capacity: int32(rg.GetCapacity()), + NumAvailableNode: int32(len(rg.GetNodes())), + NumLoadedReplica: loadedReplicas, + NumOutgoingNode: outgoingNodes, + NumIncomingNode: incomingNodes, + } + return resp, nil +} diff --git a/internal/querycoordv2/services_test.go b/internal/querycoordv2/services_test.go index d1d7751621..dc5be0414b 100644 --- 
a/internal/querycoordv2/services_test.go +++ b/internal/querycoordv2/services_test.go @@ -128,7 +128,8 @@ func (suite *ServiceSuite) SetupTest() { suite.store = meta.NewMetaStore(suite.kv) suite.dist = meta.NewDistributionManager() - suite.meta = meta.NewMeta(params.RandomIncrementIDAllocator(), suite.store) + suite.nodeMgr = session.NewNodeManager() + suite.meta = meta.NewMeta(params.RandomIncrementIDAllocator(), suite.store, suite.nodeMgr) suite.broker = meta.NewMockBroker(suite.T()) suite.targetMgr = meta.NewTargetManager(suite.broker, suite.meta) suite.targetObserver = observers.NewTargetObserver( @@ -137,9 +138,10 @@ func (suite *ServiceSuite) SetupTest() { suite.dist, suite.broker, ) - suite.nodeMgr = session.NewNodeManager() for _, node := range suite.nodes { suite.nodeMgr.Add(session.NewNodeInfo(node, "localhost")) + err := suite.meta.ResourceManager.AssignNode(meta.DefaultResourceGroupName, node) + suite.NoError(err) } suite.cluster = session.NewMockCluster(suite.T()) suite.jobScheduler = job.NewScheduler() @@ -334,6 +336,260 @@ func (suite *ServiceSuite) TestLoadCollection() { suite.Contains(resp.Reason, ErrNotHealthy.Error()) } +func (suite *ServiceSuite) TestResourceGroup() { + ctx := context.Background() + server := suite.server + + createRG := &milvuspb.CreateResourceGroupRequest{ + ResourceGroup: "rg1", + } + + resp, err := server.CreateResourceGroup(ctx, createRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode) + + resp, err = server.CreateResourceGroup(ctx, createRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_UnexpectedError, resp.ErrorCode) + suite.Contains(resp.Reason, ErrCreateResourceGroupFailed.Error()) + suite.Contains(resp.Reason, meta.ErrRGAlreadyExist.Error()) + + listRG := &milvuspb.ListResourceGroupsRequest{} + resp1, err := server.ListResourceGroups(ctx, listRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_Success, resp1.Status.ErrorCode) + suite.Len(resp1.ResourceGroups, 2) + + server.nodeMgr.Add(session.NewNodeInfo(1011, "localhost")) + server.nodeMgr.Add(session.NewNodeInfo(1012, "localhost")) + server.nodeMgr.Add(session.NewNodeInfo(1013, "localhost")) + server.nodeMgr.Add(session.NewNodeInfo(1014, "localhost")) + server.meta.ResourceManager.AddResourceGroup("rg11") + server.meta.ResourceManager.AssignNode("rg11", 1011) + server.meta.ResourceManager.AssignNode("rg11", 1012) + server.meta.ResourceManager.AddResourceGroup("rg12") + server.meta.ResourceManager.AssignNode("rg12", 1013) + server.meta.ResourceManager.AssignNode("rg12", 1014) + server.meta.CollectionManager.PutCollection(utils.CreateTestCollection(1, 1)) + server.meta.CollectionManager.PutCollection(utils.CreateTestCollection(2, 1)) + server.meta.ReplicaManager.Put(meta.NewReplica(&querypb.Replica{ + ID: 1, + CollectionID: 1, + Nodes: []int64{1011, 1013}, + ResourceGroup: "rg11"}, + typeutil.NewUniqueSet(1011, 1013)), + ) + server.meta.ReplicaManager.Put(meta.NewReplica(&querypb.Replica{ + ID: 2, + CollectionID: 2, + Nodes: []int64{1012, 1014}, + ResourceGroup: "rg12"}, + typeutil.NewUniqueSet(1012, 1014)), + ) + + describeRG := &querypb.DescribeResourceGroupRequest{ + ResourceGroup: "rg11", + } + resp2, err := server.DescribeResourceGroup(ctx, describeRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_Success, resp2.Status.ErrorCode) + suite.Equal("rg11", resp2.GetResourceGroup().GetName()) + suite.Equal(int32(2), resp2.GetResourceGroup().GetCapacity()) + suite.Equal(int32(2), resp2.GetResourceGroup().GetNumAvailableNode()) + 
suite.Equal(map[int64]int32{1: 1}, resp2.GetResourceGroup().GetNumLoadedReplica()) + suite.Equal(map[int64]int32{2: 1}, resp2.GetResourceGroup().GetNumIncomingNode()) + suite.Equal(map[int64]int32{1: 1}, resp2.GetResourceGroup().GetNumOutgoingNode()) + + dropRG := &milvuspb.DropResourceGroupRequest{ + ResourceGroup: "rg1", + } + + resp3, err := server.DropResourceGroup(ctx, dropRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_Success, resp3.ErrorCode) + + resp4, err := server.ListResourceGroups(ctx, listRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_Success, resp4.Status.ErrorCode) + suite.Len(resp4.GetResourceGroups(), 3) +} + +func (suite *ServiceSuite) TestResourceGroupFailed() { + ctx := context.Background() + server := suite.server + + // illegal argument + describeRG := &querypb.DescribeResourceGroupRequest{ + ResourceGroup: "rfffff", + } + resp, err := server.DescribeResourceGroup(ctx, describeRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_IllegalArgument, resp.Status.ErrorCode) + + // server unhealthy + server.status.Store(commonpb.StateCode_Abnormal) + + createRG := &milvuspb.CreateResourceGroupRequest{ + ResourceGroup: "rg1", + } + + resp1, err := server.CreateResourceGroup(ctx, createRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_UnexpectedError, resp1.ErrorCode) + + listRG := &milvuspb.ListResourceGroupsRequest{} + resp2, err := server.ListResourceGroups(ctx, listRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_UnexpectedError, resp2.Status.ErrorCode) + + describeRG = &querypb.DescribeResourceGroupRequest{ + ResourceGroup: "rg1", + } + resp3, err := server.DescribeResourceGroup(ctx, describeRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_UnexpectedError, resp3.Status.ErrorCode) + + dropRG := &milvuspb.DropResourceGroupRequest{ + ResourceGroup: "rg1", + } + resp4, err := server.DropResourceGroup(ctx, dropRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_UnexpectedError, resp4.ErrorCode) + + resp5, err := server.ListResourceGroups(ctx, listRG) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_UnexpectedError, resp5.Status.ErrorCode) +} + +func (suite *ServiceSuite) TestTransferNode() { + ctx := context.Background() + server := suite.server + + err := server.meta.ResourceManager.AddResourceGroup("rg1") + suite.NoError(err) + err = server.meta.ResourceManager.AddResourceGroup("rg2") + suite.NoError(err) + // test transfer node + resp, err := server.TransferNode(ctx, &milvuspb.TransferNodeRequest{ + SourceResourceGroup: meta.DefaultResourceGroupName, + TargetResourceGroup: "rg1", + }) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode) + nodes, err := server.meta.ResourceManager.GetNodes("rg1") + suite.NoError(err) + suite.Len(nodes, 1) + + // test transfer node meet non-exist source rg + resp, err = server.TransferNode(ctx, &milvuspb.TransferNodeRequest{ + SourceResourceGroup: "rgggg", + TargetResourceGroup: meta.DefaultResourceGroupName, + }) + suite.NoError(err) + suite.Contains(resp.Reason, meta.ErrRGNotExist.Error()) + suite.Equal(commonpb.ErrorCode_IllegalArgument, resp.ErrorCode) + + // test transfer node meet non-exist target rg + resp, err = server.TransferNode(ctx, &milvuspb.TransferNodeRequest{ + SourceResourceGroup: meta.DefaultResourceGroupName, + TargetResourceGroup: "rgggg", + }) + suite.NoError(err) + suite.Contains(resp.Reason, meta.ErrRGNotExist.Error()) + suite.Equal(commonpb.ErrorCode_IllegalArgument, resp.ErrorCode) + + // server unhealthy + 
server.status.Store(commonpb.StateCode_Abnormal) + resp, err = server.TransferNode(ctx, &milvuspb.TransferNodeRequest{ + SourceResourceGroup: meta.DefaultResourceGroupName, + TargetResourceGroup: "rg1", + }) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_UnexpectedError, resp.ErrorCode) +} + +func (suite *ServiceSuite) TestTransferReplica() { + ctx := context.Background() + server := suite.server + + err := server.meta.ResourceManager.AddResourceGroup("rg1") + suite.NoError(err) + err = server.meta.ResourceManager.AddResourceGroup("rg2") + suite.NoError(err) + err = server.meta.ResourceManager.AddResourceGroup("rg3") + suite.NoError(err) + + resp, err := suite.server.TransferReplica(ctx, &querypb.TransferReplicaRequest{ + SourceResourceGroup: meta.DefaultResourceGroupName, + TargetResourceGroup: "rg1", + CollectionID: 1, + NumReplica: 2, + }) + suite.NoError(err) + suite.Contains(resp.Reason, "found [0] replicas of collection[1] in source resource group") + + resp, err = suite.server.TransferReplica(ctx, &querypb.TransferReplicaRequest{ + SourceResourceGroup: "rgg", + TargetResourceGroup: meta.DefaultResourceGroupName, + CollectionID: 1, + NumReplica: 2, + }) + suite.NoError(err) + suite.Equal(resp.ErrorCode, commonpb.ErrorCode_IllegalArgument) + + resp, err = suite.server.TransferReplica(ctx, &querypb.TransferReplicaRequest{ + SourceResourceGroup: meta.DefaultResourceGroupName, + TargetResourceGroup: "rgg", + CollectionID: 1, + NumReplica: 2, + }) + suite.NoError(err) + suite.Equal(resp.ErrorCode, commonpb.ErrorCode_IllegalArgument) + + suite.server.meta.Put(meta.NewReplica(&querypb.Replica{ + CollectionID: 1, + ID: 111, + ResourceGroup: meta.DefaultResourceGroupName, + }, typeutil.NewUniqueSet(1))) + suite.server.meta.Put(meta.NewReplica(&querypb.Replica{ + CollectionID: 1, + ID: 222, + ResourceGroup: meta.DefaultResourceGroupName, + }, typeutil.NewUniqueSet(2))) + + suite.server.nodeMgr.Add(session.NewNodeInfo(1001, "localhost")) + suite.server.nodeMgr.Add(session.NewNodeInfo(1002, "localhost")) + suite.server.nodeMgr.Add(session.NewNodeInfo(1003, "localhost")) + suite.server.nodeMgr.Add(session.NewNodeInfo(1004, "localhost")) + suite.server.meta.AssignNode("rg1", 1001) + suite.server.meta.AssignNode("rg2", 1002) + suite.server.meta.AssignNode("rg3", 1003) + suite.server.meta.AssignNode("rg3", 1004) + + resp, err = suite.server.TransferReplica(ctx, &querypb.TransferReplicaRequest{ + SourceResourceGroup: meta.DefaultResourceGroupName, + TargetResourceGroup: "rg3", + CollectionID: 1, + NumReplica: 2, + }) + + suite.NoError(err) + suite.Equal(resp.ErrorCode, commonpb.ErrorCode_Success) + suite.Len(suite.server.meta.GetByResourceGroup("rg3"), 2) + + // server unhealthy + server.status.Store(commonpb.StateCode_Abnormal) + resp, err = suite.server.TransferReplica(ctx, &querypb.TransferReplicaRequest{ + SourceResourceGroup: meta.DefaultResourceGroupName, + TargetResourceGroup: "rg3", + CollectionID: 1, + NumReplica: 2, + }) + + suite.NoError(err) + suite.Equal(resp.ErrorCode, commonpb.ErrorCode_UnexpectedError) +} + func (suite *ServiceSuite) TestLoadCollectionFailed() { suite.loadAll() ctx := context.Background() @@ -365,6 +621,19 @@ func (suite *ServiceSuite) TestLoadCollectionFailed() { suite.Equal(commonpb.ErrorCode_IllegalArgument, resp.ErrorCode) suite.Contains(resp.Reason, job.ErrLoadParameterMismatched.Error()) } + + // Test load with wrong rg num + for _, collection := range suite.collections { + req := &querypb.LoadCollectionRequest{ + CollectionID: collection, + 
ReplicaNumber: suite.replicaNumber[collection] + 1, + ResourceGroups: []string{"rg1", "rg2"}, + } + resp, err := server.LoadCollection(ctx, req) + suite.NoError(err) + suite.Equal(commonpb.ErrorCode_IllegalArgument, resp.ErrorCode) + suite.Contains(resp.Reason, ErrLoadUseWrongRG.Error()) + } } func (suite *ServiceSuite) TestLoadPartition() { @@ -756,8 +1025,9 @@ func (suite *ServiceSuite) TestLoadBalance() { // Test get balance first segment for _, collection := range suite.collections { replicas := suite.meta.ReplicaManager.GetByCollection(collection) - srcNode := replicas[0].GetNodes()[0] - dstNode := replicas[0].GetNodes()[1] + nodes := replicas[0].GetNodes() + srcNode := nodes[0] + dstNode := nodes[1] suite.updateCollectionStatus(collection, querypb.LoadStatus_Loaded) suite.updateSegmentDist(collection, srcNode) segments := suite.getAllSegments(collection) @@ -883,8 +1153,9 @@ func (suite *ServiceSuite) TestLoadBalanceFailed() { // Test load balance with not fully loaded for _, collection := range suite.collections { replicas := suite.meta.ReplicaManager.GetByCollection(collection) - srcNode := replicas[0].GetNodes()[0] - dstNode := replicas[0].GetNodes()[1] + nodes := replicas[0].GetNodes() + srcNode := nodes[0] + dstNode := nodes[1] suite.updateCollectionStatus(collection, querypb.LoadStatus_Loading) segments := suite.getAllSegments(collection) req := &querypb.LoadBalanceRequest{ @@ -926,8 +1197,9 @@ func (suite *ServiceSuite) TestLoadBalanceFailed() { // Test balance task failed for _, collection := range suite.collections { replicas := suite.meta.ReplicaManager.GetByCollection(collection) - srcNode := replicas[0].GetNodes()[0] - dstNode := replicas[0].GetNodes()[1] + nodes := replicas[0].GetNodes() + srcNode := nodes[0] + dstNode := nodes[1] suite.updateCollectionStatus(collection, querypb.LoadStatus_Loaded) suite.updateSegmentDist(collection, srcNode) segments := suite.getAllSegments(collection) @@ -1171,6 +1443,11 @@ func (suite *ServiceSuite) TestGetShardLeadersFailed() { suite.Equal(commonpb.ErrorCode_NoReplicaAvailable, resp.Status.ErrorCode) // Segment not fully loaded + for _, node := range suite.nodes { + suite.dist.SegmentDistManager.Update(node) + suite.dist.ChannelDistManager.Update(node) + suite.dist.LeaderViewManager.Update(node) + } suite.updateChannelDistWithoutSegment(collection) suite.fetchHeartbeats(time.Now()) resp, err = server.GetShardLeaders(ctx, req) diff --git a/internal/querycoordv2/task/task_test.go b/internal/querycoordv2/task/task_test.go index 66383edb53..35c25311b9 100644 --- a/internal/querycoordv2/task/task_test.go +++ b/internal/querycoordv2/task/task_test.go @@ -130,7 +130,7 @@ func (suite *TaskSuite) SetupTest() { suite.kv = etcdkv.NewEtcdKV(cli, config.MetaRootPath.GetValue()) suite.store = meta.NewMetaStore(suite.kv) - suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), suite.store) + suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), suite.store, session.NewNodeManager()) suite.dist = meta.NewDistributionManager() suite.broker = meta.NewMockBroker(suite.T()) suite.target = meta.NewTargetManager(suite.broker, suite.meta) @@ -1260,14 +1260,14 @@ func (suite *TaskSuite) newScheduler() *taskScheduler { } func createReplica(collection int64, nodes ...int64) *meta.Replica { - return &meta.Replica{ - Replica: &querypb.Replica{ + return meta.NewReplica( + &querypb.Replica{ ID: rand.Int63()/2 + 1, CollectionID: collection, Nodes: nodes, }, - Nodes: typeutil.NewUniqueSet(nodes...), - } + typeutil.NewUniqueSet(nodes...), + ) } func TestTask(t 
*testing.T) { diff --git a/internal/querycoordv2/utils/meta.go b/internal/querycoordv2/utils/meta.go index 3b4f067bd7..bdf06cb0d4 100644 --- a/internal/querycoordv2/utils/meta.go +++ b/internal/querycoordv2/utils/meta.go @@ -18,12 +18,22 @@ package utils import ( "context" + "errors" "fmt" "math/rand" + "github.com/milvus-io/milvus/internal/log" "github.com/milvus-io/milvus/internal/querycoordv2/meta" "github.com/milvus-io/milvus/internal/querycoordv2/session" "github.com/samber/lo" + "go.uber.org/zap" +) + +var ( + ErrGetNodesFromRG = errors.New("failed to get node from rg") + ErrNoReplicaFound = errors.New("no replica found during assign nodes") + ErrReplicasInconsistent = errors.New("all replicas should belong to same collection during assign nodes") + ErrUseWrongNumRG = errors.New("resource num can only be 0, 1 or same as replica number") ) func GetReplicaNodesInfo(replicaMgr *meta.ReplicaManager, nodeMgr *session.NodeManager, replicaID int64) []*session.NodeInfo { @@ -32,8 +42,8 @@ func GetReplicaNodesInfo(replicaMgr *meta.ReplicaManager, nodeMgr *session.NodeM return nil } - nodes := make([]*session.NodeInfo, 0, len(replica.Nodes)) - for node := range replica.Nodes { + nodes := make([]*session.NodeInfo, 0, len(replica.GetNodes())) + for _, node := range replica.GetNodes() { nodes = append(nodes, nodeMgr.Get(node)) } return nodes @@ -64,7 +74,7 @@ func GroupNodesByReplica(replicaMgr *meta.ReplicaManager, collectionID int64, no replicas := replicaMgr.GetByCollection(collectionID) for _, replica := range replicas { for _, node := range nodes { - if replica.Nodes.Contain(node) { + if replica.Contains(node) { ret[replica.ID] = append(ret[replica.ID], node) } } @@ -90,7 +100,7 @@ func GroupSegmentsByReplica(replicaMgr *meta.ReplicaManager, collectionID int64, replicas := replicaMgr.GetByCollection(collectionID) for _, replica := range replicas { for _, segment := range segments { - if replica.Nodes.Contain(segment.Node) { + if replica.Contains(segment.Node) { ret[replica.ID] = append(ret[replica.ID], segment) } } @@ -101,24 +111,92 @@ func GroupSegmentsByReplica(replicaMgr *meta.ReplicaManager, collectionID int64, // AssignNodesToReplicas assigns nodes to the given replicas, // all given replicas must be the same collection, // the given replicas have to be not in ReplicaManager -func AssignNodesToReplicas(nodeMgr *session.NodeManager, replicas ...*meta.Replica) { - replicaNumber := len(replicas) - nodes := nodeMgr.GetAll() - rand.Shuffle(len(nodes), func(i, j int) { - nodes[i], nodes[j] = nodes[j], nodes[i] +func AssignNodesToReplicas(m *meta.Meta, rgName string, replicas ...*meta.Replica) error { + replicaIDs := lo.Map(replicas, func(r *meta.Replica, _ int) int64 { return r.GetID() }) + log := log.With(zap.Int64("collectionID", replicas[0].GetCollectionID()), + zap.Int64s("replicas", replicaIDs), + zap.String("rgName", rgName), + ) + if len(replicaIDs) == 0 { + return nil + } + + nodeGroup, err := m.ResourceManager.GetNodes(rgName) + if err != nil { + log.Error("failed to get nodes", zap.Error(err)) + return err + } + + if len(nodeGroup) < len(replicaIDs) { + log.Error(meta.ErrNodeNotEnough.Error()) + return meta.ErrNodeNotEnough + } + + rand.Shuffle(len(nodeGroup), func(i, j int) { + nodeGroup[i], nodeGroup[j] = nodeGroup[j], nodeGroup[i] }) - for i, node := range nodes { - replicas[i%replicaNumber].AddNode(node.ID()) + log.Info("assign nodes to replicas", + zap.Int64s("nodes", nodeGroup), + ) + for i, node := range nodeGroup { + replicas[i%len(replicas)].AddNode(node) } + + return nil 
} // SpawnReplicas spawns replicas for given collection, assign nodes to them, and save them -func SpawnReplicas(replicaMgr *meta.ReplicaManager, nodeMgr *session.NodeManager, collection int64, replicaNumber int32) ([]*meta.Replica, error) { - replicas, err := replicaMgr.Spawn(collection, replicaNumber) +func SpawnAllReplicasInRG(m *meta.Meta, collection int64, replicaNumber int32, rgName string) ([]*meta.Replica, error) { + replicas, err := m.ReplicaManager.Spawn(collection, replicaNumber, rgName) if err != nil { return nil, err } - AssignNodesToReplicas(nodeMgr, replicas...) - return replicas, replicaMgr.Put(replicas...) + err = AssignNodesToReplicas(m, rgName, replicas...) + if err != nil { + return nil, err + } + return replicas, m.ReplicaManager.Put(replicas...) +} + +func checkResourceGroup(collectionID int64, replicaNumber int32, resourceGroups []string) error { + if len(resourceGroups) != 0 && len(resourceGroups) != 1 && len(resourceGroups) != int(replicaNumber) { + return ErrUseWrongNumRG + } + + return nil +} + +func SpawnReplicasWithRG(m *meta.Meta, collection int64, resourceGroups []string, replicaNumber int32) ([]*meta.Replica, error) { + if err := checkResourceGroup(collection, replicaNumber, resourceGroups); err != nil { + return nil, err + } + + if len(resourceGroups) == 0 { + return SpawnAllReplicasInRG(m, collection, replicaNumber, meta.DefaultResourceGroupName) + } + + if len(resourceGroups) == 1 { + return SpawnAllReplicasInRG(m, collection, replicaNumber, resourceGroups[0]) + } + + replicaSet := make([]*meta.Replica, 0) + for _, rgName := range resourceGroups { + if !m.ResourceManager.ContainResourceGroup(rgName) { + return nil, meta.ErrRGNotExist + } + + replicas, err := m.ReplicaManager.Spawn(collection, 1, rgName) + if err != nil { + return nil, err + } + + err = AssignNodesToReplicas(m, rgName, replicas...) + if err != nil { + return nil, err + } + replicaSet = append(replicaSet, replicas...) + } + + return replicaSet, m.ReplicaManager.Put(replicaSet...) } diff --git a/internal/querycoordv2/utils/meta_test.go b/internal/querycoordv2/utils/meta_test.go new file mode 100644 index 0000000000..502ea4ce7e --- /dev/null +++ b/internal/querycoordv2/utils/meta_test.go @@ -0,0 +1,110 @@ +// Licensed to the LF AI & Data foundation under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "testing" + + etcdKV "github.com/milvus-io/milvus/internal/kv/etcd" + "github.com/milvus-io/milvus/internal/querycoordv2/meta" + . 
"github.com/milvus-io/milvus/internal/querycoordv2/params" + "github.com/milvus-io/milvus/internal/querycoordv2/session" + "github.com/milvus-io/milvus/internal/util/etcd" +) + +func TestSpawnReplicasWithRG(t *testing.T) { + Params.Init() + config := GenerateEtcdConfig() + cli, _ := etcd.GetEtcdClient( + config.UseEmbedEtcd.GetAsBool(), + config.EtcdUseSSL.GetAsBool(), + config.Endpoints.GetAsStrings(), + config.EtcdTLSCert.GetValue(), + config.EtcdTLSKey.GetValue(), + config.EtcdTLSCACert.GetValue(), + config.EtcdTLSMinVersion.GetValue()) + kv := etcdKV.NewEtcdKV(cli, config.MetaRootPath.GetValue()) + + store := meta.NewMetaStore(kv) + nodeMgr := session.NewNodeManager() + m := meta.NewMeta(RandomIncrementIDAllocator(), store, nodeMgr) + m.ResourceManager.AddResourceGroup("rg1") + m.ResourceManager.AddResourceGroup("rg2") + m.ResourceManager.AddResourceGroup("rg3") + + for i := 1; i < 10; i++ { + nodeMgr.Add(session.NewNodeInfo(int64(i), "localhost")) + + if i%3 == 0 { + m.ResourceManager.AssignNode("rg1", int64(i)) + } + if i%3 == 1 { + m.ResourceManager.AssignNode("rg2", int64(i)) + } + if i%3 == 2 { + m.ResourceManager.AssignNode("rg3", int64(i)) + } + } + + type args struct { + m *meta.Meta + collection int64 + resourceGroups []string + replicaNumber int32 + } + + tests := []struct { + name string + args args + wantReplicaNum int + wantErr bool + }{ + { + name: "test 3 replica on 1 rg", + args: args{m, 1000, []string{"rg1"}, 3}, + wantReplicaNum: 3, + wantErr: false, + }, + + { + name: "test 3 replica on 2 rg", + args: args{m, 1000, []string{"rg1", "rg2"}, 3}, + wantReplicaNum: 0, + wantErr: true, + }, + + { + name: "test 3 replica on 3 rg", + args: args{m, 1000, []string{"rg1", "rg2", "rg3"}, 3}, + wantReplicaNum: 3, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := SpawnReplicasWithRG(tt.args.m, tt.args.collection, tt.args.resourceGroups, tt.args.replicaNumber) + if (err != nil) != tt.wantErr { + t.Errorf("SpawnReplicasWithRG() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if len(got) != tt.wantReplicaNum { + t.Errorf("SpawnReplicasWithRG() = %v, want %d replicas", got, tt.args.replicaNumber) + } + }) + } +} diff --git a/internal/querycoordv2/utils/test.go b/internal/querycoordv2/utils/test.go index 70e955f4b8..6a3294e007 100644 --- a/internal/querycoordv2/utils/test.go +++ b/internal/querycoordv2/utils/test.go @@ -52,14 +52,15 @@ func CreateTestChannel(collection, node, version int64, channel string) *meta.Dm } func CreateTestReplica(id, collectionID int64, nodes []int64) *meta.Replica { - return &meta.Replica{ - Replica: &querypb.Replica{ - ID: id, - CollectionID: collectionID, - Nodes: nodes, + return meta.NewReplica( + &querypb.Replica{ + ID: id, + CollectionID: collectionID, + Nodes: nodes, + ResourceGroup: meta.DefaultResourceGroupName, }, - Nodes: typeutil.NewUniqueSet(nodes...), - } + typeutil.NewUniqueSet(nodes...), + ) } func CreateTestCollection(collection int64, replica int32) *meta.Collection { diff --git a/internal/querycoordv2/utils/types.go b/internal/querycoordv2/utils/types.go index 11f4787b63..28d17eec87 100644 --- a/internal/querycoordv2/utils/types.go +++ b/internal/querycoordv2/utils/types.go @@ -20,7 +20,6 @@ import ( "fmt" "github.com/milvus-io/milvus-proto/go-api/commonpb" - "github.com/milvus-io/milvus-proto/go-api/milvuspb" "github.com/milvus-io/milvus/internal/proto/datapb" "github.com/milvus-io/milvus/internal/proto/querypb" "github.com/milvus-io/milvus/internal/querycoordv2/meta" @@ 
@@ -148,11 +147,3 @@ func MergeDmChannelInfo(infos []*datapb.VchannelInfo) *meta.DmChannel {
 
 	return dmChannel
 }
-
-func Replica2ReplicaInfo(replica *querypb.Replica) *milvuspb.ReplicaInfo {
-	return &milvuspb.ReplicaInfo{
-		ReplicaID:    replica.GetID(),
-		CollectionID: replica.GetCollectionID(),
-		NodeIds:      replica.GetNodes(),
-	}
-}
diff --git a/internal/types/types.go b/internal/types/types.go
index 94b4d92509..f6aab4108c 100644
--- a/internal/types/types.go
+++ b/internal/types/types.go
@@ -1304,6 +1304,13 @@ type ProxyComponent interface {
 
 	// RenameCollection rename collection from old name to new name
 	RenameCollection(ctx context.Context, req *milvuspb.RenameCollectionRequest) (*commonpb.Status, error)
+
+	CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error)
+	DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error)
+	TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error)
+	TransferReplica(ctx context.Context, req *milvuspb.TransferReplicaRequest) (*commonpb.Status, error)
+	ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error)
+	DescribeResourceGroup(ctx context.Context, req *milvuspb.DescribeResourceGroupRequest) (*milvuspb.DescribeResourceGroupResponse, error)
 }
 
 // QueryNode is the interface `querynode` package implements
@@ -1376,6 +1383,13 @@ type QueryCoord interface {
 	GetShardLeaders(ctx context.Context, req *querypb.GetShardLeadersRequest) (*querypb.GetShardLeadersResponse, error)
 
 	CheckHealth(ctx context.Context, req *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error)
+
+	CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error)
+	DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error)
+	TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest) (*commonpb.Status, error)
+	TransferReplica(ctx context.Context, req *querypb.TransferReplicaRequest) (*commonpb.Status, error)
+	ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error)
+	DescribeResourceGroup(ctx context.Context, req *querypb.DescribeResourceGroupRequest) (*querypb.DescribeResourceGroupResponse, error)
 }
 
 // QueryCoordComponent is used by grpc server of QueryCoord
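The ProxyComponent and QueryCoord additions above define the RPC surface for resource groups. A minimal sketch of how a caller could drive it through any ProxyComponent implementation follows; the proxy value is assumed to be already initialized, and the request field and getter names (ResourceGroup, GetResourceGroups) are taken from milvus-proto and should be checked against the generated code:

package example

import (
	"context"
	"log"

	"github.com/milvus-io/milvus-proto/go-api/milvuspb"
	"github.com/milvus-io/milvus/internal/types"
)

// createAndListResourceGroups creates one resource group and then lists all groups.
func createAndListResourceGroups(ctx context.Context, proxy types.ProxyComponent) {
	// Create a new resource group named "rg1".
	if _, err := proxy.CreateResourceGroup(ctx, &milvuspb.CreateResourceGroupRequest{
		ResourceGroup: "rg1", // assumed field name from milvus-proto
	}); err != nil {
		log.Fatal(err)
	}

	// List every resource group known to the query coordinator.
	resp, err := proxy.ListResourceGroups(ctx, &milvuspb.ListResourceGroupsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resp.GetResourceGroups()) // assumed getter from milvus-proto
}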
diff --git a/internal/util/mock/grpc_querycoord_client.go b/internal/util/mock/grpc_querycoord_client.go
index 8af02c8a05..6461fc6d85 100644
--- a/internal/util/mock/grpc_querycoord_client.go
+++ b/internal/util/mock/grpc_querycoord_client.go
@@ -101,3 +101,27 @@ func (m *GrpcQueryCoordClient) GetReplicas(ctx context.Context, in *milvuspb.Get
 func (m *GrpcQueryCoordClient) GetShardLeaders(ctx context.Context, in *querypb.GetShardLeadersRequest, opts ...grpc.CallOption) (*querypb.GetShardLeadersResponse, error) {
 	return &querypb.GetShardLeadersResponse{}, m.Err
 }
+
+func (m *GrpcQueryCoordClient) CreateResourceGroup(ctx context.Context, req *milvuspb.CreateResourceGroupRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
+	return &commonpb.Status{}, m.Err
+}
+
+func (m *GrpcQueryCoordClient) DropResourceGroup(ctx context.Context, req *milvuspb.DropResourceGroupRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
+	return &commonpb.Status{}, m.Err
+}
+
+func (m *GrpcQueryCoordClient) TransferNode(ctx context.Context, req *milvuspb.TransferNodeRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
+	return &commonpb.Status{}, m.Err
+}
+
+func (m *GrpcQueryCoordClient) TransferReplica(ctx context.Context, req *querypb.TransferReplicaRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
+	return &commonpb.Status{}, m.Err
+}
+
+func (m *GrpcQueryCoordClient) ListResourceGroups(ctx context.Context, req *milvuspb.ListResourceGroupsRequest, opts ...grpc.CallOption) (*milvuspb.ListResourceGroupsResponse, error) {
+	return &milvuspb.ListResourceGroupsResponse{}, m.Err
+}
+
+func (m *GrpcQueryCoordClient) DescribeResourceGroup(ctx context.Context, req *querypb.DescribeResourceGroupRequest, opts ...grpc.CallOption) (*querypb.DescribeResourceGroupResponse, error) {
+	return &querypb.DescribeResourceGroupResponse{}, m.Err
+}
diff --git a/internal/util/paramtable/component_param.go b/internal/util/paramtable/component_param.go
index baf91f5620..571557d236 100644
--- a/internal/util/paramtable/component_param.go
+++ b/internal/util/paramtable/component_param.go
@@ -888,8 +888,11 @@ type queryCoordConfig struct {
 	CheckHandoffInterval ParamItem `refreshable:"true"`
 	EnableActiveStandby  ParamItem `refreshable:"false"`
 
-	NextTargetSurviveTime    ParamItem `refreshable:"true"`
-	UpdateNextTargetInterval ParamItem `refreshable:"false"`
+	NextTargetSurviveTime      ParamItem `refreshable:"true"`
+	UpdateNextTargetInterval   ParamItem `refreshable:"false"`
+	CheckNodeInReplicaInterval ParamItem `refreshable:"false"`
+	CheckResourceGroupInterval ParamItem `refreshable:"false"`
+	EnableRGAutoRecover        ParamItem `refreshable:"true"`
 }
 
 func (p *queryCoordConfig) init(base *BaseTable) {
@@ -1040,6 +1043,30 @@ func (p *queryCoordConfig) init(base *BaseTable) {
 		PanicIfEmpty: true,
 	}
 	p.UpdateNextTargetInterval.Init(base.mgr)
+
+	p.CheckNodeInReplicaInterval = ParamItem{
+		Key:          "queryCoord.checkNodeInReplicaInterval",
+		Version:      "2.2.3",
+		DefaultValue: "60",
+		PanicIfEmpty: true,
+	}
+	p.CheckNodeInReplicaInterval.Init(base.mgr)
+
+	p.CheckResourceGroupInterval = ParamItem{
+		Key:          "queryCoord.checkResourceGroupInterval",
+		Version:      "2.2.3",
+		DefaultValue: "30",
+		PanicIfEmpty: true,
+	}
+	p.CheckResourceGroupInterval.Init(base.mgr)
+
+	p.EnableRGAutoRecover = ParamItem{
+		Key:          "queryCoord.enableRGAutoRecover",
+		Version:      "2.2.3",
+		DefaultValue: "true",
+		PanicIfEmpty: true,
+	}
+	p.EnableRGAutoRecover.Init(base.mgr)
 }
 
 // /////////////////////////////////////////////////////////////////////////////
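The three new queryCoord items above are plain ParamItems, so they are read just like the existing intervals. A hypothetical checker loop, sketched below, shows how CheckResourceGroupInterval and EnableRGAutoRecover would typically be consumed; the assumption that the interval values are seconds matches the 30/60 defaults but is not stated in this diff, and recoverFn stands in for whatever recovery routine the QueryCoord actually runs:

package example

import (
	"time"

	"github.com/milvus-io/milvus/internal/util/paramtable"
)

// watchResourceGroups periodically triggers resource-group recovery while the
// queryCoord.enableRGAutoRecover switch is on.
func watchResourceGroups(params *paramtable.ComponentParam, recoverFn func()) {
	// Assumed unit: the defaults ("30", "60") read as seconds.
	interval := time.Duration(params.QueryCoordCfg.CheckResourceGroupInterval.GetAsInt()) * time.Second
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for range ticker.C {
		// EnableRGAutoRecover is tagged refreshable, so re-read it on every tick.
		if !params.QueryCoordCfg.EnableRGAutoRecover.GetAsBool() {
			continue
		}
		recoverFn() // hypothetical recovery routine, e.g. moving nodes back into depleted groups
	}
}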
diff --git a/internal/util/paramtable/component_param_test.go b/internal/util/paramtable/component_param_test.go
index 0bdccffa8a..f7d2f1a8c0 100644
--- a/internal/util/paramtable/component_param_test.go
+++ b/internal/util/paramtable/component_param_test.go
@@ -243,6 +243,28 @@ func TestComponentParam(t *testing.T) {
 		Params := params.QueryCoordCfg
 		assert.Equal(t, Params.EnableActiveStandby.GetAsBool(), false)
 		t.Logf("queryCoord EnableActiveStandby = %t", Params.EnableActiveStandby.GetAsBool())
+
+		params.Save("queryCoord.NextTargetSurviveTime", "100")
+		NextTargetSurviveTime := Params.NextTargetSurviveTime
+		assert.Equal(t, int64(100), NextTargetSurviveTime.GetAsInt64())
+
+		params.Save("queryCoord.UpdateNextTargetInterval", "100")
+		UpdateNextTargetInterval := Params.UpdateNextTargetInterval
+		assert.Equal(t, int64(100), UpdateNextTargetInterval.GetAsInt64())
+
+		params.Save("queryCoord.checkNodeInReplicaInterval", "100")
+		checkNodeInReplicaInterval := Params.CheckNodeInReplicaInterval
+		assert.Equal(t, 100, checkNodeInReplicaInterval.GetAsInt())
+
+		params.Save("queryCoord.checkResourceGroupInterval", "10")
+		checkResourceGroupInterval := Params.CheckResourceGroupInterval
+		assert.Equal(t, 10, checkResourceGroupInterval.GetAsInt())
+
+		enableResourceGroupAutoRecover := Params.EnableRGAutoRecover
+		assert.Equal(t, true, enableResourceGroupAutoRecover.GetAsBool())
+		params.Save("queryCoord.enableRGAutoRecover", "false")
+		enableResourceGroupAutoRecover = Params.EnableRGAutoRecover
+		assert.Equal(t, false, enableResourceGroupAutoRecover.GetAsBool())
 	})
 
 	t.Run("test queryNodeConfig", func(t *testing.T) {