From a5e2d6b6fb924afcff335d836879cce6c55ca91f Mon Sep 17 00:00:00 2001
From: Jiquan Long
Date: Mon, 5 Sep 2022 13:29:11 +0800
Subject: [PATCH] Refactor RootCoord (#18930)

Signed-off-by: longjiquan
Co-authored-by: xaxys
Signed-off-by: longjiquan
Co-authored-by: xaxys
---
 internal/allocator/mock_global_id.go | 24 +
 internal/common/byte_slice.go | 19 +
 internal/common/byte_slice_test.go | 43 +
 internal/common/key_data_pairs.go | 36 +
 internal/common/key_data_pairs_test.go | 41 +
 internal/common/key_value_pairs.go | 36 +
 internal/common/key_value_pairs_test.go | 42 +
 internal/common/map.go | 24 +
 internal/common/map_test.go | 37 +
 internal/common/string_list.go | 27 +
 internal/common/string_list_test.go | 39 +
 internal/core/src/pb/schema.pb.cc | 190 +-
 internal/core/src/pb/schema.pb.h | 53 +
 internal/datacoord/mock_test.go | 4 -
 internal/distributed/proxy/client/client.go | 40 -
 .../distributed/proxy/client/client_test.go | 18 -
 internal/distributed/proxy/service.go | 13 -
 internal/distributed/proxy/service_test.go | 21 -
 .../distributed/rootcoord/client/client.go | 14 -
 .../rootcoord/client/client_test.go | 8 -
 internal/distributed/rootcoord/service.go | 5 -
 .../distributed/rootcoord/service_test.go | 600 ---
 internal/kv/kv.go | 2 +
 internal/kv/mock_txn_kv.go | 59 +
 internal/kv/mocks/SnapShotKV.go | 120 +
 internal/kv/mocks/TxnKV.go | 218 +
 internal/log/meta_logger_test.go | 2 -
 internal/metastore/catalog.go | 14 +
 internal/metastore/catalog_test.go | 35 +
 internal/metastore/db/dao/collection.go | 24 +
 internal/metastore/db/dao/collection_test.go | 66 +-
 internal/metastore/db/dao/partition.go | 21 +
 internal/metastore/db/dao/partition_test.go | 55 +-
 internal/metastore/db/dbmodel/collection.go | 2 +
 .../db/dbmodel/mocks/ICollectionDb.go | 14 +
 .../db/dbmodel/mocks/IPartitionDb.go | 14 +
 .../db/dbmodel/mocks/ISegmentIndexDb.go | 80 +-
 internal/metastore/db/dbmodel/partition.go | 2 +
 .../metastore/db/rootcoord/table_catalog.go | 72 +
 .../db/rootcoord/table_catalog_test.go | 117 +
 internal/metastore/kv/rootcoord/kv_catalog.go | 136 +-
 .../metastore/kv/rootcoord/kv_catalog_test.go | 326 ++
 .../metastore/kv/rootcoord/suffix_snapshot.go | 2 +-
 internal/metastore/model/alias.go | 21 +
 internal/metastore/model/alias_test.go | 91 +
 internal/metastore/model/collection.go | 35 +-
 internal/metastore/model/field.go | 58 +
 internal/metastore/model/field_test.go | 84 +
 internal/metastore/model/partition.go | 50 +-
 internal/metastore/model/partition_test.go | 47 +
 internal/mq/msgstream/mock_mq_factory.go | 16 +
 internal/mq/msgstream/mock_msgstream.go | 24 +
 internal/proto/etcd_meta.proto | 24 +
 internal/proto/etcdpb/etcd_meta.pb.go | 265 +-
 internal/proto/proxy.proto | 11 -
 internal/proto/proxypb/proxy.pb.go | 242 +-
 internal/proto/root_coord.proto | 4 +-
 internal/proto/rootcoordpb/root_coord.pb.go | 228 +-
 internal/proto/schema.proto | 8 +
 internal/proto/schemapb/schema.pb.go | 171 +-
 internal/proxy/impl.go | 39 -
 internal/proxy/proxy_test.go | 84 +-
 internal/proxy/rootcoord_mock_test.go | 14 -
 internal/proxy/task.go | 4 +-
 internal/proxy/task_test.go | 2 +-
 .../querycoord/mock_3rd_component_test.go | 17 -
 internal/rootcoord/alter_alias_task.go | 28 +
 internal/rootcoord/alter_alias_task_test.go | 54 +
 internal/rootcoord/broker.go | 226 ++
 internal/rootcoord/broker_test.go | 302 ++
 internal/rootcoord/constrant.go | 11 +
 internal/rootcoord/create_alias_task.go | 26 +
 internal/rootcoord/create_alias_task_test.go | 40 +
 internal/rootcoord/create_collection_task.go | 294 ++
 .../rootcoord/create_collection_task_test.go | 502 +++
 internal/rootcoord/create_partition_task.go | 69 +
 .../rootcoord/create_partition_task_test.go | 126 +
 .../rootcoord/describe_collection_task.go | 73 +
 .../describe_collection_task_test.go | 115 +
 internal/rootcoord/drop_alias_task.go | 29 +
 internal/rootcoord/drop_alias_task_test.go | 82 +
 internal/rootcoord/drop_collection_task.go | 94 +
 .../rootcoord/drop_collection_task_test.go | 216 +
 internal/rootcoord/drop_partition_task.go | 90 +
 .../rootcoord/drop_partition_task_test.go | 174 +
 internal/rootcoord/garbage_collector.go | 158 +
 internal/rootcoord/garbage_collector_test.go | 208 +
 internal/rootcoord/has_collection_task.go | 35 +
 .../rootcoord/has_collection_task_test.go | 85 +
 internal/rootcoord/has_partition_task.go | 43 +
 internal/rootcoord/has_partition_task_test.go | 127 +
 internal/rootcoord/import_helper.go | 71 +
 internal/rootcoord/meta_table.go | 1193 +++---
 internal/rootcoord/meta_table_test.go | 859 +---
 internal/rootcoord/mock_test.go | 840 ++++
 internal/rootcoord/proxy_client_manager.go | 45 +-
 .../rootcoord/proxy_client_manager_test.go | 378 +-
 internal/rootcoord/redo.go | 57 +
 internal/rootcoord/redo_test.go | 119 +
 internal/rootcoord/root_coord.go | 1964 ++++-----
 internal/rootcoord/root_coord_test.go | 3574 ++++-------------
 internal/rootcoord/scheduler.go | 100 +
 internal/rootcoord/scheduler_test.go | 168 +
 internal/rootcoord/show_collection_task.go | 47 +
 .../rootcoord/show_collection_task_test.go | 88 +
 internal/rootcoord/show_partition_task.go | 51 +
 .../rootcoord/show_partition_task_test.go | 119 +
 internal/rootcoord/step.go | 165 +
 internal/rootcoord/task.go | 954 -----
 internal/rootcoord/task_test.go | 192 -
 internal/rootcoord/task_v2.go | 66 +
 internal/rootcoord/timestamp_bench_test.go | 72 +
 internal/rootcoord/timestamp_test.go | 137 -
 internal/rootcoord/timeticksync.go | 69 +-
 internal/rootcoord/timeticksync_test.go | 11 -
 internal/rootcoord/undo.go | 59 +
 internal/rootcoord/undo_test.go | 93 +
 internal/rootcoord/util.go | 39 +-
 internal/tso/mock_global_allocator.go | 43 +
 internal/types/types.go | 30 -
 internal/util/mock/grpc_proxy_client.go | 12 -
 internal/util/mock/grpc_rootcoord_client.go | 4 -
 scripts/sql/meta.sql | 2 +
 tests/python_client/testcases/test_alias.py | 23 +-
 .../testcases/test_collection.py | 10 +-
 tests/python_client/testcases/test_utility.py | 15 +-
 126 files changed, 10348 insertions(+), 8782 deletions(-)
 create mode 100644 internal/allocator/mock_global_id.go
 create mode 100644 internal/common/byte_slice.go
 create mode 100644 internal/common/byte_slice_test.go
 create mode 100644 internal/common/key_data_pairs.go
 create mode 100644 internal/common/key_data_pairs_test.go
 create mode 100644 internal/common/key_value_pairs.go
 create mode 100644 internal/common/key_value_pairs_test.go
 create mode 100644 internal/common/map.go
 create mode 100644 internal/common/map_test.go
 create mode 100644 internal/common/string_list.go
 create mode 100644 internal/common/string_list_test.go
 create mode 100644 internal/kv/mock_txn_kv.go
 create mode 100644 internal/kv/mocks/SnapShotKV.go
 create mode 100644 internal/kv/mocks/TxnKV.go
 create mode 100644 internal/metastore/catalog_test.go
 create mode 100644 internal/metastore/model/alias_test.go
 create mode 100644 internal/metastore/model/partition_test.go
 create mode 100644 internal/mq/msgstream/mock_mq_factory.go
 create mode 100644 internal/mq/msgstream/mock_msgstream.go
 create mode 100644 internal/rootcoord/alter_alias_task.go
 create mode 100644 internal/rootcoord/alter_alias_task_test.go
 create mode 100644 internal/rootcoord/broker.go
 create mode 100644 internal/rootcoord/broker_test.go
 create mode 100644 internal/rootcoord/constrant.go
 create mode 100644 internal/rootcoord/create_alias_task.go
 create mode 100644 internal/rootcoord/create_alias_task_test.go
 create mode 100644 internal/rootcoord/create_collection_task.go
 create mode 100644 internal/rootcoord/create_collection_task_test.go
 create mode 100644 internal/rootcoord/create_partition_task.go
 create mode 100644 internal/rootcoord/create_partition_task_test.go
 create mode 100644 internal/rootcoord/describe_collection_task.go
 create mode 100644 internal/rootcoord/describe_collection_task_test.go
 create mode 100644 internal/rootcoord/drop_alias_task.go
 create mode 100644 internal/rootcoord/drop_alias_task_test.go
 create mode 100644 internal/rootcoord/drop_collection_task.go
 create mode 100644 internal/rootcoord/drop_collection_task_test.go
 create mode 100644 internal/rootcoord/drop_partition_task.go
 create mode 100644 internal/rootcoord/drop_partition_task_test.go
 create mode 100644 internal/rootcoord/garbage_collector.go
 create mode 100644 internal/rootcoord/garbage_collector_test.go
 create mode 100644 internal/rootcoord/has_collection_task.go
 create mode 100644 internal/rootcoord/has_collection_task_test.go
 create mode 100644 internal/rootcoord/has_partition_task.go
 create mode 100644 internal/rootcoord/has_partition_task_test.go
 create mode 100644 internal/rootcoord/import_helper.go
 create mode 100644 internal/rootcoord/mock_test.go
 create mode 100644 internal/rootcoord/redo.go
 create mode 100644 internal/rootcoord/redo_test.go
 create mode 100644 internal/rootcoord/scheduler.go
 create mode 100644 internal/rootcoord/scheduler_test.go
 create mode 100644 internal/rootcoord/show_collection_task.go
 create mode 100644 internal/rootcoord/show_collection_task_test.go
 create mode 100644 internal/rootcoord/show_partition_task.go
 create mode 100644 internal/rootcoord/show_partition_task_test.go
 create mode 100644 internal/rootcoord/step.go
 delete mode 100644 internal/rootcoord/task.go
 delete mode 100644 internal/rootcoord/task_test.go
 create mode 100644 internal/rootcoord/task_v2.go
 create mode 100644 internal/rootcoord/timestamp_bench_test.go
 delete mode 100644 internal/rootcoord/timestamp_test.go
 create mode 100644 internal/rootcoord/undo.go
 create mode 100644 internal/rootcoord/undo_test.go
 create mode 100644 internal/tso/mock_global_allocator.go

diff --git a/internal/allocator/mock_global_id.go b/internal/allocator/mock_global_id.go
new file mode 100644
index 0000000000..d3709ed8c9
--- /dev/null
+++ b/internal/allocator/mock_global_id.go
@@ -0,0 +1,24 @@
+package allocator
+
+type MockGIDAllocator struct {
+    GIDAllocator
+    AllocF    func(count uint32) (UniqueID, UniqueID, error)
+    AllocOneF func() (UniqueID, error)
+    UpdateIDF func() error
+}
+
+func (m MockGIDAllocator) Alloc(count uint32) (UniqueID, UniqueID, error) {
+    return m.AllocF(count)
+}
+
+func (m MockGIDAllocator) AllocOne() (UniqueID, error) {
+    return m.AllocOneF()
+}
+
+func (m MockGIDAllocator) UpdateID() error {
+    return m.UpdateIDF()
+}
+
+func NewMockGIDAllocator() *MockGIDAllocator {
+    return &MockGIDAllocator{}
+}
diff --git a/internal/common/byte_slice.go b/internal/common/byte_slice.go
new file mode 100644
index 0000000000..03bc17dae8
--- /dev/null
+++ b/internal/common/byte_slice.go
@@ -0,0 +1,19 @@
+package common
+
+import "reflect"
+
+type ByteSlice []byte
+
+func (s ByteSlice) Clone() ByteSlice {
+    clone := make(ByteSlice, len(s))
+    copy(clone, s)
+    return clone
+}
+
+func (s ByteSlice) Equal(other ByteSlice) bool {
+    return reflect.DeepEqual(s, other)
+}
+
+func CloneByteSlice(s ByteSlice) ByteSlice {
+    return s.Clone()
+}
diff --git a/internal/common/byte_slice_test.go b/internal/common/byte_slice_test.go
new file mode 100644
index 0000000000..46f0c492d1
--- /dev/null
+++ b/internal/common/byte_slice_test.go
@@ -0,0 +1,43 @@
+package common
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+)
+
+func TestCloneByteSlice(t *testing.T) {
+    type args struct {
+        s ByteSlice
+    }
+    tests := []struct {
+        name string
+        args args
+        want ByteSlice
+    }{
+        {
+            args: args{s: []byte{0x0}},
+            want: []byte{0x0},
+        },
+        {
+            args: args{s: []byte{0xff}},
+            want: []byte{0xff},
+        },
+        {
+            args: args{s: []byte{0x0f}},
+            want: []byte{0x0f},
+        },
+        {
+            args: args{s: []byte{0xf0}},
+            want: []byte{0xf0},
+        }, {
+            args: args{s: []byte{0x0, 0xff, 0x0f, 0xf0}},
+            want: []byte{0x0, 0xff, 0x0f, 0xf0},
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            assert.True(t, tt.want.Equal(tt.args.s))
+        })
+    }
+}
diff --git a/internal/common/key_data_pairs.go b/internal/common/key_data_pairs.go
new file mode 100644
index 0000000000..329b8d1928
--- /dev/null
+++ b/internal/common/key_data_pairs.go
@@ -0,0 +1,36 @@
+package common
+
+import (
+    "reflect"
+
+    "github.com/milvus-io/milvus/internal/proto/commonpb"
+)
+
+type KeyDataPairs []*commonpb.KeyDataPair
+
+func (pairs KeyDataPairs) Clone() KeyDataPairs {
+    clone := make(KeyDataPairs, 0, len(pairs))
+    for _, pair := range pairs {
+        clone = append(clone, &commonpb.KeyDataPair{
+            Key:  pair.GetKey(),
+            Data: CloneByteSlice(pair.GetData()),
+        })
+    }
+    return clone
+}
+
+func (pairs KeyDataPairs) ToMap() map[string][]byte {
+    ret := make(map[string][]byte)
+    for _, pair := range pairs {
+        ret[pair.GetKey()] = CloneByteSlice(pair.GetData())
+    }
+    return ret
+}
+
+func (pairs KeyDataPairs) Equal(other KeyDataPairs) bool {
+    return reflect.DeepEqual(pairs.ToMap(), other.ToMap())
+}
+
+func CloneKeyDataPairs(pairs KeyDataPairs) KeyDataPairs {
+    return pairs.Clone()
+}
diff --git a/internal/common/key_data_pairs_test.go b/internal/common/key_data_pairs_test.go
new file mode 100644
index 0000000000..922ff92a98
--- /dev/null
+++ b/internal/common/key_data_pairs_test.go
@@ -0,0 +1,41 @@
+package common
+
+import (
+    "testing"
+
+    "github.com/milvus-io/milvus/internal/proto/commonpb"
+
+    "github.com/stretchr/testify/assert"
+)
+
+func TestCloneKeyDataPairs(t *testing.T) {
+    type args struct {
+        pairs KeyDataPairs
+    }
+    tests := []struct {
+        name string
+        args args
+    }{
+        {
+            args: args{
+                pairs: nil,
+            },
+        },
+        {
+            args: args{
+                pairs: []*commonpb.KeyDataPair{
+                    {Key: "k1", Data: []byte("v1")},
+                    {Key: "k2", Data: []byte("v2")},
+                    {Key: "k3", Data: []byte("v3")},
+                    {Key: "k4", Data: []byte("v4")},
+                },
+            },
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            clone := CloneKeyDataPairs(tt.args.pairs)
+            assert.True(t, clone.Equal(tt.args.pairs))
+        })
+    }
+}
diff --git a/internal/common/key_value_pairs.go b/internal/common/key_value_pairs.go
new file mode 100644
index 0000000000..3b0e81ff72
--- /dev/null
+++ b/internal/common/key_value_pairs.go
@@ -0,0 +1,36 @@
+package common
+
+import (
+    "reflect"
+
+    "github.com/milvus-io/milvus/internal/proto/commonpb"
+)
+
+type KeyValuePairs []*commonpb.KeyValuePair
+
+func (pairs KeyValuePairs) Clone() KeyValuePairs {
+    clone := make(KeyValuePairs, 0, len(pairs))
+    for _, pair := range pairs {
+        clone = append(clone, &commonpb.KeyValuePair{
+            Key:   pair.GetKey(),
+            Value: pair.GetValue(),
+        })
+    }
+    return clone
+}
+
+func (pairs KeyValuePairs) ToMap() map[string]string {
+    ret := make(map[string]string)
+    for _, pair := range pairs {
+        ret[pair.GetKey()] = pair.GetValue()
+    }
+    return ret
+}
+
+func (pairs KeyValuePairs) Equal(other KeyValuePairs) bool {
+    return reflect.DeepEqual(pairs.ToMap(), other.ToMap())
+}
+
+func CloneKeyValuePairs(pairs KeyValuePairs) KeyValuePairs {
+    return pairs.Clone()
+}
diff --git a/internal/common/key_value_pairs_test.go b/internal/common/key_value_pairs_test.go
new file mode 100644
index 0000000000..7ba19581df
--- /dev/null
+++ b/internal/common/key_value_pairs_test.go
@@ -0,0 +1,42 @@
+package common
+
+import (
+    "testing"
+
+    "github.com/milvus-io/milvus/internal/proto/commonpb"
+
+    "github.com/stretchr/testify/assert"
+)
+
+func TestCloneKeyValuePairs(t *testing.T) {
+    type args struct {
+        pairs KeyValuePairs
+    }
+    tests := []struct {
+        name string
+        args args
+        want KeyValuePairs
+    }{
+        {
+            args: args{
+                pairs: nil,
+            },
+        },
+        {
+            args: args{
+                pairs: []*commonpb.KeyValuePair{
+                    {Key: "k1", Value: "v1"},
+                    {Key: "k2", Value: "v2"},
+                    {Key: "k3", Value: "v3"},
+                    {Key: "k4", Value: "v4"},
+                },
+            },
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            clone := CloneKeyValuePairs(tt.args.pairs)
+            assert.True(t, clone.Equal(tt.args.pairs))
+        })
+    }
+}
diff --git a/internal/common/map.go b/internal/common/map.go
new file mode 100644
index 0000000000..e5c9d21621
--- /dev/null
+++ b/internal/common/map.go
@@ -0,0 +1,24 @@
+package common
+
+import "reflect"
+
+type Str2Str map[string]string
+
+func (m Str2Str) Clone() Str2Str {
+    if m == nil {
+        return nil
+    }
+    clone := make(Str2Str)
+    for key, value := range m {
+        clone[key] = value
+    }
+    return clone
+}
+
+func (m Str2Str) Equal(other Str2Str) bool {
+    return reflect.DeepEqual(m, other)
+}
+
+func CloneStr2Str(m Str2Str) Str2Str {
+    return m.Clone()
+}
diff --git a/internal/common/map_test.go b/internal/common/map_test.go
new file mode 100644
index 0000000000..2703609f65
--- /dev/null
+++ b/internal/common/map_test.go
@@ -0,0 +1,37 @@
+package common
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+)
+
+func TestCloneStr2Str(t *testing.T) {
+    type args struct {
+        m Str2Str
+    }
+    tests := []struct {
+        name string
+        args args
+    }{
+        {
+            args: args{
+                m: nil,
+            },
+        },
+        {
+            args: args{
+                m: map[string]string{
+                    "k1": "v1",
+                    "k2": "v2",
+                },
+            },
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            got := CloneStr2Str(tt.args.m)
+            assert.True(t, got.Equal(tt.args.m))
+        })
+    }
+}
diff --git a/internal/common/string_list.go b/internal/common/string_list.go
new file mode 100644
index 0000000000..428e9324d2
--- /dev/null
+++ b/internal/common/string_list.go
@@ -0,0 +1,27 @@
+package common
+
+type StringList []string
+
+func (l StringList) Clone() StringList {
+    clone := make([]string, 0, len(l))
+    for _, s := range l {
+        clone = append(clone, s)
+    }
+    return clone
+}
+
+func (l StringList) Equal(other StringList) bool {
+    if len(l) != len(other) {
+        return false
+    }
+    for i := range l {
+        if l[i] != other[i] {
+            return false
+        }
+    }
+    return true
+}
+
+func CloneStringList(l StringList) StringList {
+    return l.Clone()
+}
diff --git a/internal/common/string_list_test.go b/internal/common/string_list_test.go
new file mode 100644
index 0000000000..b0f8c28c98
--- /dev/null
+++ b/internal/common/string_list_test.go
@@ -0,0
+1,39 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCloneStringList(t *testing.T) { + type args struct { + l StringList + } + tests := []struct { + name string + args args + }{ + { + args: args{ + l: nil, + }, + }, + { + args: args{ + l: []string{"s1", "s2"}, + }, + }, + { + args: args{ + l: []string{"dup", "dup", "dup"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := CloneStringList(tt.args.l) + assert.True(t, got.Equal(tt.args.l)) + }) + } +} diff --git a/internal/core/src/pb/schema.pb.cc b/internal/core/src/pb/schema.pb.cc index 8baabc8a66..a9bae0f82a 100644 --- a/internal/core/src/pb/schema.pb.cc +++ b/internal/core/src/pb/schema.pb.cc @@ -316,7 +316,7 @@ static void InitDefaultsscc_info_VectorField_schema_2eproto() { &scc_info_FloatArray_schema_2eproto.base,}}; static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_schema_2eproto[14]; -static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_schema_2eproto[1]; +static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_schema_2eproto[2]; static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_schema_2eproto = nullptr; const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_schema_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { @@ -333,6 +333,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_schema_2eproto::offsets[] PROT PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, type_params_), PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, index_params_), PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, autoid_), + PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, state_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::CollectionSchema, _internal_metadata_), ~0u, // no _extensions_ @@ -439,19 +440,19 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_schema_2eproto::offsets[] PROT }; static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, sizeof(::milvus::proto::schema::FieldSchema)}, - { 13, -1, sizeof(::milvus::proto::schema::CollectionSchema)}, - { 22, -1, sizeof(::milvus::proto::schema::BoolArray)}, - { 28, -1, sizeof(::milvus::proto::schema::IntArray)}, - { 34, -1, sizeof(::milvus::proto::schema::LongArray)}, - { 40, -1, sizeof(::milvus::proto::schema::FloatArray)}, - { 46, -1, sizeof(::milvus::proto::schema::DoubleArray)}, - { 52, -1, sizeof(::milvus::proto::schema::BytesArray)}, - { 58, -1, sizeof(::milvus::proto::schema::StringArray)}, - { 64, -1, sizeof(::milvus::proto::schema::ScalarField)}, - { 77, -1, sizeof(::milvus::proto::schema::VectorField)}, - { 86, -1, sizeof(::milvus::proto::schema::FieldData)}, - { 97, -1, sizeof(::milvus::proto::schema::IDs)}, - { 105, -1, sizeof(::milvus::proto::schema::SearchResultData)}, + { 14, -1, sizeof(::milvus::proto::schema::CollectionSchema)}, + { 23, -1, sizeof(::milvus::proto::schema::BoolArray)}, + { 29, -1, sizeof(::milvus::proto::schema::IntArray)}, + { 35, -1, sizeof(::milvus::proto::schema::LongArray)}, + { 41, -1, sizeof(::milvus::proto::schema::FloatArray)}, + { 47, -1, sizeof(::milvus::proto::schema::DoubleArray)}, + { 53, -1, sizeof(::milvus::proto::schema::BytesArray)}, + { 59, -1, sizeof(::milvus::proto::schema::StringArray)}, + { 65, -1, sizeof(::milvus::proto::schema::ScalarField)}, + { 78, -1, 
sizeof(::milvus::proto::schema::VectorField)}, + { 87, -1, sizeof(::milvus::proto::schema::FieldData)}, + { 98, -1, sizeof(::milvus::proto::schema::IDs)}, + { 106, -1, sizeof(::milvus::proto::schema::SearchResultData)}, }; static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { @@ -473,54 +474,57 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = const char descriptor_table_protodef_schema_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = "\n\014schema.proto\022\023milvus.proto.schema\032\014com" - "mon.proto\"\214\002\n\013FieldSchema\022\017\n\007fieldID\030\001 \001" + "mon.proto\"\274\002\n\013FieldSchema\022\017\n\007fieldID\030\001 \001" "(\003\022\014\n\004name\030\002 \001(\t\022\026\n\016is_primary_key\030\003 \001(\010" "\022\023\n\013description\030\004 \001(\t\0220\n\tdata_type\030\005 \001(\016" "2\035.milvus.proto.schema.DataType\0226\n\013type_" "params\030\006 \003(\0132!.milvus.proto.common.KeyVa" "luePair\0227\n\014index_params\030\007 \003(\0132!.milvus.p" "roto.common.KeyValuePair\022\016\n\006autoID\030\010 \001(\010" - "\"w\n\020CollectionSchema\022\014\n\004name\030\001 \001(\t\022\023\n\013de" - "scription\030\002 \001(\t\022\016\n\006autoID\030\003 \001(\010\0220\n\006field" - "s\030\004 \003(\0132 .milvus.proto.schema.FieldSchem" - "a\"\031\n\tBoolArray\022\014\n\004data\030\001 \003(\010\"\030\n\010IntArray" - "\022\014\n\004data\030\001 \003(\005\"\031\n\tLongArray\022\014\n\004data\030\001 \003(" - "\003\"\032\n\nFloatArray\022\014\n\004data\030\001 \003(\002\"\033\n\013DoubleA" - "rray\022\014\n\004data\030\001 \003(\001\"\032\n\nBytesArray\022\014\n\004data" - "\030\001 \003(\014\"\033\n\013StringArray\022\014\n\004data\030\001 \003(\t\"\222\003\n\013" - "ScalarField\0223\n\tbool_data\030\001 \001(\0132\036.milvus." - "proto.schema.BoolArrayH\000\0221\n\010int_data\030\002 \001" - "(\0132\035.milvus.proto.schema.IntArrayH\000\0223\n\tl" - "ong_data\030\003 \001(\0132\036.milvus.proto.schema.Lon" - "gArrayH\000\0225\n\nfloat_data\030\004 \001(\0132\037.milvus.pr" - "oto.schema.FloatArrayH\000\0227\n\013double_data\030\005" - " \001(\0132 .milvus.proto.schema.DoubleArrayH\000" - "\0227\n\013string_data\030\006 \001(\0132 .milvus.proto.sch" - "ema.StringArrayH\000\0225\n\nbytes_data\030\007 \001(\0132\037." - "milvus.proto.schema.BytesArrayH\000B\006\n\004data" - "\"t\n\013VectorField\022\013\n\003dim\030\001 \001(\003\0227\n\014float_ve" - "ctor\030\002 \001(\0132\037.milvus.proto.schema.FloatAr" - "rayH\000\022\027\n\rbinary_vector\030\003 \001(\014H\000B\006\n\004data\"\321" - "\001\n\tFieldData\022+\n\004type\030\001 \001(\0162\035.milvus.prot" - "o.schema.DataType\022\022\n\nfield_name\030\002 \001(\t\0223\n" - "\007scalars\030\003 \001(\0132 .milvus.proto.schema.Sca" - "larFieldH\000\0223\n\007vectors\030\004 \001(\0132 .milvus.pro" - "to.schema.VectorFieldH\000\022\020\n\010field_id\030\005 \001(" - "\003B\007\n\005field\"w\n\003IDs\0220\n\006int_id\030\001 \001(\0132\036.milv" - "us.proto.schema.LongArrayH\000\0222\n\006str_id\030\002 " - "\001(\0132 .milvus.proto.schema.StringArrayH\000B" - "\n\n\010id_field\"\261\001\n\020SearchResultData\022\023\n\013num_" - "queries\030\001 \001(\003\022\r\n\005top_k\030\002 \001(\003\0223\n\013fields_d" - "ata\030\003 \003(\0132\036.milvus.proto.schema.FieldDat" - "a\022\016\n\006scores\030\004 \003(\002\022%\n\003ids\030\005 \001(\0132\030.milvus." 
- "proto.schema.IDs\022\r\n\005topks\030\006 \003(\003*\234\001\n\010Data" - "Type\022\010\n\004None\020\000\022\010\n\004Bool\020\001\022\010\n\004Int8\020\002\022\t\n\005In" - "t16\020\003\022\t\n\005Int32\020\004\022\t\n\005Int64\020\005\022\t\n\005Float\020\n\022\n" - "\n\006Double\020\013\022\n\n\006String\020\024\022\013\n\007VarChar\020\025\022\020\n\014B" - "inaryVector\020d\022\017\n\013FloatVector\020eBW\n\016io.mil" - "vus.grpcB\013SchemaProtoP\001Z3github.com/milv" - "us-io/milvus/internal/proto/schemapb\240\001\001b" - "\006proto3" + "\022.\n\005state\030\t \001(\0162\037.milvus.proto.schema.Fi" + "eldState\"w\n\020CollectionSchema\022\014\n\004name\030\001 \001" + "(\t\022\023\n\013description\030\002 \001(\t\022\016\n\006autoID\030\003 \001(\010\022" + "0\n\006fields\030\004 \003(\0132 .milvus.proto.schema.Fi" + "eldSchema\"\031\n\tBoolArray\022\014\n\004data\030\001 \003(\010\"\030\n\010" + "IntArray\022\014\n\004data\030\001 \003(\005\"\031\n\tLongArray\022\014\n\004d" + "ata\030\001 \003(\003\"\032\n\nFloatArray\022\014\n\004data\030\001 \003(\002\"\033\n" + "\013DoubleArray\022\014\n\004data\030\001 \003(\001\"\032\n\nBytesArray" + "\022\014\n\004data\030\001 \003(\014\"\033\n\013StringArray\022\014\n\004data\030\001 " + "\003(\t\"\222\003\n\013ScalarField\0223\n\tbool_data\030\001 \001(\0132\036" + ".milvus.proto.schema.BoolArrayH\000\0221\n\010int_" + "data\030\002 \001(\0132\035.milvus.proto.schema.IntArra" + "yH\000\0223\n\tlong_data\030\003 \001(\0132\036.milvus.proto.sc" + "hema.LongArrayH\000\0225\n\nfloat_data\030\004 \001(\0132\037.m" + "ilvus.proto.schema.FloatArrayH\000\0227\n\013doubl" + "e_data\030\005 \001(\0132 .milvus.proto.schema.Doubl" + "eArrayH\000\0227\n\013string_data\030\006 \001(\0132 .milvus.p" + "roto.schema.StringArrayH\000\0225\n\nbytes_data\030" + "\007 \001(\0132\037.milvus.proto.schema.BytesArrayH\000" + "B\006\n\004data\"t\n\013VectorField\022\013\n\003dim\030\001 \001(\003\0227\n\014" + "float_vector\030\002 \001(\0132\037.milvus.proto.schema" + ".FloatArrayH\000\022\027\n\rbinary_vector\030\003 \001(\014H\000B\006" + "\n\004data\"\321\001\n\tFieldData\022+\n\004type\030\001 \001(\0162\035.mil" + "vus.proto.schema.DataType\022\022\n\nfield_name\030" + "\002 \001(\t\0223\n\007scalars\030\003 \001(\0132 .milvus.proto.sc" + "hema.ScalarFieldH\000\0223\n\007vectors\030\004 \001(\0132 .mi" + "lvus.proto.schema.VectorFieldH\000\022\020\n\010field" + "_id\030\005 \001(\003B\007\n\005field\"w\n\003IDs\0220\n\006int_id\030\001 \001(" + "\0132\036.milvus.proto.schema.LongArrayH\000\0222\n\006s" + "tr_id\030\002 \001(\0132 .milvus.proto.schema.String" + "ArrayH\000B\n\n\010id_field\"\261\001\n\020SearchResultData" + "\022\023\n\013num_queries\030\001 \001(\003\022\r\n\005top_k\030\002 \001(\003\0223\n\013" + "fields_data\030\003 \003(\0132\036.milvus.proto.schema." 
+ "FieldData\022\016\n\006scores\030\004 \003(\002\022%\n\003ids\030\005 \001(\0132\030" + ".milvus.proto.schema.IDs\022\r\n\005topks\030\006 \003(\003*" + "\234\001\n\010DataType\022\010\n\004None\020\000\022\010\n\004Bool\020\001\022\010\n\004Int8" + "\020\002\022\t\n\005Int16\020\003\022\t\n\005Int32\020\004\022\t\n\005Int64\020\005\022\t\n\005F" + "loat\020\n\022\n\n\006Double\020\013\022\n\n\006String\020\024\022\013\n\007VarCha" + "r\020\025\022\020\n\014BinaryVector\020d\022\017\n\013FloatVector\020e*V" + "\n\nFieldState\022\020\n\014FieldCreated\020\000\022\021\n\rFieldC" + "reating\020\001\022\021\n\rFieldDropping\020\002\022\020\n\014FieldDro" + "pped\020\003BW\n\016io.milvus.grpcB\013SchemaProtoP\001Z" + "3github.com/milvus-io/milvus/internal/pr" + "oto/schemapb\240\001\001b\006proto3" ; static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_schema_2eproto_deps[1] = { &::descriptor_table_common_2eproto, @@ -544,7 +548,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_sch static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_schema_2eproto_once; static bool descriptor_table_schema_2eproto_initialized = false; const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_schema_2eproto = { - &descriptor_table_schema_2eproto_initialized, descriptor_table_protodef_schema_2eproto, "schema.proto", 1927, + &descriptor_table_schema_2eproto_initialized, descriptor_table_protodef_schema_2eproto, "schema.proto", 2063, &descriptor_table_schema_2eproto_once, descriptor_table_schema_2eproto_sccs, descriptor_table_schema_2eproto_deps, 14, 1, schemas, file_default_instances, TableStruct_schema_2eproto::offsets, file_level_metadata_schema_2eproto, 14, file_level_enum_descriptors_schema_2eproto, file_level_service_descriptors_schema_2eproto, @@ -579,6 +583,22 @@ bool DataType_IsValid(int value) { } } +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* FieldState_descriptor() { + ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_schema_2eproto); + return file_level_enum_descriptors_schema_2eproto[1]; +} +bool FieldState_IsValid(int value) { + switch (value) { + case 0: + case 1: + case 2: + case 3: + return true; + default: + return false; + } +} + // =================================================================== @@ -614,8 +634,8 @@ FieldSchema::FieldSchema(const FieldSchema& from) description_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.description_); } ::memcpy(&fieldid_, &from.fieldid_, - static_cast(reinterpret_cast(&autoid_) - - reinterpret_cast(&fieldid_)) + sizeof(autoid_)); + static_cast(reinterpret_cast(&state_) - + reinterpret_cast(&fieldid_)) + sizeof(state_)); // @@protoc_insertion_point(copy_constructor:milvus.proto.schema.FieldSchema) } @@ -624,8 +644,8 @@ void FieldSchema::SharedCtor() { name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); description_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); ::memset(&fieldid_, 0, static_cast( - reinterpret_cast(&autoid_) - - reinterpret_cast(&fieldid_)) + sizeof(autoid_)); + reinterpret_cast(&state_) - + reinterpret_cast(&fieldid_)) + sizeof(state_)); } FieldSchema::~FieldSchema() { @@ -658,8 +678,8 @@ void FieldSchema::Clear() { name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 
description_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); ::memset(&fieldid_, 0, static_cast( - reinterpret_cast(&autoid_) - - reinterpret_cast(&fieldid_)) + sizeof(autoid_)); + reinterpret_cast(&state_) - + reinterpret_cast(&fieldid_)) + sizeof(state_)); _internal_metadata_.Clear(); } @@ -738,6 +758,14 @@ const char* FieldSchema::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID CHK_(ptr); } else goto handle_unusual; continue; + // .milvus.proto.schema.FieldState state = 9; + case 9: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 72)) { + ::PROTOBUF_NAMESPACE_ID::uint64 val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr); + CHK_(ptr); + set_state(static_cast<::milvus::proto::schema::FieldState>(val)); + } else goto handle_unusual; + continue; default: { handle_unusual: if ((tag & 7) == 4 || tag == 0) { @@ -873,6 +901,20 @@ bool FieldSchema::MergePartialFromCodedStream( break; } + // .milvus.proto.schema.FieldState state = 9; + case 9: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (72 & 0xFF)) { + int value = 0; + DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive< + int, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_ENUM>( + input, &value))); + set_state(static_cast< ::milvus::proto::schema::FieldState >(value)); + } else { + goto handle_unusual; + } + break; + } + default: { handle_unusual: if (tag == 0) { @@ -959,6 +1001,12 @@ void FieldSchema::SerializeWithCachedSizes( ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(8, this->autoid(), output); } + // .milvus.proto.schema.FieldState state = 9; + if (this->state() != 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnum( + 9, this->state(), output); + } + if (_internal_metadata_.have_unknown_fields()) { ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( _internal_metadata_.unknown_fields(), output); @@ -1031,6 +1079,12 @@ void FieldSchema::SerializeWithCachedSizes( target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(8, this->autoid(), target); } + // .milvus.proto.schema.FieldState state = 9; + if (this->state() != 0) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnumToArray( + 9, this->state(), target); + } + if (_internal_metadata_.have_unknown_fields()) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields(), target); @@ -1111,6 +1165,12 @@ size_t FieldSchema::ByteSizeLong() const { total_size += 1 + 1; } + // .milvus.proto.schema.FieldState state = 9; + if (this->state() != 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::EnumSize(this->state()); + } + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); SetCachedSize(cached_size); return total_size; @@ -1160,6 +1220,9 @@ void FieldSchema::MergeFrom(const FieldSchema& from) { if (from.autoid() != 0) { set_autoid(from.autoid()); } + if (from.state() != 0) { + set_state(from.state()); + } } void FieldSchema::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { @@ -1193,6 +1256,7 @@ void FieldSchema::InternalSwap(FieldSchema* other) { swap(data_type_, other->data_type_); swap(is_primary_key_, other->is_primary_key_); swap(autoid_, other->autoid_); + swap(state_, other->state_); } ::PROTOBUF_NAMESPACE_ID::Metadata FieldSchema::GetMetadata() const { diff --git a/internal/core/src/pb/schema.pb.h b/internal/core/src/pb/schema.pb.h index 
7bd1b94c69..349875d8d3 100644 --- a/internal/core/src/pb/schema.pb.h +++ b/internal/core/src/pb/schema.pb.h @@ -159,6 +159,33 @@ inline bool DataType_Parse( return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( DataType_descriptor(), name, value); } +enum FieldState : int { + FieldCreated = 0, + FieldCreating = 1, + FieldDropping = 2, + FieldDropped = 3, + FieldState_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::PROTOBUF_NAMESPACE_ID::int32>::min(), + FieldState_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::PROTOBUF_NAMESPACE_ID::int32>::max() +}; +bool FieldState_IsValid(int value); +constexpr FieldState FieldState_MIN = FieldCreated; +constexpr FieldState FieldState_MAX = FieldDropped; +constexpr int FieldState_ARRAYSIZE = FieldState_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* FieldState_descriptor(); +template +inline const std::string& FieldState_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function FieldState_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + FieldState_descriptor(), enum_t_value); +} +inline bool FieldState_Parse( + const std::string& name, FieldState* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + FieldState_descriptor(), name, value); +} // =================================================================== class FieldSchema : @@ -282,6 +309,7 @@ class FieldSchema : kDataTypeFieldNumber = 5, kIsPrimaryKeyFieldNumber = 3, kAutoIDFieldNumber = 8, + kStateFieldNumber = 9, }; // repeated .milvus.proto.common.KeyValuePair type_params = 6; int type_params_size() const; @@ -347,6 +375,11 @@ class FieldSchema : bool autoid() const; void set_autoid(bool value); + // .milvus.proto.schema.FieldState state = 9; + void clear_state(); + ::milvus::proto::schema::FieldState state() const; + void set_state(::milvus::proto::schema::FieldState value); + // @@protoc_insertion_point(class_scope:milvus.proto.schema.FieldSchema) private: class _Internal; @@ -360,6 +393,7 @@ class FieldSchema : int data_type_; bool is_primary_key_; bool autoid_; + int state_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_schema_2eproto; }; @@ -2669,6 +2703,20 @@ inline void FieldSchema::set_autoid(bool value) { // @@protoc_insertion_point(field_set:milvus.proto.schema.FieldSchema.autoID) } +// .milvus.proto.schema.FieldState state = 9; +inline void FieldSchema::clear_state() { + state_ = 0; +} +inline ::milvus::proto::schema::FieldState FieldSchema::state() const { + // @@protoc_insertion_point(field_get:milvus.proto.schema.FieldSchema.state) + return static_cast< ::milvus::proto::schema::FieldState >(state_); +} +inline void FieldSchema::set_state(::milvus::proto::schema::FieldState value) { + + state_ = value; + // @@protoc_insertion_point(field_set:milvus.proto.schema.FieldSchema.state) +} + // ------------------------------------------------------------------- // CollectionSchema @@ -4070,6 +4118,11 @@ template <> inline const EnumDescriptor* GetEnumDescriptor< ::milvus::proto::schema::DataType>() { return ::milvus::proto::schema::DataType_descriptor(); } +template <> struct is_proto_enum< ::milvus::proto::schema::FieldState> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::milvus::proto::schema::FieldState>() { + return ::milvus::proto::schema::FieldState_descriptor(); +} PROTOBUF_NAMESPACE_CLOSE diff --git a/internal/datacoord/mock_test.go 
b/internal/datacoord/mock_test.go index d1bb217dda..129ccd9691 100644 --- a/internal/datacoord/mock_test.go +++ b/internal/datacoord/mock_test.go @@ -432,10 +432,6 @@ func (m *mockRootCoordService) UpdateChannelTimeTick(ctx context.Context, req *i panic("not implemented") // TODO: Implement } -func (m *mockRootCoordService) ReleaseDQLMessageStream(ctx context.Context, req *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - panic("not implemented") // TODO: Implement -} - func (m *mockRootCoordService) InvalidateCollectionMetaCache(ctx context.Context, req *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { panic("not implemented") // TODO: Implement } diff --git a/internal/distributed/proxy/client/client.go b/internal/distributed/proxy/client/client.go index 966a62db48..975ad94da7 100644 --- a/internal/distributed/proxy/client/client.go +++ b/internal/distributed/proxy/client/client.go @@ -136,46 +136,6 @@ func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, req *proxypb return ret.(*commonpb.Status), err } -// ReleaseDQLMessageStream release dql message stream by request -func (c *Client) ReleaseDQLMessageStream(ctx context.Context, req *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) { - if !funcutil.CheckCtxValid(ctx) { - return nil, ctx.Err() - } - return client.(proxypb.ProxyClient).ReleaseDQLMessageStream(ctx, req) - }) - if err != nil || ret == nil { - return nil, err - } - return ret.(*commonpb.Status), err -} - -func (c *Client) SendSearchResult(ctx context.Context, results *internalpb.SearchResults) (*commonpb.Status, error) { - ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) { - if !funcutil.CheckCtxValid(ctx) { - return nil, ctx.Err() - } - return client.(proxypb.ProxyClient).SendSearchResult(ctx, results) - }) - if err != nil || ret == nil { - return nil, err - } - return ret.(*commonpb.Status), err -} - -func (c *Client) SendRetrieveResult(ctx context.Context, results *internalpb.RetrieveResults) (*commonpb.Status, error) { - ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) { - if !funcutil.CheckCtxValid(ctx) { - return nil, ctx.Err() - } - return client.(proxypb.ProxyClient).SendRetrieveResult(ctx, results) - }) - if err != nil || ret == nil { - return nil, err - } - return ret.(*commonpb.Status), err -} - func (c *Client) InvalidateCredentialCache(ctx context.Context, req *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) { ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) { if !funcutil.CheckCtxValid(ctx) { diff --git a/internal/distributed/proxy/client/client_test.go b/internal/distributed/proxy/client/client_test.go index 1cd8469088..ea8fb62f4b 100644 --- a/internal/distributed/proxy/client/client_test.go +++ b/internal/distributed/proxy/client/client_test.go @@ -67,15 +67,6 @@ func Test_NewClient(t *testing.T) { r3, err := client.InvalidateCollectionMetaCache(ctx, nil) retCheck(retNotNil, r3, err) - r4, err := client.ReleaseDQLMessageStream(ctx, nil) - retCheck(retNotNil, r4, err) - - r5, err := client.SendSearchResult(ctx, nil) - retCheck(retNotNil, r5, err) - - r6, err := client.SendRetrieveResult(ctx, nil) - retCheck(retNotNil, r6, err) - r7, err := client.InvalidateCredentialCache(ctx, nil) retCheck(retNotNil, r7, err) @@ -140,15 +131,6 @@ func Test_NewClient(t *testing.T) { r3Timeout, err := 
client.InvalidateCollectionMetaCache(shortCtx, nil) retCheck(r3Timeout, err) - r4Timeout, err := client.ReleaseDQLMessageStream(shortCtx, nil) - retCheck(r4Timeout, err) - - r5Timeout, err := client.SendSearchResult(shortCtx, nil) - retCheck(r5Timeout, err) - - r6Timeout, err := client.SendRetrieveResult(shortCtx, nil) - retCheck(r6Timeout, err) - r7Timeout, err := client.InvalidateCredentialCache(shortCtx, nil) retCheck(r7Timeout, err) diff --git a/internal/distributed/proxy/service.go b/internal/distributed/proxy/service.go index 0bc66157c8..3a788b0519 100644 --- a/internal/distributed/proxy/service.go +++ b/internal/distributed/proxy/service.go @@ -548,11 +548,6 @@ func (s *Server) InvalidateCollectionMetaCache(ctx context.Context, request *pro return s.proxy.InvalidateCollectionMetaCache(ctx, request) } -// ReleaseDQLMessageStream notifies Proxy to release and close the search message stream of specific collection. -func (s *Server) ReleaseDQLMessageStream(ctx context.Context, request *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - return s.proxy.ReleaseDQLMessageStream(ctx, request) -} - // CreateCollection notifies Proxy to create a collection func (s *Server) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) { return s.proxy.CreateCollection(ctx, request) @@ -743,14 +738,6 @@ func (s *Server) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateR return s.proxy.GetFlushState(ctx, req) } -func (s *Server) SendSearchResult(ctx context.Context, results *internalpb.SearchResults) (*commonpb.Status, error) { - return s.proxy.SendSearchResult(ctx, results) -} - -func (s *Server) SendRetrieveResult(ctx context.Context, results *internalpb.RetrieveResults) (*commonpb.Status, error) { - return s.proxy.SendRetrieveResult(ctx, results) -} - func (s *Server) Import(ctx context.Context, req *milvuspb.ImportRequest) (*milvuspb.ImportResponse, error) { return s.proxy.Import(ctx, req) } diff --git a/internal/distributed/proxy/service_test.go b/internal/distributed/proxy/service_test.go index c58ef29c5d..8b4014b5c5 100644 --- a/internal/distributed/proxy/service_test.go +++ b/internal/distributed/proxy/service_test.go @@ -182,10 +182,6 @@ func (m *MockRootCoord) ShowSegments(ctx context.Context, req *milvuspb.ShowSegm return nil, nil } -func (m *MockRootCoord) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - return nil, nil -} - func (m *MockRootCoord) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { return nil, nil } @@ -597,10 +593,6 @@ func (m *MockProxy) InvalidateCollectionMetaCache(ctx context.Context, request * return nil, nil } -func (m *MockProxy) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - return nil, nil -} - func (m *MockProxy) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) { return nil, nil } @@ -784,14 +776,6 @@ func (m *MockProxy) GetFlushState(ctx context.Context, req *milvuspb.GetFlushSta return nil, nil } -func (m *MockProxy) SendSearchResult(ctx context.Context, req *internalpb.SearchResults) (*commonpb.Status, error) { - return nil, nil -} - -func (m *MockProxy) SendRetrieveResult(ctx context.Context, req *internalpb.RetrieveResults) (*commonpb.Status, error) { - return nil, nil -} - func (m *MockProxy) Import(ctx 
context.Context, req *milvuspb.ImportRequest) (*milvuspb.ImportResponse, error) { return nil, nil } @@ -1014,11 +998,6 @@ func Test_NewServer(t *testing.T) { assert.Nil(t, err) }) - t.Run("ReleaseDQLMessageStream", func(t *testing.T) { - _, err := server.ReleaseDQLMessageStream(ctx, nil) - assert.Nil(t, err) - }) - t.Run("CreateCollection", func(t *testing.T) { _, err := server.CreateCollection(ctx, nil) assert.Nil(t, err) diff --git a/internal/distributed/rootcoord/client/client.go b/internal/distributed/rootcoord/client/client.go index 447e01fc4b..45c2f46bc2 100644 --- a/internal/distributed/rootcoord/client/client.go +++ b/internal/distributed/rootcoord/client/client.go @@ -343,20 +343,6 @@ func (c *Client) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequ return ret.(*milvuspb.ShowSegmentsResponse), err } -// ReleaseDQLMessageStream release DQL msgstream -func (c *Client) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) { - if !funcutil.CheckCtxValid(ctx) { - return nil, ctx.Err() - } - return client.(rootcoordpb.RootCoordClient).ReleaseDQLMessageStream(ctx, in) - }) - if err != nil || ret == nil { - return nil, err - } - return ret.(*commonpb.Status), err -} - // InvalidateCollectionMetaCache notifies RootCoord to release the collection cache in Proxies. func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) { diff --git a/internal/distributed/rootcoord/client/client_test.go b/internal/distributed/rootcoord/client/client_test.go index 5bbf42a6e1..dc1b683d0f 100644 --- a/internal/distributed/rootcoord/client/client_test.go +++ b/internal/distributed/rootcoord/client/client_test.go @@ -124,10 +124,6 @@ func Test_NewClient(t *testing.T) { r, err := client.ShowSegments(ctx, nil) retCheck(retNotNil, r, err) } - { - r, err := client.ReleaseDQLMessageStream(ctx, nil) - retCheck(retNotNil, r, err) - } { r, err := client.GetMetrics(ctx, nil) retCheck(retNotNil, r, err) @@ -326,10 +322,6 @@ func Test_NewClient(t *testing.T) { rTimeout, err := client.ShowSegments(shortCtx, nil) retCheck(rTimeout, err) } - { - rTimeout, err := client.ReleaseDQLMessageStream(shortCtx, nil) - retCheck(rTimeout, err) - } { rTimeout, err := client.GetMetrics(shortCtx, nil) retCheck(rTimeout, err) diff --git a/internal/distributed/rootcoord/service.go b/internal/distributed/rootcoord/service.go index 6637e80964..331352375d 100644 --- a/internal/distributed/rootcoord/service.go +++ b/internal/distributed/rootcoord/service.go @@ -405,11 +405,6 @@ func (s *Server) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequ return s.rootCoord.ShowSegments(ctx, in) } -// ReleaseDQLMessageStream notifies RootCoord to release and close the search message stream of specific collection. -func (s *Server) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - return s.rootCoord.ReleaseDQLMessageStream(ctx, in) -} - // InvalidateCollectionMetaCache notifies RootCoord to release the collection cache in Proxies. 
func (s *Server) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { return s.rootCoord.InvalidateCollectionMetaCache(ctx, in) diff --git a/internal/distributed/rootcoord/service_test.go b/internal/distributed/rootcoord/service_test.go index 6fe4c398b0..2d7c582ed0 100644 --- a/internal/distributed/rootcoord/service_test.go +++ b/internal/distributed/rootcoord/service_test.go @@ -18,32 +18,21 @@ package grpcrootcoord import ( "context" - "encoding/json" "fmt" "math/rand" "path" - "sync" "testing" "time" - "github.com/milvus-io/milvus/internal/proto/indexpb" - clientv3 "go.etcd.io/etcd/client/v3" - "github.com/golang/protobuf/proto" "github.com/stretchr/testify/assert" - rcc "github.com/milvus-io/milvus/internal/distributed/rootcoord/client" "github.com/milvus-io/milvus/internal/proto/commonpb" - "github.com/milvus-io/milvus/internal/proto/datapb" "github.com/milvus-io/milvus/internal/proto/internalpb" - "github.com/milvus-io/milvus/internal/proto/milvuspb" "github.com/milvus-io/milvus/internal/proto/proxypb" - "github.com/milvus-io/milvus/internal/proto/rootcoordpb" - "github.com/milvus-io/milvus/internal/proto/schemapb" "github.com/milvus-io/milvus/internal/rootcoord" "github.com/milvus-io/milvus/internal/types" - "github.com/milvus-io/milvus/internal/util/dependency" "github.com/milvus-io/milvus/internal/util/etcd" "github.com/milvus-io/milvus/internal/util/retry" "github.com/milvus-io/milvus/internal/util/sessionutil" @@ -59,595 +48,6 @@ func (p *proxyMock) InvalidateCollectionMetaCache(ctx context.Context, request * return p.invalidateCollectionMetaCache(ctx, request) } -func (p *proxyMock) ReleaseDQLMessageStream(ctx context.Context, request *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, nil -} - -func TestGrpcService(t *testing.T) { - const ( - dbName = "testDB" - collName = "testColl" - collName2 = "testColl-again" - partName = "testPartition" - fieldName = "vector" - fieldID = 100 - segID = 1001 - ) - rand.Seed(time.Now().UnixNano()) - randVal := rand.Int() - - Params.InitOnce(typeutil.RootCoordRole) - Params.Port = (randVal % 100) + 10000 - t.Log("newParams.Address:", Params.GetAddress()) - - ctx := context.Background() - factory := dependency.NewDefaultFactory(true) - svr, err := NewServer(ctx, factory) - assert.Nil(t, err) - - rootcoord.Params.Init() - rootcoord.Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/test/meta", randVal) - rootcoord.Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/test/kv", randVal) - rootcoord.Params.CommonCfg.RootCoordSubName = fmt.Sprintf("msgChannel%d", randVal) - rootcoord.Params.CommonCfg.RootCoordTimeTick = fmt.Sprintf("timeTick%d", randVal) - rootcoord.Params.CommonCfg.RootCoordStatistics = fmt.Sprintf("stateChannel%d", randVal) - - rootcoord.Params.RootCoordCfg.MaxPartitionNum = 64 - rootcoord.Params.CommonCfg.DefaultPartitionName = "_default" - rootcoord.Params.CommonCfg.DefaultIndexName = "_default" - - t.Logf("service port = %d", Params.Port) - - core, ok := (svr.rootCoord).(*rootcoord.Core) - assert.True(t, ok) - - err = svr.startGrpc(Params.Port) - assert.Nil(t, err) - svr.rootCoord.UpdateStateCode(internalpb.StateCode_Initializing) - - etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.Nil(t, err) - sessKey := path.Join(rootcoord.Params.EtcdCfg.MetaRootPath, sessionutil.DefaultServiceRoot) - _, err = etcdCli.Delete(ctx, sessKey, clientv3.WithPrefix()) - assert.Nil(t, err) 
- - pnb, err := json.Marshal( - &sessionutil.Session{ - ServerID: 100, - }, - ) - assert.Nil(t, err) - _, err = etcdCli.Put(ctx, path.Join(sessKey, typeutil.ProxyRole+"-100"), string(pnb)) - assert.Nil(t, err) - - rootcoord.Params.RootCoordCfg.Address = Params.GetAddress() - - core.SetEtcdClient(etcdCli) - err = core.Init() - assert.Nil(t, err) - - timeTickArray := make([]typeutil.Timestamp, 0, 16) - timeTickLock := sync.Mutex{} - core.SendTimeTick = func(ts typeutil.Timestamp, reason string) error { - timeTickLock.Lock() - defer timeTickLock.Unlock() - t.Logf("send time tick %d", ts) - timeTickArray = append(timeTickArray, ts) - return nil - } - core.SendDdCreateCollectionReq = func(ctx context.Context, req *internalpb.CreateCollectionRequest, channelNames []string) (map[string][]byte, error) { - return map[string][]byte{}, nil - } - - dropCollectionArray := make([]*internalpb.DropCollectionRequest, 0, 16) - core.SendDdDropCollectionReq = func(ctx context.Context, req *internalpb.DropCollectionRequest, channelNames []string) error { - t.Logf("Drop Collection %s", req.CollectionName) - dropCollectionArray = append(dropCollectionArray, req) - return nil - } - - createPartitionArray := make([]*internalpb.CreatePartitionRequest, 0, 16) - core.SendDdCreatePartitionReq = func(ctx context.Context, req *internalpb.CreatePartitionRequest, channelNames []string) error { - t.Logf("Create Partition %s", req.PartitionName) - createPartitionArray = append(createPartitionArray, req) - return nil - } - - dropPartitionArray := make([]*internalpb.DropPartitionRequest, 0, 16) - core.SendDdDropPartitionReq = func(ctx context.Context, req *internalpb.DropPartitionRequest, channelNames []string) error { - t.Logf("Drop Partition %s", req.PartitionName) - dropPartitionArray = append(dropPartitionArray, req) - return nil - } - - core.CallGetRecoveryInfoService = func(ctx context.Context, collID, partID rootcoord.UniqueID) ([]*datapb.SegmentBinlogs, error) { - return []*datapb.SegmentBinlogs{ - { - SegmentID: segID, - NumOfRows: rootcoord.Params.RootCoordCfg.MinSegmentSizeToEnableIndex, - FieldBinlogs: []*datapb.FieldBinlog{ - { - FieldID: fieldID, - Binlogs: []*datapb.Binlog{{LogPath: "file1"}, {LogPath: "file2"}, {LogPath: "file3"}}, - }, - }, - }, - }, nil - } - - core.CallWatchChannels = func(ctx context.Context, collectionID int64, channelNames []string, startPositions []*commonpb.KeyDataPair) error { - return nil - } - - var segs []typeutil.UniqueID - segLock := sync.Mutex{} - core.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error) { - segLock.Lock() - defer segLock.Unlock() - var ret []typeutil.UniqueID - ret = append(ret, segs...) 
- return ret, nil - } - - collectionMetaCache := make([]string, 0, 16) - pnm := proxyMock{} - core.NewProxyClient = func(*sessionutil.Session) (types.Proxy, error) { - return &pnm, nil - } - pnm.invalidateCollectionMetaCache = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { - collectionMetaCache = append(collectionMetaCache, request.CollectionName) - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, nil - } - - core.CallReleaseCollectionService = func(ctx context.Context, ts typeutil.Timestamp, dbID typeutil.UniqueID, collectionID typeutil.UniqueID) error { - return nil - } - core.CallReleasePartitionService = func(ctx context.Context, ts typeutil.Timestamp, dbID, collectionID typeutil.UniqueID, partitionIDs []typeutil.UniqueID) error { - return nil - } - core.CallImportService = func(ctx context.Context, req *datapb.ImportTaskRequest) *datapb.ImportTaskResponse { - return nil - } - core.CallAddSegRefLock = func(context.Context, int64, []int64) error { - return nil - } - core.CallReleaseSegRefLock = func(context.Context, int64, []int64) error { - return nil - } - core.CallDropCollectionIndexService = func(ctx context.Context, collID rootcoord.UniqueID) error { - return nil - } - core.CallGetSegmentIndexStateService = func(ctx context.Context, collID rootcoord.UniqueID, indexName string, segIDs []rootcoord.UniqueID) ([]*indexpb.SegmentIndexState, error) { - return nil, nil - } - - err = svr.start() - assert.Nil(t, err) - - svr.rootCoord.UpdateStateCode(internalpb.StateCode_Healthy) - - cli, err := rcc.NewClient(context.Background(), rootcoord.Params.EtcdCfg.MetaRootPath, etcdCli) - assert.Nil(t, err) - - err = cli.Init() - assert.Nil(t, err) - - err = cli.Start() - assert.Nil(t, err) - - t.Run("get component states", func(t *testing.T) { - req := &internalpb.GetComponentStatesRequest{} - rsp, err := svr.GetComponentStates(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - }) - - t.Run("get time tick channel", func(t *testing.T) { - req := &internalpb.GetTimeTickChannelRequest{} - rsp, err := svr.GetTimeTickChannel(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - }) - - t.Run("get statistics channel", func(t *testing.T) { - req := &internalpb.GetStatisticsChannelRequest{} - rsp, err := svr.GetStatisticsChannel(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - }) - - t.Run("alloc time stamp", func(t *testing.T) { - req := &rootcoordpb.AllocTimestampRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_RequestTSO, - }, - Count: 1, - } - rsp, err := svr.AllocTimestamp(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - }) - - t.Run("alloc id", func(t *testing.T) { - req := &rootcoordpb.AllocIDRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_RequestID, - }, - Count: 1, - } - rsp, err := svr.AllocID(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - }) - - t.Run("update channel timetick", func(t *testing.T) { - req := &internalpb.ChannelTimeTickMsg{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_TimeTick, - }, - } - status, err := svr.UpdateChannelTimeTick(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - }) - - t.Run("release DQL msg stream", func(t *testing.T) { - req := 
&proxypb.ReleaseDQLMessageStreamRequest{} - rsp, err := svr.ReleaseDQLMessageStream(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.ErrorCode) - }) - - t.Run("show configurations", func(t *testing.T) { - req := &internalpb.ShowConfigurationsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_WatchQueryChannels, - MsgID: rand.Int63(), - }, - Pattern: "", - } - rsp, err := svr.ShowConfigurations(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - }) - - t.Run("get metrics", func(t *testing.T) { - req := &milvuspb.GetMetricsRequest{} - rsp, err := svr.GetMetrics(ctx, req) - assert.Nil(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - }) - - t.Run("create collection", func(t *testing.T) { - schema := schemapb.CollectionSchema{ - Name: collName, - AutoID: true, - Fields: []*schemapb.FieldSchema{ - { - FieldID: fieldID, - Name: fieldName, - IsPrimaryKey: false, - DataType: schemapb.DataType_FloatVector, - TypeParams: nil, - IndexParams: []*commonpb.KeyValuePair{ - { - Key: "ik1", - Value: "iv1", - }, - }, - }, - }, - } - - sbf, err := proto.Marshal(&schema) - assert.Nil(t, err) - - req := &milvuspb.CreateCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreateCollection, - MsgID: 100, - Timestamp: 100, - SourceID: 100, - }, - DbName: dbName, - CollectionName: collName, - Schema: sbf, - } - - status, err := cli.CreateCollection(ctx, req) - assert.Nil(t, err) - colls, err := core.MetaTable.ListCollections(0) - assert.Nil(t, err) - - assert.Equal(t, 1, len(colls)) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - //assert.Equal(t, commonpb.MsgType_CreateCollection, createCollectionArray[0].Base.MsgType) - _, has := colls[collName] - assert.True(t, has) - - req.Base.MsgID = 101 - req.Base.Timestamp = 101 - req.Base.SourceID = 101 - status, err = cli.CreateCollection(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode) - - req.Base.MsgID = 102 - req.Base.Timestamp = 102 - req.Base.SourceID = 102 - req.CollectionName = collName2 - status, err = cli.CreateCollection(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode) - - schema.Name = req.CollectionName - sbf, err = proto.Marshal(&schema) - assert.Nil(t, err) - req.Schema = sbf - req.Base.MsgID = 103 - req.Base.Timestamp = 103 - req.Base.SourceID = 103 - status, err = cli.CreateCollection(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - colls, err = core.MetaTable.ListCollections(0) - assert.Nil(t, err) - assert.Equal(t, 2, len(colls)) - _, has = colls[collName2] - assert.True(t, has) - }) - - t.Run("has collection", func(t *testing.T) { - req := &milvuspb.HasCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasCollection, - MsgID: 110, - Timestamp: 110, - SourceID: 110, - }, - DbName: "testDb", - CollectionName: collName, - } - rsp, err := cli.HasCollection(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, true, rsp.Value) - - req = &milvuspb.HasCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasCollection, - MsgID: 111, - Timestamp: 111, - SourceID: 111, - }, - DbName: "testDb", - CollectionName: "testColl2", - } - rsp, err = cli.HasCollection(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, 
rsp.Status.ErrorCode) - assert.Equal(t, false, rsp.Value) - - // test time stamp go back - req = &milvuspb.HasCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasCollection, - MsgID: 111, - Timestamp: 111, - SourceID: 111, - }, - DbName: "testDb", - CollectionName: "testColl2", - } - rsp, err = cli.HasCollection(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, false, rsp.Value) - }) - - t.Run("describe collection", func(t *testing.T) { - collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.Nil(t, err) - req := &milvuspb.DescribeCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DescribeCollection, - MsgID: 120, - Timestamp: 120, - SourceID: 120, - }, - DbName: "testDb", - CollectionName: collName, - } - rsp, err := cli.DescribeCollection(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, collName, rsp.Schema.Name) - assert.Equal(t, collMeta.CollectionID, rsp.CollectionID) - }) - - t.Run("show collection", func(t *testing.T) { - req := &milvuspb.ShowCollectionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowCollections, - MsgID: 130, - Timestamp: 130, - SourceID: 130, - }, - DbName: "testDb", - } - rsp, err := cli.ShowCollections(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.ElementsMatch(t, rsp.CollectionNames, []string{collName, collName2}) - assert.Equal(t, 2, len(rsp.CollectionNames)) - }) - - t.Run("create partition", func(t *testing.T) { - req := &milvuspb.CreatePartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreatePartition, - MsgID: 140, - Timestamp: 140, - SourceID: 140, - }, - DbName: dbName, - CollectionName: collName, - PartitionName: partName, - } - status, err := cli.CreatePartition(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.Nil(t, err) - assert.Equal(t, 2, len(collMeta.Partitions)) - partName2, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[1].PartitionID, 0) - assert.Nil(t, err) - assert.Equal(t, partName, partName2) - assert.Equal(t, 1, len(collectionMetaCache)) - }) - - t.Run("has partition", func(t *testing.T) { - req := &milvuspb.HasPartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasPartition, - MsgID: 150, - Timestamp: 150, - SourceID: 150, - }, - DbName: dbName, - CollectionName: collName, - PartitionName: partName, - } - rsp, err := cli.HasPartition(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, true, rsp.Value) - }) - - t.Run("show partition", func(t *testing.T) { - coll, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.Nil(t, err) - req := &milvuspb.ShowPartitionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowPartitions, - MsgID: 160, - Timestamp: 160, - SourceID: 160, - }, - DbName: "testDb", - CollectionName: collName, - CollectionID: coll.CollectionID, - } - rsp, err := cli.ShowPartitions(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, 2, len(rsp.PartitionNames)) - assert.Equal(t, 2, len(rsp.PartitionIDs)) - }) - - t.Run("show segment", func(t *testing.T) { - coll, err := 
core.MetaTable.GetCollectionByName(collName, 0) - assert.Nil(t, err) - partID := coll.Partitions[1].PartitionID - _, err = core.MetaTable.GetPartitionNameByID(coll.CollectionID, partID, 0) - assert.Nil(t, err) - - segLock.Lock() - segs = []typeutil.UniqueID{1000} - segLock.Unlock() - - req := &milvuspb.ShowSegmentsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowSegments, - MsgID: 170, - Timestamp: 170, - SourceID: 170, - }, - CollectionID: coll.CollectionID, - PartitionID: partID, - } - rsp, err := cli.ShowSegments(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, int64(1000), rsp.SegmentIDs[0]) - assert.Equal(t, 1, len(rsp.SegmentIDs)) - }) - - t.Run("drop partition", func(t *testing.T) { - req := &milvuspb.DropPartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropPartition, - MsgID: 220, - Timestamp: 220, - SourceID: 220, - }, - DbName: dbName, - CollectionName: collName, - PartitionName: partName, - } - status, err := cli.DropPartition(ctx, req) - assert.Nil(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.Nil(t, err) - assert.Equal(t, 1, len(collMeta.Partitions)) - partName, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[0].PartitionID, 0) - assert.Nil(t, err) - assert.Equal(t, rootcoord.Params.CommonCfg.DefaultPartitionName, partName) - assert.Equal(t, 2, len(collectionMetaCache)) - }) - - t.Run("drop collection", func(t *testing.T) { - req := &milvuspb.DropCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropCollection, - MsgID: 230, - Timestamp: 230, - SourceID: 230, - }, - DbName: "testDb", - CollectionName: collName, - } - - status, err := cli.DropCollection(ctx, req) - assert.Nil(t, err) - assert.Equal(t, 1, len(dropCollectionArray)) - assert.Equal(t, commonpb.MsgType_DropCollection, dropCollectionArray[0].Base.MsgType) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - assert.Equal(t, collName, dropCollectionArray[0].CollectionName) - assert.Equal(t, 3, len(collectionMetaCache)) - - req = &milvuspb.DropCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropCollection, - MsgID: 231, - Timestamp: 231, - SourceID: 231, - }, - DbName: "testDb", - CollectionName: collName, - } - status, err = cli.DropCollection(ctx, req) - assert.Nil(t, err) - assert.Equal(t, 1, len(dropCollectionArray)) - assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode) - }) - - err = cli.Stop() - assert.Nil(t, err) - - err = svr.Stop() - assert.Nil(t, err) - - _, err = etcdCli.Delete(ctx, sessKey, clientv3.WithPrefix()) - assert.Nil(t, err) - -} - type mockCore struct { types.RootCoordComponent } diff --git a/internal/kv/kv.go b/internal/kv/kv.go index e3e360c499..df898ba7bf 100644 --- a/internal/kv/kv.go +++ b/internal/kv/kv.go @@ -50,6 +50,7 @@ type BaseKV interface { Close() } +//go:generate mockery --name=TxnKV // TxnKV contains extra txn operations of kv. The extra operations is transactional. type TxnKV interface { BaseKV @@ -77,6 +78,7 @@ type MetaKv interface { CompareVersionAndSwap(key string, version int64, target string, opts ...clientv3.OpOption) (bool, error) } +//go:generate mockery --name=SnapShotKV // SnapShotKV is TxnKV for snapshot data. It must save timestamp. 
type SnapShotKV interface { Save(key string, value string, ts typeutil.Timestamp) error diff --git a/internal/kv/mock_txn_kv.go b/internal/kv/mock_txn_kv.go new file mode 100644 index 0000000000..53369b1de9 --- /dev/null +++ b/internal/kv/mock_txn_kv.go @@ -0,0 +1,59 @@ +package kv + +type TxnKVMock struct { + TxnKV + SaveF func(key, value string) error + RemoveF func(key string) error +} + +func (m TxnKVMock) Load(key string) (string, error) { + panic("implement me") +} + +func (m TxnKVMock) MultiLoad(keys []string) ([]string, error) { + panic("implement me") +} + +func (m TxnKVMock) LoadWithPrefix(key string) ([]string, []string, error) { + panic("implement me") +} + +func (m TxnKVMock) Save(key, value string) error { + return m.SaveF(key, value) +} + +func (m TxnKVMock) MultiSave(kvs map[string]string) error { + panic("implement me") +} + +func (m TxnKVMock) Remove(key string) error { + return m.RemoveF(key) +} + +func (m TxnKVMock) MultiRemove(keys []string) error { + panic("implement me") +} + +func (m TxnKVMock) RemoveWithPrefix(key string) error { + panic("implement me") +} + +func (m TxnKVMock) Close() { + panic("implement me") +} + +func (m TxnKVMock) MultiSaveAndRemove(saves map[string]string, removals []string) error { + panic("implement me") +} + +func (m TxnKVMock) MultiRemoveWithPrefix(keys []string) error { + panic("implement me") +} + +func (m TxnKVMock) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string) error { + panic("implement me") +} + +func NewMockTxnKV() *TxnKVMock { + return &TxnKVMock{} +} diff --git a/internal/kv/mocks/SnapShotKV.go b/internal/kv/mocks/SnapShotKV.go new file mode 100644 index 0000000000..61bd3da4ce --- /dev/null +++ b/internal/kv/mocks/SnapShotKV.go @@ -0,0 +1,120 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. 
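The hand-rolled TxnKVMock above takes the function-field approach: a test assigns only the hooks it needs (SaveF, RemoveF) and any call to an unstubbed method panics immediately. A minimal usage sketch, not part of the patch; the test name and package layout are illustrative:

package kv_test

import (
	"testing"

	"github.com/milvus-io/milvus/internal/kv"
)

// Hypothetical test: only Save and Remove are stubbed; calling any other
// method on the mock would panic with "implement me".
func TestTxnKVMock_SaveAndRemove(t *testing.T) {
	m := kv.NewMockTxnKV()
	saved := map[string]string{}
	m.SaveF = func(key, value string) error {
		saved[key] = value
		return nil
	}
	m.RemoveF = func(key string) error {
		delete(saved, key)
		return nil
	}

	if err := m.Save("k", "v"); err != nil {
		t.Fatalf("Save failed: %v", err)
	}
	if err := m.Remove("k"); err != nil {
		t.Fatalf("Remove failed: %v", err)
	}
	if len(saved) != 0 {
		t.Fatalf("expected all recorded keys to be removed, got %v", saved)
	}
}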
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// SnapShotKV is an autogenerated mock type for the SnapShotKV type +type SnapShotKV struct { + mock.Mock +} + +// Load provides a mock function with given fields: key, ts +func (_m *SnapShotKV) Load(key string, ts uint64) (string, error) { + ret := _m.Called(key, ts) + + var r0 string + if rf, ok := ret.Get(0).(func(string, uint64) string); ok { + r0 = rf(key, ts) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, uint64) error); ok { + r1 = rf(key, ts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadWithPrefix provides a mock function with given fields: key, ts +func (_m *SnapShotKV) LoadWithPrefix(key string, ts uint64) ([]string, []string, error) { + ret := _m.Called(key, ts) + + var r0 []string + if rf, ok := ret.Get(0).(func(string, uint64) []string); ok { + r0 = rf(key, ts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 []string + if rf, ok := ret.Get(1).(func(string, uint64) []string); ok { + r1 = rf(key, ts) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]string) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(string, uint64) error); ok { + r2 = rf(key, ts) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MultiSave provides a mock function with given fields: kvs, ts +func (_m *SnapShotKV) MultiSave(kvs map[string]string, ts uint64) error { + ret := _m.Called(kvs, ts) + + var r0 error + if rf, ok := ret.Get(0).(func(map[string]string, uint64) error); ok { + r0 = rf(kvs, ts) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MultiSaveAndRemoveWithPrefix provides a mock function with given fields: saves, removals, ts +func (_m *SnapShotKV) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string, ts uint64) error { + ret := _m.Called(saves, removals, ts) + + var r0 error + if rf, ok := ret.Get(0).(func(map[string]string, []string, uint64) error); ok { + r0 = rf(saves, removals, ts) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Save provides a mock function with given fields: key, value, ts +func (_m *SnapShotKV) Save(key string, value string, ts uint64) error { + ret := _m.Called(key, value, ts) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string, uint64) error); ok { + r0 = rf(key, value, ts) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type mockConstructorTestingTNewSnapShotKV interface { + mock.TestingT + Cleanup(func()) +} + +// NewSnapShotKV creates a new instance of SnapShotKV. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewSnapShotKV(t mockConstructorTestingTNewSnapShotKV) *SnapShotKV { + mock := &SnapShotKV{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/kv/mocks/TxnKV.go b/internal/kv/mocks/TxnKV.go new file mode 100644 index 0000000000..9197f72bcd --- /dev/null +++ b/internal/kv/mocks/TxnKV.go @@ -0,0 +1,218 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. 
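In contrast to the function-field mock, the generated SnapShotKV mock above is expectation-driven: NewSnapShotKV wires AssertExpectations into t.Cleanup, so a declared-but-unmet expectation fails the test on its own. A short usage sketch, not part of the patch; the same On(...)/Return(...) pattern appears later in this patch in the kv_catalog tests' withMockSave helpers, and since the interfaces now carry //go:generate mockery directives, these files can presumably be regenerated with go generate over internal/kv.

package mocks_test

import (
	"testing"

	"github.com/milvus-io/milvus/internal/kv/mocks"
	"github.com/stretchr/testify/mock"
)

// Hypothetical test: the expectation below must be matched exactly once,
// otherwise the cleanup registered by NewSnapShotKV fails the test.
func TestSnapShotKVMock_Save(t *testing.T) {
	ss := mocks.NewSnapShotKV(t)
	ss.On("Save",
		mock.AnythingOfType("string"),
		mock.AnythingOfType("string"),
		mock.AnythingOfType("uint64")).
		Return(nil).
		Once()

	if err := ss.Save("key", "value", uint64(100)); err != nil {
		t.Fatalf("Save failed: %v", err)
	}
}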
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// TxnKV is an autogenerated mock type for the TxnKV type +type TxnKV struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *TxnKV) Close() { + _m.Called() +} + +// Load provides a mock function with given fields: key +func (_m *TxnKV) Load(key string) (string, error) { + ret := _m.Called(key) + + var r0 string + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(key) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadWithPrefix provides a mock function with given fields: key +func (_m *TxnKV) LoadWithPrefix(key string) ([]string, []string, error) { + ret := _m.Called(key) + + var r0 []string + if rf, ok := ret.Get(0).(func(string) []string); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 []string + if rf, ok := ret.Get(1).(func(string) []string); ok { + r1 = rf(key) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]string) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(string) error); ok { + r2 = rf(key) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MultiLoad provides a mock function with given fields: keys +func (_m *TxnKV) MultiLoad(keys []string) ([]string, error) { + ret := _m.Called(keys) + + var r0 []string + if rf, ok := ret.Get(0).(func([]string) []string); ok { + r0 = rf(keys) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func([]string) error); ok { + r1 = rf(keys) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MultiRemove provides a mock function with given fields: keys +func (_m *TxnKV) MultiRemove(keys []string) error { + ret := _m.Called(keys) + + var r0 error + if rf, ok := ret.Get(0).(func([]string) error); ok { + r0 = rf(keys) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MultiRemoveWithPrefix provides a mock function with given fields: keys +func (_m *TxnKV) MultiRemoveWithPrefix(keys []string) error { + ret := _m.Called(keys) + + var r0 error + if rf, ok := ret.Get(0).(func([]string) error); ok { + r0 = rf(keys) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MultiSave provides a mock function with given fields: kvs +func (_m *TxnKV) MultiSave(kvs map[string]string) error { + ret := _m.Called(kvs) + + var r0 error + if rf, ok := ret.Get(0).(func(map[string]string) error); ok { + r0 = rf(kvs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MultiSaveAndRemove provides a mock function with given fields: saves, removals +func (_m *TxnKV) MultiSaveAndRemove(saves map[string]string, removals []string) error { + ret := _m.Called(saves, removals) + + var r0 error + if rf, ok := ret.Get(0).(func(map[string]string, []string) error); ok { + r0 = rf(saves, removals) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MultiSaveAndRemoveWithPrefix provides a mock function with given fields: saves, removals +func (_m *TxnKV) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string) error { + ret := _m.Called(saves, removals) + + var r0 error + if rf, ok := ret.Get(0).(func(map[string]string, []string) error); ok { + r0 = rf(saves, removals) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Remove provides a mock function with given fields: key +func (_m *TxnKV) Remove(key string) error { + ret 
:= _m.Called(key) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RemoveWithPrefix provides a mock function with given fields: key +func (_m *TxnKV) RemoveWithPrefix(key string) error { + ret := _m.Called(key) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Save provides a mock function with given fields: key, value +func (_m *TxnKV) Save(key string, value string) error { + ret := _m.Called(key, value) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(key, value) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type mockConstructorTestingTNewTxnKV interface { + mock.TestingT + Cleanup(func()) +} + +// NewTxnKV creates a new instance of TxnKV. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewTxnKV(t mockConstructorTestingTNewTxnKV) *TxnKV { + mock := &TxnKV{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/log/meta_logger_test.go b/internal/log/meta_logger_test.go index 78b73b841b..a5eab75da9 100644 --- a/internal/log/meta_logger_test.go +++ b/internal/log/meta_logger_test.go @@ -32,8 +32,6 @@ func TestMetaLogger(t *testing.T) { WithOperation(DropCollection).Info() ts.assertMessagesContains("CollectionID=0") - ts.assertMessagesContains("CollectionMeta=eyJUZW5hbnRJRCI6IiIsIkNvbGxlY3Rpb25JRCI6MCwiUGFydGl0aW9ucyI6bnVsbCwiTmFtZSI6IiIsIkRlc2NyaXB0aW9uIjoiIiwiQXV0b0lEIjpmYWxzZSwiRmllbGRzIjpudWxsLCJWaXJ0dWFsQ2hhbm5lbE5hbWVzIjpudWxsLCJQaHlzaWNhbENoYW5uZWxOYW1lcyI6bnVsbCwiU2hhcmRzTnVtIjowLCJTdGFydFBvc2l0aW9ucyI6bnVsbCwiQ3JlYXRlVGltZSI6MCwiQ29uc2lzdGVuY3lMZXZlbCI6MCwiQWxpYXNlcyI6bnVsbCwiRXh0cmEiOm51bGx9") - ts.assertMessagesContains("IndexMeta=eyJUZW5hbnRJRCI6IiIsIkNvbGxlY3Rpb25JRCI6MCwiRmllbGRJRCI6MCwiSW5kZXhJRCI6MCwiSW5kZXhOYW1lIjoiIiwiSXNEZWxldGVkIjpmYWxzZSwiQ3JlYXRlVGltZSI6MCwiVHlwZVBhcmFtcyI6bnVsbCwiSW5kZXhQYXJhbXMiOm51bGx9") ts.assertMessagesContains("CollectionName=coll") ts.assertMessagesContains("PartitionID=0") ts.assertMessagesContains("PartitionName=part") diff --git a/internal/metastore/catalog.go b/internal/metastore/catalog.go index 2e651d53e2..c56110e723 100644 --- a/internal/metastore/catalog.go +++ b/internal/metastore/catalog.go @@ -16,9 +16,11 @@ type RootCoordCatalog interface { ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) CollectionExists(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) bool DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error + AlterCollection(ctx context.Context, oldColl *model.Collection, newColl *model.Collection, alterType AlterType, ts typeutil.Timestamp) error CreatePartition(ctx context.Context, partition *model.Partition, ts typeutil.Timestamp) error DropPartition(ctx context.Context, collectionID typeutil.UniqueID, partitionID typeutil.UniqueID, ts typeutil.Timestamp) error + AlterPartition(ctx context.Context, oldPart *model.Partition, newPart *model.Partition, alterType AlterType, ts typeutil.Timestamp) error CreateAlias(ctx context.Context, alias *model.Alias, ts typeutil.Timestamp) error DropAlias(ctx context.Context, alias string, ts typeutil.Timestamp) error @@ -53,6 +55,18 @@ const ( MODIFY ) +func (t AlterType) String() string { + switch t { + case ADD: + 
return "ADD" + case DELETE: + return "DELETE" + case MODIFY: + return "MODIFY" + } + return "" +} + type DataCoordCatalog interface { ListSegments(ctx context.Context) ([]*datapb.SegmentInfo, error) AddSegment(ctx context.Context, segment *datapb.SegmentInfo) error diff --git a/internal/metastore/catalog_test.go b/internal/metastore/catalog_test.go new file mode 100644 index 0000000000..5a79eec4b8 --- /dev/null +++ b/internal/metastore/catalog_test.go @@ -0,0 +1,35 @@ +package metastore + +import "testing" + +func TestAlterType_String(t *testing.T) { + tests := []struct { + name string + t AlterType + want string + }{ + { + t: ADD, + want: "ADD", + }, + { + t: DELETE, + want: "DELETE", + }, + { + t: MODIFY, + want: "MODIFY", + }, + { + t: -1, + want: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.t.String(); got != tt.want { + t.Errorf("String() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/metastore/db/dao/collection.go b/internal/metastore/db/dao/collection.go index 8c666d9326..03aa07089c 100644 --- a/internal/metastore/db/dao/collection.go +++ b/internal/metastore/db/dao/collection.go @@ -91,3 +91,27 @@ func (s *collectionDb) Insert(in *dbmodel.Collection) error { return nil } + +func generateCollectionUpdatesWithoutID(in *dbmodel.Collection) map[string]interface{} { + ret := map[string]interface{}{ + "tenant_id": in.TenantID, + "collection_id": in.CollectionID, + "collection_name": in.CollectionName, + "description": in.Description, + "auto_id": in.AutoID, + "shards_num": in.ShardsNum, + "start_position": in.StartPosition, + "consistency_level": in.ConsistencyLevel, + "status": in.Status, + "ts": in.Ts, + "is_deleted": in.IsDeleted, + "created_at": in.CreatedAt, + "updated_at": in.UpdatedAt, + } + return ret +} + +func (s *collectionDb) Update(in *dbmodel.Collection) error { + updates := generateCollectionUpdatesWithoutID(in) + return s.db.Model(&dbmodel.Collection{}).Where("id = ?", in.ID).Updates(updates).Error +} diff --git a/internal/metastore/db/dao/collection_test.go b/internal/metastore/db/dao/collection_test.go index 314a20cfa5..d5bd55c204 100644 --- a/internal/metastore/db/dao/collection_test.go +++ b/internal/metastore/db/dao/collection_test.go @@ -358,8 +358,8 @@ func TestCollection_Insert(t *testing.T) { // expectation mock.ExpectBegin() - mock.ExpectExec("INSERT INTO `collections` (`tenant_id`,`collection_id`,`collection_name`,`description`,`auto_id`,`shards_num`,`start_position`,`consistency_level`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`"). - WithArgs(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Ts, collection.IsDeleted, collection.CreatedAt, collection.UpdatedAt). + mock.ExpectExec("INSERT INTO `collections` (`tenant_id`,`collection_id`,`collection_name`,`description`,`auto_id`,`shards_num`,`start_position`,`consistency_level`,`status`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`"). + WithArgs(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Status, collection.Ts, collection.IsDeleted, collection.CreatedAt, collection.UpdatedAt). 
WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectCommit() @@ -386,8 +386,8 @@ func TestCollection_Insert_Error(t *testing.T) { // expectation mock.ExpectBegin() - mock.ExpectExec("INSERT INTO `collections` (`tenant_id`,`collection_id`,`collection_name`,`description`,`auto_id`,`shards_num`,`start_position`,`consistency_level`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`"). - WithArgs(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Ts, collection.IsDeleted, collection.CreatedAt, collection.UpdatedAt). + mock.ExpectExec("INSERT INTO `collections` (`tenant_id`,`collection_id`,`collection_name`,`description`,`auto_id`,`shards_num`,`start_position`,`consistency_level`,`status`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`"). + WithArgs(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Status, collection.Ts, collection.IsDeleted, collection.CreatedAt, collection.UpdatedAt). WillReturnError(errors.New("test error")) mock.ExpectRollback() @@ -423,3 +423,61 @@ func ErrorExec(f func()) { f() mock.ExpectRollback() } + +func Test_collectionDb_Update(t *testing.T) { + t.Run("normal case", func(t *testing.T) { + var collection = &dbmodel.Collection{ + TenantID: "", + CollectionID: collID1, + CollectionName: "test_collection_name_1", + Description: "", + AutoID: false, + ShardsNum: int32(2), + StartPosition: "", + ConsistencyLevel: int32(commonpb.ConsistencyLevel_Eventually), + Ts: ts, + IsDeleted: false, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } + + // expectation + mock.ExpectBegin() + mock.ExpectExec("UPDATE `collections` SET `auto_id`=?,`collection_id`=?,`collection_name`=?,`consistency_level`=?,`created_at`=?,`description`=?,`is_deleted`=?,`shards_num`=?,`start_position`=?,`status`=?,`tenant_id`=?,`ts`=?,`updated_at`=? WHERE id = ?"). + WithArgs(collection.AutoID, collection.CollectionID, collection.CollectionName, collection.ConsistencyLevel, collection.CreatedAt, collection.Description, collection.IsDeleted, collection.ShardsNum, collection.StartPosition, collection.Status, collection.TenantID, collection.Ts, collection.UpdatedAt, collection.ID). + WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() + + // actual + err := collTestDb.Update(collection) + assert.Nil(t, err) + }) + + t.Run("error", func(t *testing.T) { + var collection = &dbmodel.Collection{ + TenantID: "", + CollectionID: collID1, + CollectionName: "test_collection_name_1", + Description: "", + AutoID: false, + ShardsNum: int32(2), + StartPosition: "", + ConsistencyLevel: int32(commonpb.ConsistencyLevel_Eventually), + Ts: ts, + IsDeleted: false, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } + + // expectation + mock.ExpectBegin() + mock.ExpectExec("UPDATE `collections` SET `auto_id`=?,`collection_id`=?,`collection_name`=?,`consistency_level`=?,`created_at`=?,`description`=?,`is_deleted`=?,`shards_num`=?,`start_position`=?,`status`=?,`tenant_id`=?,`ts`=?,`updated_at`=? WHERE id = ?"). 
+ WithArgs(collection.AutoID, collection.CollectionID, collection.CollectionName, collection.ConsistencyLevel, collection.CreatedAt, collection.Description, collection.IsDeleted, collection.ShardsNum, collection.StartPosition, collection.Status, collection.TenantID, collection.Ts, collection.UpdatedAt, collection.ID). + WillReturnError(errors.New("error mock Update")) + mock.ExpectRollback() + + // actual + err := collTestDb.Update(collection) + assert.Error(t, err) + }) +} diff --git a/internal/metastore/db/dao/partition.go b/internal/metastore/db/dao/partition.go index 1c6f025a3f..38f0bc05b8 100644 --- a/internal/metastore/db/dao/partition.go +++ b/internal/metastore/db/dao/partition.go @@ -33,3 +33,24 @@ func (s *partitionDb) Insert(in []*dbmodel.Partition) error { return nil } + +func generatePartitionUpdatesWithoutID(in *dbmodel.Partition) map[string]interface{} { + ret := map[string]interface{}{ + "tenant_id": in.TenantID, + "partition_id": in.PartitionID, + "partition_name": in.PartitionName, + "partition_created_timestamp": in.PartitionCreatedTimestamp, + "collection_id": in.CollectionID, + "status": in.Status, + "ts": in.Ts, + "is_deleted": in.IsDeleted, + "created_at": in.CreatedAt, + "updated_at": in.UpdatedAt, + } + return ret +} + +func (s *partitionDb) Update(in *dbmodel.Partition) error { + updates := generatePartitionUpdatesWithoutID(in) + return s.db.Model(&dbmodel.Partition{}).Where("id = ?", in.ID).Updates(updates).Error +} diff --git a/internal/metastore/db/dao/partition_test.go b/internal/metastore/db/dao/partition_test.go index d343417716..df17eac41d 100644 --- a/internal/metastore/db/dao/partition_test.go +++ b/internal/metastore/db/dao/partition_test.go @@ -65,8 +65,8 @@ func TestPartition_Insert(t *testing.T) { // expectation mock.ExpectBegin() - mock.ExpectExec("INSERT INTO `partitions` (`tenant_id`,`partition_id`,`partition_name`,`partition_created_timestamp`,`collection_id`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?)"). - WithArgs(partitions[0].TenantID, partitions[0].PartitionID, partitions[0].PartitionName, partitions[0].PartitionCreatedTimestamp, partitions[0].CollectionID, partitions[0].Ts, partitions[0].IsDeleted, partitions[0].CreatedAt, partitions[0].UpdatedAt). + mock.ExpectExec("INSERT INTO `partitions` (`tenant_id`,`partition_id`,`partition_name`,`partition_created_timestamp`,`collection_id`,`status`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?)"). + WithArgs(partitions[0].TenantID, partitions[0].PartitionID, partitions[0].PartitionName, partitions[0].PartitionCreatedTimestamp, partitions[0].CollectionID, partitions[0].Status, partitions[0].Ts, partitions[0].IsDeleted, partitions[0].CreatedAt, partitions[0].UpdatedAt). 
WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectCommit() @@ -101,3 +101,54 @@ func TestPartition_Insert_Error(t *testing.T) { err := partitionTestDb.Insert(partitions) assert.Error(t, err) } + +func Test_partitionDb_Update(t *testing.T) { + t.Run("normal case", func(t *testing.T) { + partition := &dbmodel.Partition{ + ID: 100, + TenantID: tenantID, + PartitionID: fieldID1, + PartitionName: "test_field_1", + PartitionCreatedTimestamp: typeutil.Timestamp(1000), + CollectionID: collID1, + Ts: ts, + IsDeleted: false, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } + + mock.ExpectBegin() + mock.ExpectExec("UPDATE `partitions` SET `collection_id`=?,`created_at`=?,`is_deleted`=?,`partition_created_timestamp`=?,`partition_id`=?,`partition_name`=?,`status`=?,`tenant_id`=?,`ts`=?,`updated_at`=? WHERE id = ?"). + WithArgs(partition.CollectionID, partition.CreatedAt, partition.IsDeleted, partition.PartitionCreatedTimestamp, partition.PartitionID, partition.PartitionName, partition.Status, partition.TenantID, partition.Ts, partition.UpdatedAt, partition.ID). + WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() + + err := partitionTestDb.Update(partition) + assert.NoError(t, err) + }) + + t.Run("error case", func(t *testing.T) { + partition := &dbmodel.Partition{ + ID: 100, + TenantID: tenantID, + PartitionID: fieldID1, + PartitionName: "test_field_1", + PartitionCreatedTimestamp: typeutil.Timestamp(1000), + CollectionID: collID1, + Ts: ts, + IsDeleted: false, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } + + mock.ExpectBegin() + mock.ExpectExec("UPDATE `partitions` SET `collection_id`=?,`created_at`=?,`is_deleted`=?,`partition_created_timestamp`=?,`partition_id`=?,`partition_name`=?,`status`=?,`tenant_id`=?,`ts`=?,`updated_at`=? WHERE id = ?"). + WithArgs(partition.CollectionID, partition.CreatedAt, partition.IsDeleted, partition.PartitionCreatedTimestamp, partition.PartitionID, partition.PartitionName, partition.Status, partition.TenantID, partition.Ts, partition.UpdatedAt, partition.ID). 
+ WillReturnError(errors.New("error mock Update Partition")) + mock.ExpectRollback() + + err := partitionTestDb.Update(partition) + assert.Error(t, err) + }) + +} diff --git a/internal/metastore/db/dbmodel/collection.go b/internal/metastore/db/dbmodel/collection.go index eb66263ffb..113e1f7c2d 100644 --- a/internal/metastore/db/dbmodel/collection.go +++ b/internal/metastore/db/dbmodel/collection.go @@ -21,6 +21,7 @@ type Collection struct { ShardsNum int32 `gorm:"shards_num"` StartPosition string `gorm:"start_position"` ConsistencyLevel int32 `gorm:"consistency_level"` + Status int32 `gorm:"status"` Ts typeutil.Timestamp `gorm:"ts"` IsDeleted bool `gorm:"is_deleted"` CreatedAt time.Time `gorm:"created_at"` @@ -39,6 +40,7 @@ type ICollectionDb interface { Get(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*Collection, error) GetCollectionIDByName(tenantID string, collectionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error) Insert(in *Collection) error + Update(in *Collection) error } // model <---> db diff --git a/internal/metastore/db/dbmodel/mocks/ICollectionDb.go b/internal/metastore/db/dbmodel/mocks/ICollectionDb.go index 8e4cc5d0dd..18aec42029 100644 --- a/internal/metastore/db/dbmodel/mocks/ICollectionDb.go +++ b/internal/metastore/db/dbmodel/mocks/ICollectionDb.go @@ -116,6 +116,20 @@ func (_m *ICollectionDb) ListCollectionIDTs(tenantID string, ts uint64) ([]*dbmo return r0, r1 } +// Update provides a mock function with given fields: in +func (_m *ICollectionDb) Update(in *dbmodel.Collection) error { + ret := _m.Called(in) + + var r0 error + if rf, ok := ret.Get(0).(func(*dbmodel.Collection) error); ok { + r0 = rf(in) + } else { + r0 = ret.Error(0) + } + + return r0 +} + type mockConstructorTestingTNewICollectionDb interface { mock.TestingT Cleanup(func()) diff --git a/internal/metastore/db/dbmodel/mocks/IPartitionDb.go b/internal/metastore/db/dbmodel/mocks/IPartitionDb.go index 316c4829c1..194341b218 100644 --- a/internal/metastore/db/dbmodel/mocks/IPartitionDb.go +++ b/internal/metastore/db/dbmodel/mocks/IPartitionDb.go @@ -49,6 +49,20 @@ func (_m *IPartitionDb) Insert(in []*dbmodel.Partition) error { return r0 } +// Update provides a mock function with given fields: in +func (_m *IPartitionDb) Update(in *dbmodel.Partition) error { + ret := _m.Called(in) + + var r0 error + if rf, ok := ret.Get(0).(func(*dbmodel.Partition) error); ok { + r0 = rf(in) + } else { + r0 = ret.Error(0) + } + + return r0 +} + type mockConstructorTestingTNewIPartitionDb interface { mock.TestingT Cleanup(func()) diff --git a/internal/metastore/db/dbmodel/mocks/ISegmentIndexDb.go b/internal/metastore/db/dbmodel/mocks/ISegmentIndexDb.go index 423bba56f7..09827620f0 100644 --- a/internal/metastore/db/dbmodel/mocks/ISegmentIndexDb.go +++ b/internal/metastore/db/dbmodel/mocks/ISegmentIndexDb.go @@ -12,6 +12,29 @@ type ISegmentIndexDb struct { mock.Mock } +// Get provides a mock function with given fields: tenantID, collectionID, buildID +func (_m *ISegmentIndexDb) Get(tenantID string, collectionID int64, buildID int64) ([]*dbmodel.SegmentIndexResult, error) { + ret := _m.Called(tenantID, collectionID, buildID) + + var r0 []*dbmodel.SegmentIndexResult + if rf, ok := ret.Get(0).(func(string, int64, int64) []*dbmodel.SegmentIndexResult); ok { + r0 = rf(tenantID, collectionID, buildID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*dbmodel.SegmentIndexResult) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, int64, int64) error); ok { + r1 = rf(tenantID, 
collectionID, buildID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Insert provides a mock function with given fields: in func (_m *ISegmentIndexDb) Insert(in []*dbmodel.SegmentIndex) error { ret := _m.Called(in) @@ -26,6 +49,29 @@ func (_m *ISegmentIndexDb) Insert(in []*dbmodel.SegmentIndex) error { return r0 } +// List provides a mock function with given fields: tenantID +func (_m *ISegmentIndexDb) List(tenantID string) ([]*dbmodel.SegmentIndexResult, error) { + ret := _m.Called(tenantID) + + var r0 []*dbmodel.SegmentIndexResult + if rf, ok := ret.Get(0).(func(string) []*dbmodel.SegmentIndexResult); ok { + r0 = rf(tenantID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*dbmodel.SegmentIndexResult) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(tenantID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // MarkDeleted provides a mock function with given fields: tenantID, in func (_m *ISegmentIndexDb) MarkDeleted(tenantID string, in []*dbmodel.SegmentIndex) error { ret := _m.Called(tenantID, in) @@ -40,6 +86,20 @@ func (_m *ISegmentIndexDb) MarkDeleted(tenantID string, in []*dbmodel.SegmentInd return r0 } +// MarkDeletedByBuildID provides a mock function with given fields: tenantID, idxID +func (_m *ISegmentIndexDb) MarkDeletedByBuildID(tenantID string, idxID int64) error { + ret := _m.Called(tenantID, idxID) + + var r0 error + if rf, ok := ret.Get(0).(func(string, int64) error); ok { + r0 = rf(tenantID, idxID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // MarkDeletedByCollectionID provides a mock function with given fields: tenantID, collID func (_m *ISegmentIndexDb) MarkDeletedByCollectionID(tenantID string, collID int64) error { ret := _m.Called(tenantID, collID) @@ -54,26 +114,12 @@ func (_m *ISegmentIndexDb) MarkDeletedByCollectionID(tenantID string, collID int return r0 } -// MarkDeletedByIndexID provides a mock function with given fields: tenantID, idxID -func (_m *ISegmentIndexDb) MarkDeletedByIndexID(tenantID string, idxID int64) error { - ret := _m.Called(tenantID, idxID) - - var r0 error - if rf, ok := ret.Get(0).(func(string, int64) error); ok { - r0 = rf(tenantID, idxID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Upsert provides a mock function with given fields: in -func (_m *ISegmentIndexDb) Upsert(in []*dbmodel.SegmentIndex) error { +// Update provides a mock function with given fields: in +func (_m *ISegmentIndexDb) Update(in *dbmodel.SegmentIndex) error { ret := _m.Called(in) var r0 error - if rf, ok := ret.Get(0).(func([]*dbmodel.SegmentIndex) error); ok { + if rf, ok := ret.Get(0).(func(*dbmodel.SegmentIndex) error); ok { r0 = rf(in) } else { r0 = ret.Error(0) diff --git a/internal/metastore/db/dbmodel/partition.go b/internal/metastore/db/dbmodel/partition.go index 632b550412..f67dcc94af 100644 --- a/internal/metastore/db/dbmodel/partition.go +++ b/internal/metastore/db/dbmodel/partition.go @@ -14,6 +14,7 @@ type Partition struct { PartitionName string `gorm:"partition_name"` PartitionCreatedTimestamp uint64 `gorm:"partition_created_timestamp"` CollectionID int64 `gorm:"collection_id"` + Status int32 `gorm:"status"` Ts typeutil.Timestamp `gorm:"ts"` IsDeleted bool `gorm:"is_deleted"` CreatedAt time.Time `gorm:"created_at"` @@ -28,6 +29,7 @@ func (v Partition) TableName() string { type IPartitionDb interface { GetByCollectionID(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) ([]*Partition, error) Insert(in []*Partition) error + Update(in 
*Partition) error //MarkDeleted(tenantID string, collID typeutil.UniqueID) error } diff --git a/internal/metastore/db/rootcoord/table_catalog.go b/internal/metastore/db/rootcoord/table_catalog.go index a50da1262c..d05a6f818e 100644 --- a/internal/metastore/db/rootcoord/table_catalog.go +++ b/internal/metastore/db/rootcoord/table_catalog.go @@ -5,6 +5,11 @@ import ( "encoding/json" "fmt" "runtime" + "time" + + "github.com/milvus-io/milvus/internal/util/tsoutil" + + "github.com/milvus-io/milvus/internal/metastore" "github.com/milvus-io/milvus/internal/util" @@ -56,6 +61,7 @@ func (tc *Catalog) CreateCollection(ctx context.Context, collection *model.Colle ShardsNum: collection.ShardsNum, StartPosition: startPositionsStr, ConsistencyLevel: int32(collection.ConsistencyLevel), + Status: int32(collection.State), Ts: ts, }) if err != nil { @@ -375,6 +381,47 @@ func (tc *Catalog) DropCollection(ctx context.Context, collection *model.Collect }) } +func (tc *Catalog) alterModifyCollection(ctx context.Context, oldColl *model.Collection, newColl *model.Collection, ts typeutil.Timestamp) error { + if oldColl.TenantID != newColl.TenantID || oldColl.CollectionID != newColl.CollectionID { + return fmt.Errorf("altering tenant id or collection id is forbidden") + } + + var startPositionsStr string + if newColl.StartPositions != nil { + startPositionsBytes, err := json.Marshal(newColl.StartPositions) + if err != nil { + return fmt.Errorf("failed to marshal start positions: %s", err.Error()) + } + startPositionsStr = string(startPositionsBytes) + } + + createdAt, _ := tsoutil.ParseTS(newColl.CreateTime) + tenantID := contextutil.TenantID(ctx) + coll := &dbmodel.Collection{ + TenantID: tenantID, + CollectionID: newColl.CollectionID, + CollectionName: newColl.Name, + Description: newColl.Description, + AutoID: newColl.AutoID, + ShardsNum: newColl.ShardsNum, + StartPosition: startPositionsStr, + ConsistencyLevel: int32(newColl.ConsistencyLevel), + Status: int32(newColl.State), + Ts: ts, + CreatedAt: createdAt, + UpdatedAt: time.Now(), + } + + return tc.metaDomain.CollectionDb(ctx).Update(coll) +} + +func (tc *Catalog) AlterCollection(ctx context.Context, oldColl *model.Collection, newColl *model.Collection, alterType metastore.AlterType, ts typeutil.Timestamp) error { + if alterType == metastore.MODIFY { + return tc.alterModifyCollection(ctx, oldColl, newColl, ts) + } + return fmt.Errorf("altering collection doesn't support %s", alterType.String()) +} + func (tc *Catalog) CreatePartition(ctx context.Context, partition *model.Partition, ts typeutil.Timestamp) error { tenantID := contextutil.TenantID(ctx) @@ -384,6 +431,7 @@ func (tc *Catalog) CreatePartition(ctx context.Context, partition *model.Partiti PartitionName: partition.PartitionName, PartitionCreatedTimestamp: partition.PartitionCreatedTimestamp, CollectionID: partition.CollectionID, + Status: int32(partition.State), Ts: ts, } err := tc.metaDomain.PartitionDb(ctx).Insert([]*dbmodel.Partition{p}) @@ -414,6 +462,30 @@ func (tc *Catalog) DropPartition(ctx context.Context, collectionID typeutil.Uniq return nil } +func (tc *Catalog) alterModifyPartition(ctx context.Context, oldPart *model.Partition, newPart *model.Partition, ts typeutil.Timestamp) error { + createdAt, _ := tsoutil.ParseTS(newPart.PartitionCreatedTimestamp) + p := &dbmodel.Partition{ + TenantID: contextutil.TenantID(ctx), + PartitionID: newPart.PartitionID, + PartitionName: newPart.PartitionName, + PartitionCreatedTimestamp: newPart.PartitionCreatedTimestamp, + CollectionID: 
newPart.CollectionID, + Status: int32(newPart.State), + Ts: ts, + IsDeleted: false, + CreatedAt: createdAt, + UpdatedAt: time.Now(), + } + return tc.metaDomain.PartitionDb(ctx).Update(p) +} + +func (tc *Catalog) AlterPartition(ctx context.Context, oldPart *model.Partition, newPart *model.Partition, alterType metastore.AlterType, ts typeutil.Timestamp) error { + if alterType == metastore.MODIFY { + return tc.alterModifyPartition(ctx, oldPart, newPart, ts) + } + return fmt.Errorf("altering partition doesn't support: %s", alterType.String()) +} + func (tc *Catalog) CreateAlias(ctx context.Context, alias *model.Alias, ts typeutil.Timestamp) error { tenantID := contextutil.TenantID(ctx) diff --git a/internal/metastore/db/rootcoord/table_catalog_test.go b/internal/metastore/db/rootcoord/table_catalog_test.go index 15547a87b1..4674d3e291 100644 --- a/internal/metastore/db/rootcoord/table_catalog_test.go +++ b/internal/metastore/db/rootcoord/table_catalog_test.go @@ -8,6 +8,10 @@ import ( "testing" "time" + "github.com/milvus-io/milvus/internal/metastore" + + pb "github.com/milvus-io/milvus/internal/proto/etcdpb" + "github.com/milvus-io/milvus/internal/common" "github.com/milvus-io/milvus/internal/metastore/db/dbmodel" "github.com/milvus-io/milvus/internal/metastore/db/dbmodel/mocks" @@ -765,6 +769,62 @@ func TestTableCatalog_DropCollection_TsNot0_PartitionInsertError(t *testing.T) { require.Error(t, gotErr) } +func TestCatalog_AlterCollection(t *testing.T) { + coll := &model.Collection{ + TenantID: tenantID, + CollectionID: collID1, + Name: collName1, + State: pb.CollectionState_CollectionCreated, + Aliases: []string{collAlias1, collAlias2}, + } + newColl := &model.Collection{ + TenantID: tenantID, + CollectionID: collID1, + Name: collName1, + State: pb.CollectionState_CollectionDropping, + Aliases: []string{collAlias1, collAlias2}, + } + + collDbMock.On("Update", mock.Anything).Return(nil).Once() + + gotErr := mockCatalog.AlterCollection(ctx, coll, newColl, metastore.MODIFY, ts) + require.NoError(t, gotErr) +} + +func TestTableCatalog_AlterCollection_TsNot0_AlterTypeError(t *testing.T) { + coll := &model.Collection{ + TenantID: tenantID, + CollectionID: collID1, + Name: collName1, + State: pb.CollectionState_CollectionCreated, + Aliases: []string{collAlias1, collAlias2}, + } + + gotErr := mockCatalog.AlterCollection(ctx, coll, coll, metastore.ADD, ts) + require.Error(t, gotErr) + + gotErr = mockCatalog.AlterCollection(ctx, coll, coll, metastore.DELETE, ts) + require.Error(t, gotErr) +} + +func TestCatalog_AlterCollection_TsNot0_CollInsertError(t *testing.T) { + coll := &model.Collection{ + TenantID: tenantID, + CollectionID: collID1, + Name: collName1, + State: pb.CollectionState_CollectionCreated, + Aliases: []string{collAlias1, collAlias2}, + } + + // expectation + errTest := errors.New("test error") + collDbMock.On("Update", mock.Anything).Return(errTest).Once() + + // actual + gotErr := mockCatalog.AlterCollection(ctx, coll, coll, metastore.MODIFY, ts) + require.Error(t, gotErr) +} + func TestTableCatalog_CreatePartition(t *testing.T) { partition := &model.Partition{ PartitionID: partitionID1, @@ -816,6 +876,63 @@ func TestTableCatalog_DropPartition_TsNot0_PartitionInsertError(t *testing.T) { gotErr := mockCatalog.DropPartition(ctx, collID1, partitionID1, ts) require.Error(t, gotErr) } + +func TestCatalog_AlterPartition(t *testing.T) { + partition := &model.Partition{ + PartitionID: partitionID1, + PartitionName: "test_partition_name_1", + PartitionCreatedTimestamp: 1, + CollectionID: 
collID1, + State: pb.PartitionState_PartitionCreated, + } + newPartition := &model.Partition{ + PartitionID: partitionID1, + PartitionName: "test_partition_name_1", + PartitionCreatedTimestamp: 1, + CollectionID: collID1, + State: pb.PartitionState_PartitionDropping, + } + + partitionDbMock.On("Update", mock.Anything).Return(nil).Once() + + gotErr := mockCatalog.AlterPartition(ctx, partition, newPartition, metastore.MODIFY, ts) + require.NoError(t, gotErr) +} + +func TestCatalog_AlterPartition_TsNot0_AlterTypeError(t *testing.T) { + partition := &model.Partition{ + PartitionID: partitionID1, + PartitionName: "test_partition_name_1", + PartitionCreatedTimestamp: 1, + CollectionID: collID1, + State: pb.PartitionState_PartitionCreated, + } + + gotErr := mockCatalog.AlterPartition(ctx, partition, partition, metastore.ADD, ts) + require.Error(t, gotErr) + + gotErr = mockCatalog.AlterPartition(ctx, partition, partition, metastore.DELETE, ts) + require.Error(t, gotErr) +} + +func TestCatalog_AlterPartition_TsNot0_PartitionInsertError(t *testing.T) { + partition := &model.Partition{ + PartitionID: partitionID1, + PartitionName: "test_partition_name_1", + PartitionCreatedTimestamp: 1, + CollectionID: collID1, + State: pb.PartitionState_PartitionCreated, + } + + // expectation + errTest := errors.New("test error") + partitionDbMock.On("Update", mock.Anything).Return(errTest).Once() + + // actual + gotErr := mockCatalog.AlterPartition(ctx, partition, partition, metastore.MODIFY, ts) + require.Error(t, gotErr) +} + func TestTableCatalog_CreateAlias(t *testing.T) { alias := &model.Alias{ CollectionID: collID1, diff --git a/internal/metastore/kv/rootcoord/kv_catalog.go b/internal/metastore/kv/rootcoord/kv_catalog.go index a00fd138ba..bbf42d664f 100644 --- a/internal/metastore/kv/rootcoord/kv_catalog.go +++ b/internal/metastore/kv/rootcoord/kv_catalog.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" + "github.com/milvus-io/milvus/internal/metastore" "github.com/milvus-io/milvus/internal/util/crypto" "github.com/milvus-io/milvus/internal/util" @@ -24,6 +25,10 @@ import ( "github.com/milvus-io/milvus/internal/util/typeutil" ) +const ( + maxTxnNum = 64 +) + // prefix/collection/collection_id -> CollectionInfo // prefix/partitions/collection_id/partition_id -> PartitionInfo // prefix/aliases/alias_name -> AliasInfo @@ -72,7 +77,13 @@ func buildKvs(keys, values []string) (map[string]string, error) { return ret, nil } -// TODO: atomicity should be promised outside. +func min(a, b int) int { + if a < b { + return a + } + return b +} + func batchSave(snapshot kv.SnapShotKV, maxTxnNum int, kvs map[string]string, ts typeutil.Timestamp) error { keys := make([]string, 0, len(kvs)) values := make([]string, 0, len(kvs)) @@ -80,19 +91,12 @@ func batchSave(snapshot kv.SnapShotKV, maxTxnNum int, kvs map[string]string, ts keys = append(keys, k) values = append(values, v) } - min := func(a, b int) int { - if a < b { - return a - } - return b - } for i := 0; i < len(kvs); i = i + maxTxnNum { end := min(i+maxTxnNum, len(keys)) batch, err := buildKvs(keys[i:end], values[i:end]) if err != nil { return err } - // TODO: atomicity is not promised. Garbage will be generated. 
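The min helper and the i/end arithmetic introduced above in batchSave split the flattened key/value slices so that no single MultiSave carries more than maxTxnNum (64) entries, presumably to stay under etcd's per-transaction operation limit. A self-contained illustration of the chunking, not part of the patch:

package main

import "fmt"

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	const maxTxnNum = 64
	keys := make([]string, 150) // pretend 150 keys need saving

	// Same loop shape as batchSave: 150 keys -> batches of 64, 64 and 22.
	for i := 0; i < len(keys); i += maxTxnNum {
		end := min(i+maxTxnNum, len(keys))
		fmt.Printf("batch [%d:%d) -> %d keys\n", i, end, end-i)
	}
}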
if err := snapshot.MultiSave(batch, ts); err != nil { return err } @@ -100,16 +104,43 @@ func batchSave(snapshot kv.SnapShotKV, maxTxnNum int, kvs map[string]string, ts return nil } +func batchMultiSaveAndRemoveWithPrefix(snapshot kv.SnapShotKV, maxTxnNum int, saves map[string]string, removals []string, ts typeutil.Timestamp) error { + if err := batchSave(snapshot, maxTxnNum, saves, ts); err != nil { + return err + } + for i := 0; i < len(removals); i = i + maxTxnNum { + end := min(i+maxTxnNum, len(removals)) + batch := removals[i:end] + if err := snapshot.MultiSaveAndRemoveWithPrefix(nil, batch, ts); err != nil { + return err + } + } + return nil +} + func (kc *Catalog) CreateCollection(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error { + if coll.State != pb.CollectionState_CollectionCreating { + return fmt.Errorf("cannot create collection with state: %s, collection: %s", coll.State.String(), coll.Name) + } + k1 := buildCollectionKey(coll.CollectionID) collInfo := model.MarshalCollectionModel(coll) v1, err := proto.Marshal(collInfo) if err != nil { - log.Error("create collection marshal fail", zap.String("key", k1), zap.Error(err)) + return fmt.Errorf("failed to marshal collection info: %s", err.Error()) + } + + // Due to the limit of etcd txn number, we must split these kvs into several batches. + // Save collection key first, and the state of collection is creating. + // If we save collection key with error, then no garbage will be generated and error will be raised. + // If we succeeded to save collection but failed to save other related keys, the garbage meta can be removed + // outside and the collection won't be seen by any others (since it's of creating state). + // However, if we save other keys first, there is no chance to remove the intermediate meta. + if err := kc.Snapshot.Save(k1, string(v1), ts); err != nil { return err } - kvs := map[string]string{k1: string(v1)} + kvs := map[string]string{} // save partition info to newly path. for _, partition := range coll.Partitions { @@ -135,8 +166,8 @@ func (kc *Catalog) CreateCollection(ctx context.Context, coll *model.Collection, kvs[k] = string(v) } - // TODO: atomicity should be promised outside. - maxTxnNum := 64 + // Though batchSave is not atomic enough, we can promise the atomicity outside. + // Recovering from failure, if we found collection is creating, we should removing all these related meta. 
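The comments above lean on a recovery step that can spot a collection left in Creating state and delete whatever an interrupted CreateCollection wrote. A minimal sketch of the rule that implies, offered as an assumption rather than the cleanup this PR actually ships; it is written as if it sat in kv_catalog.go, reusing that file's imports and the key-prefix helpers that DropCollection uses just below:

// Hypothetical sketch only, not code from this patch.
func (kc *Catalog) cleanUpHalfCreatedCollection(collectionID typeutil.UniqueID, ts typeutil.Timestamp) error {
	// The collection key was written first, in Creating state, so readers never
	// saw it; the batched partition/field keys may or may not have landed.
	// Removing by prefix plus the collection key itself is therefore safe.
	removals := []string{
		buildPartitionPrefix(collectionID),
		buildFieldPrefix(collectionID),
		buildCollectionKey(collectionID),
	}
	return kc.Snapshot.MultiSaveAndRemoveWithPrefix(nil, removals, ts)
}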
return batchSave(kc.Snapshot, maxTxnNum, kvs, ts) } @@ -145,7 +176,7 @@ func (kc *Catalog) loadCollection(ctx context.Context, collectionID typeutil.Uni collVal, err := kc.Snapshot.Load(collKey, ts) if err != nil { log.Error("get collection meta fail", zap.String("key", collKey), zap.Error(err)) - return nil, err + return nil, fmt.Errorf("can't find collection: %d", collectionID) } collMeta := &pb.CollectionInfo{} @@ -334,39 +365,80 @@ func (kc *Catalog) AlterAlias(ctx context.Context, alias *model.Alias, ts typeut } func (kc *Catalog) DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error { - delMetakeysSnap := []string{ - fmt.Sprintf("%s/%d", CollectionMetaPrefix, collectionInfo.CollectionID), - } + collectionKey := buildCollectionKey(collectionInfo.CollectionID) + + var delMetakeysSnap []string for _, alias := range collectionInfo.Aliases { delMetakeysSnap = append(delMetakeysSnap, fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, alias), ) } + delMetakeysSnap = append(delMetakeysSnap, buildPartitionPrefix(collectionInfo.CollectionID)) + delMetakeysSnap = append(delMetakeysSnap, buildFieldPrefix(collectionInfo.CollectionID)) - err := kc.Snapshot.MultiSaveAndRemoveWithPrefix(map[string]string{}, delMetakeysSnap, ts) - if err != nil { - log.Error("drop collection update meta fail", zap.Int64("collectionID", collectionInfo.CollectionID), zap.Error(err)) + // Though batchMultiSaveAndRemoveWithPrefix is not atomic enough, we can promise atomicity outside. + // If we found collection under dropping state, we'll know that gc is not completely on this collection. + // However, if we remove collection first, we cannot remove other metas. + if err := batchMultiSaveAndRemoveWithPrefix(kc.Snapshot, maxTxnNum, nil, delMetakeysSnap, ts); err != nil { return err } - // Txn operation - kvs := map[string]string{} - for k, v := range collectionInfo.Extra { - kvs[k] = v + // if we found collection dropping, we should try removing related resources. 
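DropCollection mirrors CreateCollection in reverse: aliases, partition keys and field keys are removed first in bounded batches, and the collection key, still carrying the Dropping state, goes last, so an interrupted drop remains discoverable. Putting the two together with AlterCollection (defined just below), the catalog-level lifecycle looks roughly like the sketch here; this illustrates the state transitions exercised by the tests in this patch, not the actual rootcoord call flow, and the package placement and timestamps are assumptions:

package rootcoord

import (
	"context"

	"github.com/milvus-io/milvus/internal/metastore"
	"github.com/milvus-io/milvus/internal/metastore/model"
	pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
	"github.com/milvus-io/milvus/internal/util/typeutil"
)

// collectionLifecycleSketch shows the catalog's view only; real callers would
// allocate each timestamp from the TSO allocator instead of ts+1, ts+2, ...
func collectionLifecycleSketch(ctx context.Context, kc *Catalog, coll *model.Collection, ts typeutil.Timestamp) error {
	// 1. Written in Creating state: collection key first, related keys batched after.
	coll.State = pb.CollectionState_CollectionCreating
	if err := kc.CreateCollection(ctx, coll, ts); err != nil {
		return err
	}

	// 2. Flipped to Created once the caller is ready to expose it.
	created := coll.Clone()
	created.State = pb.CollectionState_CollectionCreated
	if err := kc.AlterCollection(ctx, coll, created, metastore.MODIFY, ts+1); err != nil {
		return err
	}

	// 3. Marked Dropping, then dropped: related keys are removed first, the
	//    collection key last.
	dropping := created.Clone()
	dropping.State = pb.CollectionState_CollectionDropping
	if err := kc.AlterCollection(ctx, created, dropping, metastore.MODIFY, ts+2); err != nil {
		return err
	}
	return kc.DropCollection(ctx, dropping, ts+3)
}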
+ return kc.Snapshot.MultiSaveAndRemoveWithPrefix(nil, []string{collectionKey}, ts) +} + +func (kc *Catalog) alterModifyCollection(oldColl *model.Collection, newColl *model.Collection, ts typeutil.Timestamp) error { + if oldColl.TenantID != newColl.TenantID || oldColl.CollectionID != newColl.CollectionID { + return fmt.Errorf("altering tenant id or collection id is forbidden") } - - //delMetaKeysTxn := []string{ - // fmt.Sprintf("%s/%d", SegmentIndexMetaPrefix, collectionInfo.CollectionID), - // fmt.Sprintf("%s/%d", IndexMetaPrefix, collectionInfo.CollectionID), - //} - - err = kc.Txn.MultiSave(kvs) + oldCollClone := oldColl.Clone() + oldCollClone.Name = newColl.Name + oldCollClone.Description = newColl.Description + oldCollClone.AutoID = newColl.AutoID + oldCollClone.VirtualChannelNames = newColl.VirtualChannelNames + oldCollClone.PhysicalChannelNames = newColl.PhysicalChannelNames + oldCollClone.StartPositions = newColl.StartPositions + oldCollClone.ShardsNum = newColl.ShardsNum + oldCollClone.CreateTime = newColl.CreateTime + oldCollClone.ConsistencyLevel = newColl.ConsistencyLevel + oldCollClone.State = newColl.State + key := buildCollectionKey(oldColl.CollectionID) + value, err := proto.Marshal(model.MarshalCollectionModel(oldCollClone)) if err != nil { - log.Warn("drop collection update meta fail", zap.Int64("collectionID", collectionInfo.CollectionID), zap.Error(err)) return err } + return kc.Snapshot.Save(key, string(value), ts) +} - return nil +func (kc *Catalog) AlterCollection(ctx context.Context, oldColl *model.Collection, newColl *model.Collection, alterType metastore.AlterType, ts typeutil.Timestamp) error { + if alterType == metastore.MODIFY { + return kc.alterModifyCollection(oldColl, newColl, ts) + } + return fmt.Errorf("altering collection doesn't support %s", alterType.String()) +} + +func (kc *Catalog) alterModifyPartition(oldPart *model.Partition, newPart *model.Partition, ts typeutil.Timestamp) error { + if oldPart.CollectionID != newPart.CollectionID || oldPart.PartitionID != newPart.PartitionID { + return fmt.Errorf("altering collection id or partition id is forbidden") + } + oldPartClone := oldPart.Clone() + newPartClone := newPart.Clone() + oldPartClone.PartitionName = newPartClone.PartitionName + oldPartClone.PartitionCreatedTimestamp = newPartClone.PartitionCreatedTimestamp + oldPartClone.State = newPartClone.State + key := buildPartitionKey(oldPart.CollectionID, oldPart.PartitionID) + value, err := proto.Marshal(model.MarshalPartitionModel(oldPartClone)) + if err != nil { + return err + } + return kc.Snapshot.Save(key, string(value), ts) +} + +func (kc *Catalog) AlterPartition(ctx context.Context, oldPart *model.Partition, newPart *model.Partition, alterType metastore.AlterType, ts typeutil.Timestamp) error { + if alterType == metastore.MODIFY { + return kc.alterModifyPartition(oldPart, newPart, ts) + } + return fmt.Errorf("altering partition doesn't support %s", alterType.String()) } func dropPartition(collMeta *pb.CollectionInfo, partitionID typeutil.UniqueID) { diff --git a/internal/metastore/kv/rootcoord/kv_catalog_test.go b/internal/metastore/kv/rootcoord/kv_catalog_test.go index f22f33747f..2d851200a0 100644 --- a/internal/metastore/kv/rootcoord/kv_catalog_test.go +++ b/internal/metastore/kv/rootcoord/kv_catalog_test.go @@ -5,6 +5,10 @@ import ( "errors" "testing" + "github.com/milvus-io/milvus/internal/kv/mocks" + + "github.com/milvus-io/milvus/internal/metastore" + "github.com/milvus-io/milvus/internal/proto/schemapb" 
"github.com/milvus-io/milvus/internal/util/typeutil" @@ -758,3 +762,325 @@ func Test_batchSave(t *testing.T) { assert.Error(t, err) }) } + +func Test_min(t *testing.T) { + type args struct { + a int + b int + } + tests := []struct { + name string + args args + want int + }{ + { + args: args{a: 1, b: 2}, + want: 1, + }, + { + args: args{a: 4, b: 3}, + want: 3, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := min(tt.args.a, tt.args.b); got != tt.want { + t.Errorf("min() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_batchMultiSaveAndRemoveWithPrefix(t *testing.T) { + t.Run("failed to save", func(t *testing.T) { + snapshot := kv.NewMockSnapshotKV() + snapshot.MultiSaveFunc = func(kvs map[string]string, ts typeutil.Timestamp) error { + return errors.New("error mock MultiSave") + } + saves := map[string]string{"k": "v"} + err := batchMultiSaveAndRemoveWithPrefix(snapshot, maxTxnNum, saves, []string{}, 0) + assert.Error(t, err) + }) + t.Run("failed to remove", func(t *testing.T) { + snapshot := kv.NewMockSnapshotKV() + snapshot.MultiSaveFunc = func(kvs map[string]string, ts typeutil.Timestamp) error { + return nil + } + snapshot.MultiSaveAndRemoveWithPrefixFunc = func(saves map[string]string, removals []string, ts typeutil.Timestamp) error { + return errors.New("error mock MultiSaveAndRemoveWithPrefix") + } + saves := map[string]string{"k": "v"} + removals := []string{"prefix1", "prefix2"} + err := batchMultiSaveAndRemoveWithPrefix(snapshot, maxTxnNum, saves, removals, 0) + assert.Error(t, err) + }) + t.Run("normal case", func(t *testing.T) { + snapshot := kv.NewMockSnapshotKV() + snapshot.MultiSaveFunc = func(kvs map[string]string, ts typeutil.Timestamp) error { + return nil + } + snapshot.MultiSaveAndRemoveWithPrefixFunc = func(saves map[string]string, removals []string, ts typeutil.Timestamp) error { + return nil + } + saves := map[string]string{"k": "v"} + removals := []string{"prefix1", "prefix2"} + err := batchMultiSaveAndRemoveWithPrefix(snapshot, maxTxnNum, saves, removals, 0) + assert.NoError(t, err) + }) +} + +func TestCatalog_AlterCollection(t *testing.T) { + t.Run("add", func(t *testing.T) { + kc := &Catalog{} + ctx := context.Background() + err := kc.AlterCollection(ctx, nil, nil, metastore.ADD, 0) + assert.Error(t, err) + }) + + t.Run("delete", func(t *testing.T) { + kc := &Catalog{} + ctx := context.Background() + err := kc.AlterCollection(ctx, nil, nil, metastore.DELETE, 0) + assert.Error(t, err) + }) + + t.Run("modify", func(t *testing.T) { + snapshot := kv.NewMockSnapshotKV() + kvs := map[string]string{} + snapshot.SaveFunc = func(key string, value string, ts typeutil.Timestamp) error { + kvs[key] = value + return nil + } + kc := &Catalog{Snapshot: snapshot} + ctx := context.Background() + var collectionID int64 = 1 + oldC := &model.Collection{CollectionID: collectionID, State: pb.CollectionState_CollectionCreating} + newC := &model.Collection{CollectionID: collectionID, State: pb.CollectionState_CollectionCreated} + err := kc.AlterCollection(ctx, oldC, newC, metastore.MODIFY, 0) + assert.NoError(t, err) + key := buildCollectionKey(collectionID) + value, ok := kvs[key] + assert.True(t, ok) + var collPb pb.CollectionInfo + err = proto.Unmarshal([]byte(value), &collPb) + assert.NoError(t, err) + got := model.UnmarshalCollectionModel(&collPb) + assert.Equal(t, pb.CollectionState_CollectionCreated, got.State) + }) + + t.Run("modify, tenant id changed", func(t *testing.T) { + kc := &Catalog{} + ctx := context.Background() + var 
collectionID int64 = 1 + oldC := &model.Collection{TenantID: "1", CollectionID: collectionID, State: pb.CollectionState_CollectionCreating} + newC := &model.Collection{TenantID: "2", CollectionID: collectionID, State: pb.CollectionState_CollectionCreated} + err := kc.AlterCollection(ctx, oldC, newC, metastore.MODIFY, 0) + assert.Error(t, err) + }) +} + +func TestCatalog_AlterPartition(t *testing.T) { + t.Run("add", func(t *testing.T) { + kc := &Catalog{} + ctx := context.Background() + err := kc.AlterPartition(ctx, nil, nil, metastore.ADD, 0) + assert.Error(t, err) + }) + + t.Run("delete", func(t *testing.T) { + kc := &Catalog{} + ctx := context.Background() + err := kc.AlterPartition(ctx, nil, nil, metastore.DELETE, 0) + assert.Error(t, err) + }) + + t.Run("modify", func(t *testing.T) { + snapshot := kv.NewMockSnapshotKV() + kvs := map[string]string{} + snapshot.SaveFunc = func(key string, value string, ts typeutil.Timestamp) error { + kvs[key] = value + return nil + } + kc := &Catalog{Snapshot: snapshot} + ctx := context.Background() + var collectionID int64 = 1 + var partitionID int64 = 2 + oldP := &model.Partition{PartitionID: partitionID, CollectionID: collectionID, State: pb.PartitionState_PartitionCreating} + newP := &model.Partition{PartitionID: partitionID, CollectionID: collectionID, State: pb.PartitionState_PartitionCreated} + err := kc.AlterPartition(ctx, oldP, newP, metastore.MODIFY, 0) + assert.NoError(t, err) + key := buildPartitionKey(collectionID, partitionID) + value, ok := kvs[key] + assert.True(t, ok) + var partPb pb.PartitionInfo + err = proto.Unmarshal([]byte(value), &partPb) + assert.NoError(t, err) + got := model.UnmarshalPartitionModel(&partPb) + assert.Equal(t, pb.PartitionState_PartitionCreated, got.State) + }) + + t.Run("modify, tenant id changed", func(t *testing.T) { + kc := &Catalog{} + ctx := context.Background() + var collectionID int64 = 1 + oldP := &model.Partition{PartitionID: 1, CollectionID: collectionID, State: pb.PartitionState_PartitionCreating} + newP := &model.Partition{PartitionID: 2, CollectionID: collectionID, State: pb.PartitionState_PartitionCreated} + err := kc.AlterPartition(ctx, oldP, newP, metastore.MODIFY, 0) + assert.Error(t, err) + }) +} + +type mockSnapshotOpt func(ss *mocks.SnapShotKV) + +func newMockSnapshot(t *testing.T, opts ...mockSnapshotOpt) *mocks.SnapShotKV { + ss := mocks.NewSnapShotKV(t) + for _, opt := range opts { + opt(ss) + } + return ss +} + +func withMockSave(saveErr error) mockSnapshotOpt { + return func(ss *mocks.SnapShotKV) { + ss.On( + "Save", + mock.AnythingOfType("string"), + mock.AnythingOfType("string"), + mock.AnythingOfType("uint64")). + Return(saveErr) + } +} + +func withMockMultiSave(multiSaveErr error) mockSnapshotOpt { + return func(ss *mocks.SnapShotKV) { + ss.On( + "MultiSave", + mock.AnythingOfType("map[string]string"), + mock.AnythingOfType("uint64")). + Return(multiSaveErr) + } +} + +func withMockMultiSaveAndRemoveWithPrefix(err error) mockSnapshotOpt { + return func(ss *mocks.SnapShotKV) { + ss.On( + "MultiSaveAndRemoveWithPrefix", + mock.AnythingOfType("map[string]string"), + mock.AnythingOfType("[]string"), + mock.AnythingOfType("uint64")). 
+ Return(err) + } +} + +func TestCatalog_CreateCollection(t *testing.T) { + t.Run("collection not creating", func(t *testing.T) { + kc := &Catalog{} + ctx := context.Background() + coll := &model.Collection{State: pb.CollectionState_CollectionDropping} + err := kc.CreateCollection(ctx, coll, 100) + assert.Error(t, err) + }) + + t.Run("failed to save collection", func(t *testing.T) { + mockSnapshot := newMockSnapshot(t, withMockSave(errors.New("error mock Save"))) + kc := &Catalog{Snapshot: mockSnapshot} + ctx := context.Background() + coll := &model.Collection{State: pb.CollectionState_CollectionCreating} + err := kc.CreateCollection(ctx, coll, 100) + assert.Error(t, err) + }) + + t.Run("succeed to save collection but failed to save other keys", func(t *testing.T) { + mockSnapshot := newMockSnapshot(t, withMockSave(nil), withMockMultiSave(errors.New("error mock MultiSave"))) + kc := &Catalog{Snapshot: mockSnapshot} + ctx := context.Background() + coll := &model.Collection{ + Partitions: []*model.Partition{ + {PartitionName: "test"}, + }, + State: pb.CollectionState_CollectionCreating, + } + err := kc.CreateCollection(ctx, coll, 100) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + mockSnapshot := newMockSnapshot(t, withMockSave(nil), withMockMultiSave(nil)) + kc := &Catalog{Snapshot: mockSnapshot} + ctx := context.Background() + coll := &model.Collection{ + Partitions: []*model.Partition{ + {PartitionName: "test"}, + }, + State: pb.CollectionState_CollectionCreating, + } + err := kc.CreateCollection(ctx, coll, 100) + assert.NoError(t, err) + }) +} + +func TestCatalog_DropCollection(t *testing.T) { + t.Run("failed to remove", func(t *testing.T) { + mockSnapshot := newMockSnapshot(t, withMockMultiSaveAndRemoveWithPrefix(errors.New("error mock MultiSaveAndRemoveWithPrefix"))) + kc := &Catalog{Snapshot: mockSnapshot} + ctx := context.Background() + coll := &model.Collection{ + Partitions: []*model.Partition{ + {PartitionName: "test"}, + }, + State: pb.CollectionState_CollectionDropping, + } + err := kc.DropCollection(ctx, coll, 100) + assert.Error(t, err) + }) + + t.Run("succeed to remove first, but failed to remove twice", func(t *testing.T) { + mockSnapshot := newMockSnapshot(t) + removeOtherCalled := false + removeCollectionCalled := false + mockSnapshot.On( + "MultiSaveAndRemoveWithPrefix", + mock.AnythingOfType("map[string]string"), + mock.AnythingOfType("[]string"), + mock.AnythingOfType("uint64")). + Return(func(map[string]string, []string, typeutil.Timestamp) error { + removeOtherCalled = true + return nil + }).Once() + mockSnapshot.On( + "MultiSaveAndRemoveWithPrefix", + mock.AnythingOfType("map[string]string"), + mock.AnythingOfType("[]string"), + mock.AnythingOfType("uint64")). 
+ Return(func(map[string]string, []string, typeutil.Timestamp) error { + removeCollectionCalled = true + return errors.New("error mock MultiSaveAndRemoveWithPrefix") + }).Once() + kc := &Catalog{Snapshot: mockSnapshot} + ctx := context.Background() + coll := &model.Collection{ + Partitions: []*model.Partition{ + {PartitionName: "test"}, + }, + State: pb.CollectionState_CollectionDropping, + } + err := kc.DropCollection(ctx, coll, 100) + assert.Error(t, err) + assert.True(t, removeOtherCalled) + assert.True(t, removeCollectionCalled) + }) + + t.Run("normal case", func(t *testing.T) { + mockSnapshot := newMockSnapshot(t, withMockMultiSaveAndRemoveWithPrefix(nil)) + kc := &Catalog{Snapshot: mockSnapshot} + ctx := context.Background() + coll := &model.Collection{ + Partitions: []*model.Partition{ + {PartitionName: "test"}, + }, + State: pb.CollectionState_CollectionDropping, + } + err := kc.DropCollection(ctx, coll, 100) + assert.NoError(t, err) + }) +} diff --git a/internal/metastore/kv/rootcoord/suffix_snapshot.go b/internal/metastore/kv/rootcoord/suffix_snapshot.go index e36c7da8e6..2273c9f757 100644 --- a/internal/metastore/kv/rootcoord/suffix_snapshot.go +++ b/internal/metastore/kv/rootcoord/suffix_snapshot.go @@ -176,7 +176,7 @@ func (ss *SuffixSnapshot) checkKeyTS(key string, ts typeutil.Timestamp) (bool, e } latest = ss.lastestTS[key] } - return latest < ts, nil + return latest <= ts, nil } // loadLatestTS load the loatest ts for specified key diff --git a/internal/metastore/model/alias.go b/internal/metastore/model/alias.go index a1bc8cc991..ba2b04f805 100644 --- a/internal/metastore/model/alias.go +++ b/internal/metastore/model/alias.go @@ -6,6 +6,25 @@ type Alias struct { Name string CollectionID int64 CreatedTime uint64 + State pb.AliasState +} + +func (a Alias) Available() bool { + return a.State == pb.AliasState_AliasCreated +} + +func (a Alias) Clone() *Alias { + return &Alias{ + Name: a.Name, + CollectionID: a.CollectionID, + CreatedTime: a.CreatedTime, + State: a.State, + } +} + +func (a Alias) Equal(other Alias) bool { + return a.Name == other.Name && + a.CollectionID == other.CollectionID } func MarshalAliasModel(alias *Alias) *pb.AliasInfo { @@ -13,6 +32,7 @@ func MarshalAliasModel(alias *Alias) *pb.AliasInfo { AliasName: alias.Name, CollectionId: alias.CollectionID, CreatedTime: alias.CreatedTime, + State: alias.State, } } @@ -21,5 +41,6 @@ func UnmarshalAliasModel(info *pb.AliasInfo) *Alias { Name: info.GetAliasName(), CollectionID: info.GetCollectionId(), CreatedTime: info.GetCreatedTime(), + State: info.GetState(), } } diff --git a/internal/metastore/model/alias_test.go b/internal/metastore/model/alias_test.go new file mode 100644 index 0000000000..ab9d9061ba --- /dev/null +++ b/internal/metastore/model/alias_test.go @@ -0,0 +1,91 @@ +package model + +import ( + "testing" + + "github.com/milvus-io/milvus/internal/proto/etcdpb" + "github.com/stretchr/testify/assert" +) + +func TestAlias_Available(t *testing.T) { + type fields struct { + Name string + CollectionID int64 + CreatedTime uint64 + State etcdpb.AliasState + } + tests := []struct { + name string + fields fields + want bool + }{ + { + fields: fields{State: etcdpb.AliasState_AliasCreated}, + want: true, + }, + { + fields: fields{State: etcdpb.AliasState_AliasCreating}, + want: false, + }, + + { + fields: fields{State: etcdpb.AliasState_AliasDropping}, + want: false, + }, + { + fields: fields{State: etcdpb.AliasState_AliasDropped}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + a := Alias{ + Name: tt.fields.Name, + CollectionID: tt.fields.CollectionID, + CreatedTime: tt.fields.CreatedTime, + State: tt.fields.State, + } + assert.Equalf(t, tt.want, a.Available(), "Available()") + }) + } +} + +func TestAlias_Clone(t *testing.T) { + type fields struct { + Name string + CollectionID int64 + CreatedTime uint64 + State etcdpb.AliasState + } + tests := []struct { + name string + fields fields + }{ + {fields: fields{Name: "alias1", CollectionID: 101}}, + {fields: fields{Name: "alias2", CollectionID: 102}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + a := Alias{ + Name: tt.fields.Name, + CollectionID: tt.fields.CollectionID, + CreatedTime: tt.fields.CreatedTime, + State: tt.fields.State, + } + clone := a.Clone() + assert.True(t, clone.Equal(a)) + }) + } +} + +func TestAlias_Codec(t *testing.T) { + alias := &Alias{ + Name: "alias", + CollectionID: 101, + CreatedTime: 10000, + State: etcdpb.AliasState_AliasCreated, + } + aliasPb := MarshalAliasModel(alias) + aliasFromPb := UnmarshalAliasModel(aliasPb) + assert.True(t, aliasFromPb.Equal(*alias)) +} diff --git a/internal/metastore/model/collection.go b/internal/metastore/model/collection.go index f96054057d..0b3f900d0a 100644 --- a/internal/metastore/model/collection.go +++ b/internal/metastore/model/collection.go @@ -22,7 +22,12 @@ type Collection struct { CreateTime uint64 ConsistencyLevel commonpb.ConsistencyLevel Aliases []string // TODO: deprecate this. - Extra map[string]string // extra kvs + Extra map[string]string // deprecated. + State pb.CollectionState +} + +func (c Collection) Available() bool { + return c.State == pb.CollectionState_CollectionCreated } func (c Collection) Clone() *Collection { @@ -32,19 +37,31 @@ func (c Collection) Clone() *Collection { Name: c.Name, Description: c.Description, AutoID: c.AutoID, - Fields: c.Fields, - Partitions: c.Partitions, - VirtualChannelNames: c.VirtualChannelNames, - PhysicalChannelNames: c.PhysicalChannelNames, + Fields: CloneFields(c.Fields), + Partitions: ClonePartitions(c.Partitions), + VirtualChannelNames: common.CloneStringList(c.VirtualChannelNames), + PhysicalChannelNames: common.CloneStringList(c.PhysicalChannelNames), ShardsNum: c.ShardsNum, ConsistencyLevel: c.ConsistencyLevel, CreateTime: c.CreateTime, - StartPositions: c.StartPositions, - Aliases: c.Aliases, - Extra: c.Extra, + StartPositions: common.CloneKeyDataPairs(c.StartPositions), + Aliases: common.CloneStringList(c.Aliases), + Extra: common.CloneStr2Str(c.Extra), + State: c.State, } } +func (c Collection) Equal(other Collection) bool { + return c.TenantID == other.TenantID && + CheckPartitionsEqual(c.Partitions, other.Partitions) && + c.Name == other.Name && + c.Description == other.Description && + c.AutoID == other.AutoID && + CheckFieldsEqual(c.Fields, other.Fields) && + c.ShardsNum == other.ShardsNum && + c.ConsistencyLevel == other.ConsistencyLevel +} + func UnmarshalCollectionModel(coll *pb.CollectionInfo) *Collection { if coll == nil { return nil @@ -81,6 +98,7 @@ func UnmarshalCollectionModel(coll *pb.CollectionInfo) *Collection { ConsistencyLevel: coll.ConsistencyLevel, CreateTime: coll.CreateTime, StartPositions: coll.StartPositions, + State: coll.State, } } @@ -115,5 +133,6 @@ func MarshalCollectionModel(coll *Collection) *pb.CollectionInfo { ShardsNum: coll.ShardsNum, ConsistencyLevel: coll.ConsistencyLevel, StartPositions: coll.StartPositions, + State: coll.State, } } diff --git a/internal/metastore/model/field.go 
b/internal/metastore/model/field.go index 0f7fc46ea0..e5a0bb4840 100644 --- a/internal/metastore/model/field.go +++ b/internal/metastore/model/field.go @@ -1,6 +1,8 @@ package model import ( + "github.com/milvus-io/milvus/internal/common" + "github.com/milvus-io/milvus/internal/proto/commonpb" "github.com/milvus-io/milvus/internal/proto/schemapb" ) @@ -14,6 +16,62 @@ type Field struct { TypeParams []*commonpb.KeyValuePair IndexParams []*commonpb.KeyValuePair AutoID bool + State schemapb.FieldState +} + +func (f Field) Available() bool { + return f.State == schemapb.FieldState_FieldCreated +} + +func (f Field) Clone() *Field { + return &Field{ + FieldID: f.FieldID, + Name: f.Name, + IsPrimaryKey: f.IsPrimaryKey, + Description: f.Description, + DataType: f.DataType, + TypeParams: common.CloneKeyValuePairs(f.TypeParams), + IndexParams: common.CloneKeyValuePairs(f.IndexParams), + AutoID: f.AutoID, + State: f.State, + } +} + +func CloneFields(fields []*Field) []*Field { + clone := make([]*Field, 0, len(fields)) + for _, field := range fields { + clone = append(clone, field.Clone()) + } + return clone +} + +func checkParamsEqual(paramsA, paramsB []*commonpb.KeyValuePair) bool { + var A common.KeyValuePairs = paramsA + return A.Equal(paramsB) +} + +func (f Field) Equal(other Field) bool { + return f.FieldID == other.FieldID && + f.Name == other.Name && + f.IsPrimaryKey == other.IsPrimaryKey && + f.Description == other.Description && + f.DataType == other.DataType && + checkParamsEqual(f.TypeParams, other.TypeParams) && + checkParamsEqual(f.IndexParams, other.IndexParams) && + f.AutoID == other.AutoID +} + +func CheckFieldsEqual(fieldsA, fieldsB []*Field) bool { + if len(fieldsA) != len(fieldsB) { + return false + } + l := len(fieldsA) + for i := 0; i < l; i++ { + if !fieldsA[i].Equal(*fieldsB[i]) { + return false + } + } + return true } func MarshalFieldModel(field *Field) *schemapb.FieldSchema { diff --git a/internal/metastore/model/field_test.go b/internal/metastore/model/field_test.go index e21db4f73a..426ffe29f7 100644 --- a/internal/metastore/model/field_test.go +++ b/internal/metastore/model/field_test.go @@ -3,6 +3,8 @@ package model import ( "testing" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/schemapb" "github.com/stretchr/testify/assert" ) @@ -54,3 +56,85 @@ func TestUnmarshalFieldModels(t *testing.T) { assert.Equal(t, []*Field{fieldModel}, ret) assert.Nil(t, UnmarshalFieldModels(nil)) } + +func TestCheckFieldsEqual(t *testing.T) { + type args struct { + fieldsA []*Field + fieldsB []*Field + } + tests := []struct { + name string + args args + want bool + }{ + { + // length not match.
+ args: args{ + fieldsA: []*Field{{Name: "f1"}}, + fieldsB: []*Field{}, + }, + want: false, + }, + { + args: args{ + fieldsA: []*Field{{Name: "f1"}}, + fieldsB: []*Field{{Name: "f2"}}, + }, + want: false, + }, + { + args: args{ + fieldsA: []*Field{{Name: "f1"}, {Name: "f2"}}, + fieldsB: []*Field{{Name: "f1"}, {Name: "f2"}}, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := CheckFieldsEqual(tt.args.fieldsA, tt.args.fieldsB); got != tt.want { + t.Errorf("CheckFieldsEqual() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestField_Available(t *testing.T) { + type fields struct { + FieldID int64 + Name string + IsPrimaryKey bool + Description string + DataType schemapb.DataType + TypeParams []*commonpb.KeyValuePair + IndexParams []*commonpb.KeyValuePair + AutoID bool + State schemapb.FieldState + } + tests := []struct { + name string + fields fields + want bool + }{ + {fields: fields{State: schemapb.FieldState_FieldCreated}, want: true}, + {fields: fields{State: schemapb.FieldState_FieldCreating}, want: false}, + {fields: fields{State: schemapb.FieldState_FieldDropping}, want: false}, + {fields: fields{State: schemapb.FieldState_FieldDropped}, want: false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + f := Field{ + FieldID: tt.fields.FieldID, + Name: tt.fields.Name, + IsPrimaryKey: tt.fields.IsPrimaryKey, + Description: tt.fields.Description, + DataType: tt.fields.DataType, + TypeParams: tt.fields.TypeParams, + IndexParams: tt.fields.IndexParams, + AutoID: tt.fields.AutoID, + State: tt.fields.State, + } + assert.Equalf(t, tt.want, f.Available(), "Available()") + }) + } +} diff --git a/internal/metastore/model/partition.go b/internal/metastore/model/partition.go index 81fa863d92..a9205f47ff 100644 --- a/internal/metastore/model/partition.go +++ b/internal/metastore/model/partition.go @@ -1,13 +1,57 @@ package model -import pb "github.com/milvus-io/milvus/internal/proto/etcdpb" +import ( + "github.com/milvus-io/milvus/internal/common" + pb "github.com/milvus-io/milvus/internal/proto/etcdpb" +) type Partition struct { PartitionID int64 PartitionName string PartitionCreatedTimestamp uint64 - Extra map[string]string + Extra map[string]string // deprecated. 
CollectionID int64 + State pb.PartitionState +} + +func (p Partition) Available() bool { + return p.State == pb.PartitionState_PartitionCreated +} + +func (p Partition) Clone() *Partition { + return &Partition{ + PartitionID: p.PartitionID, + PartitionName: p.PartitionName, + PartitionCreatedTimestamp: p.PartitionCreatedTimestamp, + Extra: common.CloneStr2Str(p.Extra), + CollectionID: p.CollectionID, + State: p.State, + } +} + +func ClonePartitions(partitions []*Partition) []*Partition { + clone := make([]*Partition, 0, len(partitions)) + for _, partition := range partitions { + clone = append(clone, partition.Clone()) + } + return clone +} + +func (p Partition) Equal(other Partition) bool { + return p.PartitionName == other.PartitionName +} + +func CheckPartitionsEqual(partitionsA, partitionsB []*Partition) bool { + if len(partitionsA) != len(partitionsB) { + return false + } + l := len(partitionsA) + for i := 0; i < l; i++ { + if !partitionsA[i].Equal(*partitionsB[i]) { + return false + } + } + return true } func MarshalPartitionModel(partition *Partition) *pb.PartitionInfo { @@ -16,6 +60,7 @@ func MarshalPartitionModel(partition *Partition) *pb.PartitionInfo { PartitionName: partition.PartitionName, PartitionCreatedTimestamp: partition.PartitionCreatedTimestamp, CollectionId: partition.CollectionID, + State: partition.State, } } @@ -25,5 +70,6 @@ func UnmarshalPartitionModel(info *pb.PartitionInfo) *Partition { PartitionName: info.GetPartitionName(), PartitionCreatedTimestamp: info.GetPartitionCreatedTimestamp(), CollectionID: info.GetCollectionId(), + State: info.GetState(), } } diff --git a/internal/metastore/model/partition_test.go b/internal/metastore/model/partition_test.go new file mode 100644 index 0000000000..6f0356a45e --- /dev/null +++ b/internal/metastore/model/partition_test.go @@ -0,0 +1,47 @@ +package model + +import ( + "testing" +) + +func TestCheckPartitionsEqual(t *testing.T) { + type args struct { + partitionsA []*Partition + partitionsB []*Partition + } + tests := []struct { + name string + args args + want bool + }{ + { + // length not match. 
+ args: args{ + partitionsA: []*Partition{{PartitionName: "_default"}}, + partitionsB: []*Partition{}, + }, + want: false, + }, + { + args: args{ + partitionsA: []*Partition{{PartitionName: "_default"}}, + partitionsB: []*Partition{{PartitionName: "not_default"}}, + }, + want: false, + }, + { + args: args{ + partitionsA: []*Partition{{PartitionName: "_default"}, {PartitionName: "not_default"}}, + partitionsB: []*Partition{{PartitionName: "_default"}, {PartitionName: "not_default"}}, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := CheckPartitionsEqual(tt.args.partitionsA, tt.args.partitionsB); got != tt.want { + t.Errorf("CheckPartitionsEqual() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/mq/msgstream/mock_mq_factory.go b/internal/mq/msgstream/mock_mq_factory.go new file mode 100644 index 0000000000..d457b3707a --- /dev/null +++ b/internal/mq/msgstream/mock_mq_factory.go @@ -0,0 +1,16 @@ +package msgstream + +import "context" + +type MockMqFactory struct { + Factory + NewMsgStreamFunc func(ctx context.Context) (MsgStream, error) +} + +func NewMockMqFactory() *MockMqFactory { + return &MockMqFactory{} +} + +func (m MockMqFactory) NewMsgStream(ctx context.Context) (MsgStream, error) { + return m.NewMsgStreamFunc(ctx) +} diff --git a/internal/mq/msgstream/mock_msgstream.go b/internal/mq/msgstream/mock_msgstream.go new file mode 100644 index 0000000000..e4d7f0a2f8 --- /dev/null +++ b/internal/mq/msgstream/mock_msgstream.go @@ -0,0 +1,24 @@ +package msgstream + +type MockMsgStream struct { + MsgStream + AsProducerFunc func(channels []string) + BroadcastMarkFunc func(*MsgPack) (map[string][]MessageID, error) + BroadcastFunc func(*MsgPack) error +} + +func NewMockMsgStream() *MockMsgStream { + return &MockMsgStream{} +} + +func (m MockMsgStream) AsProducer(channels []string) { + m.AsProducerFunc(channels) +} + +func (m MockMsgStream) BroadcastMark(pack *MsgPack) (map[string][]MessageID, error) { + return m.BroadcastMarkFunc(pack) +} + +func (m MockMsgStream) Broadcast(pack *MsgPack) error { + return m.BroadcastFunc(pack) +} diff --git a/internal/proto/etcd_meta.proto b/internal/proto/etcd_meta.proto index efdec7567f..7a16dd85a9 100644 --- a/internal/proto/etcd_meta.proto +++ b/internal/proto/etcd_meta.proto @@ -20,6 +20,27 @@ message FieldIndexInfo{ int64 indexID = 2; } +enum CollectionState { + CollectionCreated = 0; + CollectionCreating = 1; + CollectionDropping = 2; + CollectionDropped = 3; +} + +enum PartitionState { + PartitionCreated = 0; + PartitionCreating = 1; + PartitionDropping = 2; + PartitionDropped = 3; +} + +enum AliasState { + AliasCreated = 0; + AliasCreating = 1; + AliasDropping = 2; + AliasDropped = 3; +} + message CollectionInfo { int64 ID = 1; schema.CollectionSchema schema = 2; @@ -37,6 +58,7 @@ message CollectionInfo { int32 shards_num = 10; repeated common.KeyDataPair start_positions = 11; common.ConsistencyLevel consistency_level = 12; + CollectionState state = 13; // To keep compatible with older version, default state is `Created`. } message PartitionInfo { @@ -44,12 +66,14 @@ message PartitionInfo { string partitionName = 2; uint64 partition_created_timestamp = 3; int64 collection_id = 4; + PartitionState state = 5; // To keep compatible with older version, default state is `Created`. } message AliasInfo { string alias_name = 1; int64 collection_id = 2; uint64 created_time = 3; + AliasState state = 4; // To keep compatible with older version, default state is `Created`. 
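The "To keep compatible with older version" comments above rely on proto3 zero-value semantics: a record written before this patch carries no state field at all, so decoding it yields the enum's zero value, which is deliberately the Created state. A minimal sketch of that behaviour, assuming the regenerated etcdpb package from this patch and the golang/protobuf runtime already used by the generated code:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/milvus-io/milvus/internal/proto/etcdpb"
)

func main() {
	// Simulate a record written by an older version: no state field is set.
	old := &etcdpb.AliasInfo{AliasName: "alias", CollectionId: 101, CreatedTime: 10000}
	buf, _ := proto.Marshal(old)

	// A reader built from this patch decodes the enum's zero value,
	// which is AliasState_AliasCreated, so the old alias stays usable.
	decoded := &etcdpb.AliasInfo{}
	_ = proto.Unmarshal(buf, decoded)
	fmt.Println(decoded.GetState() == etcdpb.AliasState_AliasCreated) // true
}

The same reasoning applies to CollectionState and PartitionState, whose zero values are CollectionCreated and PartitionCreated respectively.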
} message SegmentIndexInfo { diff --git a/internal/proto/etcdpb/etcd_meta.pb.go b/internal/proto/etcdpb/etcd_meta.pb.go index e05b0ece45..140ab01415 100644 --- a/internal/proto/etcdpb/etcd_meta.pb.go +++ b/internal/proto/etcdpb/etcd_meta.pb.go @@ -22,6 +22,99 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type CollectionState int32 + +const ( + CollectionState_CollectionCreated CollectionState = 0 + CollectionState_CollectionCreating CollectionState = 1 + CollectionState_CollectionDropping CollectionState = 2 + CollectionState_CollectionDropped CollectionState = 3 +) + +var CollectionState_name = map[int32]string{ + 0: "CollectionCreated", + 1: "CollectionCreating", + 2: "CollectionDropping", + 3: "CollectionDropped", +} + +var CollectionState_value = map[string]int32{ + "CollectionCreated": 0, + "CollectionCreating": 1, + "CollectionDropping": 2, + "CollectionDropped": 3, +} + +func (x CollectionState) String() string { + return proto.EnumName(CollectionState_name, int32(x)) +} + +func (CollectionState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_975d306d62b73e88, []int{0} +} + +type PartitionState int32 + +const ( + PartitionState_PartitionCreated PartitionState = 0 + PartitionState_PartitionCreating PartitionState = 1 + PartitionState_PartitionDropping PartitionState = 2 + PartitionState_PartitionDropped PartitionState = 3 +) + +var PartitionState_name = map[int32]string{ + 0: "PartitionCreated", + 1: "PartitionCreating", + 2: "PartitionDropping", + 3: "PartitionDropped", +} + +var PartitionState_value = map[string]int32{ + "PartitionCreated": 0, + "PartitionCreating": 1, + "PartitionDropping": 2, + "PartitionDropped": 3, +} + +func (x PartitionState) String() string { + return proto.EnumName(PartitionState_name, int32(x)) +} + +func (PartitionState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_975d306d62b73e88, []int{1} +} + +type AliasState int32 + +const ( + AliasState_AliasCreated AliasState = 0 + AliasState_AliasCreating AliasState = 1 + AliasState_AliasDropping AliasState = 2 + AliasState_AliasDropped AliasState = 3 +) + +var AliasState_name = map[int32]string{ + 0: "AliasCreated", + 1: "AliasCreating", + 2: "AliasDropping", + 3: "AliasDropped", +} + +var AliasState_value = map[string]int32{ + "AliasCreated": 0, + "AliasCreating": 1, + "AliasDropping": 2, + "AliasDropped": 3, +} + +func (x AliasState) String() string { + return proto.EnumName(AliasState_name, int32(x)) +} + +func (AliasState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_975d306d62b73e88, []int{2} +} + type IndexInfo struct { IndexName string `protobuf:"bytes,1,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"` IndexID int64 `protobuf:"varint,2,opt,name=indexID,proto3" json:"indexID,omitempty"` @@ -157,6 +250,7 @@ type CollectionInfo struct { ShardsNum int32 `protobuf:"varint,10,opt,name=shards_num,json=shardsNum,proto3" json:"shards_num,omitempty"` StartPositions []*commonpb.KeyDataPair `protobuf:"bytes,11,rep,name=start_positions,json=startPositions,proto3" json:"start_positions,omitempty"` ConsistencyLevel commonpb.ConsistencyLevel `protobuf:"varint,12,opt,name=consistency_level,json=consistencyLevel,proto3,enum=milvus.proto.common.ConsistencyLevel" json:"consistency_level,omitempty"` + State CollectionState `protobuf:"varint,13,opt,name=state,proto3,enum=milvus.proto.etcd.CollectionState" json:"state,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` 
XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -271,14 +365,22 @@ func (m *CollectionInfo) GetConsistencyLevel() commonpb.ConsistencyLevel { return commonpb.ConsistencyLevel_Strong } +func (m *CollectionInfo) GetState() CollectionState { + if m != nil { + return m.State + } + return CollectionState_CollectionCreated +} + type PartitionInfo struct { - PartitionID int64 `protobuf:"varint,1,opt,name=partitionID,proto3" json:"partitionID,omitempty"` - PartitionName string `protobuf:"bytes,2,opt,name=partitionName,proto3" json:"partitionName,omitempty"` - PartitionCreatedTimestamp uint64 `protobuf:"varint,3,opt,name=partition_created_timestamp,json=partitionCreatedTimestamp,proto3" json:"partition_created_timestamp,omitempty"` - CollectionId int64 `protobuf:"varint,4,opt,name=collection_id,json=collectionId,proto3" json:"collection_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PartitionID int64 `protobuf:"varint,1,opt,name=partitionID,proto3" json:"partitionID,omitempty"` + PartitionName string `protobuf:"bytes,2,opt,name=partitionName,proto3" json:"partitionName,omitempty"` + PartitionCreatedTimestamp uint64 `protobuf:"varint,3,opt,name=partition_created_timestamp,json=partitionCreatedTimestamp,proto3" json:"partition_created_timestamp,omitempty"` + CollectionId int64 `protobuf:"varint,4,opt,name=collection_id,json=collectionId,proto3" json:"collection_id,omitempty"` + State PartitionState `protobuf:"varint,5,opt,name=state,proto3,enum=milvus.proto.etcd.PartitionState" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *PartitionInfo) Reset() { *m = PartitionInfo{} } @@ -334,13 +436,21 @@ func (m *PartitionInfo) GetCollectionId() int64 { return 0 } +func (m *PartitionInfo) GetState() PartitionState { + if m != nil { + return m.State + } + return PartitionState_PartitionCreated +} + type AliasInfo struct { - AliasName string `protobuf:"bytes,1,opt,name=alias_name,json=aliasName,proto3" json:"alias_name,omitempty"` - CollectionId int64 `protobuf:"varint,2,opt,name=collection_id,json=collectionId,proto3" json:"collection_id,omitempty"` - CreatedTime uint64 `protobuf:"varint,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + AliasName string `protobuf:"bytes,1,opt,name=alias_name,json=aliasName,proto3" json:"alias_name,omitempty"` + CollectionId int64 `protobuf:"varint,2,opt,name=collection_id,json=collectionId,proto3" json:"collection_id,omitempty"` + CreatedTime uint64 `protobuf:"varint,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` + State AliasState `protobuf:"varint,4,opt,name=state,proto3,enum=milvus.proto.etcd.AliasState" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *AliasInfo) Reset() { *m = AliasInfo{} } @@ -389,6 +499,13 @@ func (m *AliasInfo) GetCreatedTime() uint64 { return 0 } +func (m *AliasInfo) GetState() AliasState { + if m != nil { + return m.State + } + return AliasState_AliasCreated +} + type SegmentIndexInfo struct { CollectionID int64 `protobuf:"varint,1,opt,name=collectionID,proto3" json:"collectionID,omitempty"` PartitionID int64 `protobuf:"varint,2,opt,name=partitionID,proto3" 
json:"partitionID,omitempty"` @@ -638,6 +755,9 @@ func (m *CredentialInfo) GetSha256Password() string { } func init() { + proto.RegisterEnum("milvus.proto.etcd.CollectionState", CollectionState_name, CollectionState_value) + proto.RegisterEnum("milvus.proto.etcd.PartitionState", PartitionState_name, PartitionState_value) + proto.RegisterEnum("milvus.proto.etcd.AliasState", AliasState_name, AliasState_value) proto.RegisterType((*IndexInfo)(nil), "milvus.proto.etcd.IndexInfo") proto.RegisterType((*FieldIndexInfo)(nil), "milvus.proto.etcd.FieldIndexInfo") proto.RegisterType((*CollectionInfo)(nil), "milvus.proto.etcd.CollectionInfo") @@ -651,59 +771,68 @@ func init() { func init() { proto.RegisterFile("etcd_meta.proto", fileDescriptor_975d306d62b73e88) } var fileDescriptor_975d306d62b73e88 = []byte{ - // 861 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xdd, 0x6e, 0xe3, 0x44, - 0x14, 0x96, 0xe3, 0xfc, 0xf9, 0x24, 0x4d, 0xb7, 0x03, 0xac, 0xbc, 0x65, 0x01, 0x6f, 0x60, 0xc1, - 0x37, 0xdb, 0x8a, 0x2e, 0x70, 0x07, 0x02, 0x6a, 0xad, 0x14, 0x01, 0x55, 0x34, 0xad, 0xb8, 0xe0, - 0xc6, 0x9a, 0xd8, 0xa7, 0xcd, 0x48, 0xfe, 0x93, 0x67, 0x5c, 0xe8, 0x1b, 0xf0, 0x46, 0xdc, 0x70, - 0xcb, 0xd3, 0xf0, 0x0e, 0x08, 0xcd, 0x78, 0xec, 0xd8, 0x09, 0xe5, 0x72, 0xef, 0xf2, 0x7d, 0x33, - 0xe7, 0xf8, 0xfc, 0x7c, 0xf3, 0x05, 0x8e, 0x51, 0x46, 0x71, 0x98, 0xa2, 0x64, 0x67, 0x45, 0x99, - 0xcb, 0x9c, 0x9c, 0xa4, 0x3c, 0xb9, 0xaf, 0x44, 0x8d, 0xce, 0xd4, 0xe9, 0xe9, 0x3c, 0xca, 0xd3, - 0x34, 0xcf, 0x6a, 0xea, 0x74, 0x2e, 0xa2, 0x2d, 0xa6, 0xe6, 0xfa, 0xf2, 0x2f, 0x0b, 0x9c, 0x55, - 0x16, 0xe3, 0x6f, 0xab, 0xec, 0x36, 0x27, 0x1f, 0x00, 0x70, 0x05, 0xc2, 0x8c, 0xa5, 0xe8, 0x5a, - 0x9e, 0xe5, 0x3b, 0xd4, 0xd1, 0xcc, 0x15, 0x4b, 0x91, 0xb8, 0x30, 0xd1, 0x60, 0x15, 0xb8, 0x03, - 0xcf, 0xf2, 0x6d, 0xda, 0x40, 0x12, 0xc0, 0xbc, 0x0e, 0x2c, 0x58, 0xc9, 0x52, 0xe1, 0xda, 0x9e, - 0xed, 0xcf, 0x2e, 0x5e, 0x9c, 0xf5, 0x8a, 0x31, 0x65, 0xfc, 0x80, 0x0f, 0x3f, 0xb3, 0xa4, 0xc2, - 0x35, 0xe3, 0x25, 0x9d, 0xe9, 0xb0, 0xb5, 0x8e, 0x52, 0xf9, 0x63, 0x4c, 0x50, 0x62, 0xec, 0x0e, - 0x3d, 0xcb, 0x9f, 0xd2, 0x06, 0x92, 0x8f, 0x60, 0x16, 0x95, 0xc8, 0x24, 0x86, 0x92, 0xa7, 0xe8, - 0x8e, 0x3c, 0xcb, 0x1f, 0x52, 0xa8, 0xa9, 0x1b, 0x9e, 0xe2, 0x32, 0x80, 0xc5, 0x1b, 0x8e, 0x49, - 0xbc, 0xeb, 0xc5, 0x85, 0xc9, 0x2d, 0x4f, 0x30, 0x5e, 0x05, 0xba, 0x11, 0x9b, 0x36, 0xf0, 0xf1, - 0x36, 0x96, 0xff, 0x0c, 0x61, 0x71, 0x99, 0x27, 0x09, 0x46, 0x92, 0xe7, 0x99, 0x4e, 0xb3, 0x80, - 0x41, 0x9b, 0x61, 0xb0, 0x0a, 0xc8, 0xd7, 0x30, 0xae, 0x07, 0xa8, 0x63, 0x67, 0x17, 0x2f, 0xfb, - 0x3d, 0x9a, 0xe1, 0xee, 0x92, 0x5c, 0x6b, 0x82, 0x9a, 0xa0, 0xfd, 0x46, 0xec, 0xfd, 0x46, 0xc8, - 0x12, 0xe6, 0x05, 0x2b, 0x25, 0xd7, 0x05, 0x04, 0xc2, 0x1d, 0x7a, 0xb6, 0x6f, 0xd3, 0x1e, 0x47, - 0x3e, 0x85, 0x45, 0x8b, 0xd5, 0x62, 0x84, 0x3b, 0xf2, 0x6c, 0xdf, 0xa1, 0x7b, 0x2c, 0x79, 0x03, - 0x47, 0xb7, 0x6a, 0x28, 0xa1, 0xee, 0x0f, 0x85, 0x3b, 0xfe, 0xaf, 0xb5, 0x28, 0x8d, 0x9c, 0xf5, - 0x87, 0x47, 0xe7, 0xb7, 0x2d, 0x46, 0x41, 0x2e, 0xe0, 0xbd, 0x7b, 0x5e, 0xca, 0x8a, 0x25, 0x61, - 0xb4, 0x65, 0x59, 0x86, 0x89, 0x16, 0x88, 0x70, 0x27, 0xfa, 0xb3, 0xef, 0x98, 0xc3, 0xcb, 0xfa, - 0xac, 0xfe, 0xf6, 0x17, 0xf0, 0xb4, 0xd8, 0x3e, 0x08, 0x1e, 0x1d, 0x04, 0x4d, 0x75, 0xd0, 0xbb, - 0xcd, 0x69, 0x2f, 0xea, 0x5b, 0x78, 0xde, 0xf6, 0x10, 0xd6, 0x53, 0x89, 0xf5, 0xa4, 0x84, 0x64, - 0x69, 0x21, 0x5c, 0xc7, 0xb3, 0xfd, 0x21, 0x3d, 0x6d, 0xef, 0x5c, 0xd6, 0x57, 0x6e, 0xda, 0x1b, - 0x4a, 0xc2, 0x62, 0xcb, 0xca, 0x58, 0x84, 0x59, 
0x95, 0xba, 0xe0, 0x59, 0xfe, 0x88, 0x3a, 0x35, - 0x73, 0x55, 0xa5, 0x64, 0x05, 0xc7, 0x42, 0xb2, 0x52, 0x86, 0x45, 0x2e, 0x74, 0x06, 0xe1, 0xce, - 0xf4, 0x50, 0xbc, 0xc7, 0xb4, 0x1a, 0x30, 0xc9, 0xb4, 0x54, 0x17, 0x3a, 0x70, 0xdd, 0xc4, 0x11, - 0x0a, 0x27, 0x51, 0x9e, 0x09, 0x2e, 0x24, 0x66, 0xd1, 0x43, 0x98, 0xe0, 0x3d, 0x26, 0xee, 0xdc, - 0xb3, 0xfc, 0xc5, 0xbe, 0x28, 0x4c, 0xb2, 0xcb, 0xdd, 0xed, 0x1f, 0xd5, 0x65, 0xfa, 0x24, 0xda, - 0x63, 0x96, 0x7f, 0x5a, 0x70, 0xb4, 0x6e, 0x57, 0xad, 0xf4, 0xe7, 0xc1, 0xac, 0xb3, 0x7b, 0x23, - 0xc4, 0x2e, 0x45, 0x3e, 0x81, 0xa3, 0xde, 0xde, 0xb5, 0x30, 0x1d, 0xda, 0x27, 0xc9, 0x37, 0xf0, - 0xfe, 0xff, 0x4c, 0xd6, 0x08, 0xf1, 0xd9, 0xa3, 0x83, 0x25, 0x1f, 0xc3, 0x51, 0xd4, 0x8a, 0x3a, - 0xe4, 0xf5, 0x0b, 0xb5, 0xe9, 0x7c, 0x47, 0xae, 0xe2, 0x65, 0x09, 0xce, 0x77, 0x09, 0x67, 0xa2, - 0x31, 0x13, 0xa6, 0x40, 0xcf, 0x4c, 0x34, 0xa3, 0x0b, 0x3a, 0x48, 0x38, 0x38, 0x4c, 0x48, 0x5e, - 0xc0, 0xbc, 0x5b, 0xab, 0x29, 0xd3, 0x3c, 0x21, 0x5d, 0xdd, 0xf2, 0xf7, 0x01, 0x3c, 0xb9, 0xc6, - 0xbb, 0x14, 0x33, 0xb9, 0x7b, 0xfc, 0x4b, 0xe8, 0xe6, 0x69, 0xc6, 0xd6, 0xe3, 0xf6, 0x27, 0x3b, - 0x38, 0x9c, 0xec, 0x73, 0x70, 0x84, 0xc9, 0x1c, 0xe8, 0x4f, 0xdb, 0x74, 0x47, 0xd4, 0x06, 0xa3, - 0x5e, 0x49, 0x60, 0x66, 0xd1, 0xc0, 0xae, 0xc1, 0x8c, 0xfa, 0x3e, 0xe9, 0xc2, 0x64, 0x53, 0x71, - 0x1d, 0x33, 0xae, 0x4f, 0x0c, 0x54, 0x9d, 0x62, 0xc6, 0x36, 0x09, 0xd6, 0x8f, 0xd5, 0x9d, 0x68, - 0x03, 0x9c, 0xd5, 0x9c, 0x6e, 0x6c, 0xdf, 0x3b, 0xa6, 0x07, 0x26, 0xf8, 0xb7, 0xd5, 0xb5, 0xaf, - 0x9f, 0x50, 0xb2, 0xb7, 0x6e, 0x5f, 0x1f, 0x02, 0xb4, 0x13, 0x6a, 0xcc, 0xab, 0xc3, 0x90, 0x97, - 0x1d, 0xeb, 0x0a, 0x25, 0xbb, 0x6b, 0xac, 0x6b, 0xa7, 0xd6, 0x1b, 0x76, 0x27, 0x0e, 0x5c, 0x70, - 0x7c, 0xe8, 0x82, 0xcb, 0x3f, 0x54, 0xb7, 0x25, 0xc6, 0x98, 0x49, 0xce, 0x12, 0xbd, 0xf6, 0x53, - 0x98, 0x56, 0x02, 0xcb, 0x8e, 0xe0, 0x5a, 0x4c, 0x5e, 0x01, 0xc1, 0x2c, 0x2a, 0x1f, 0x0a, 0x25, - 0xa6, 0x82, 0x09, 0xf1, 0x6b, 0x5e, 0xc6, 0xe6, 0xad, 0x9c, 0xb4, 0x27, 0x6b, 0x73, 0x40, 0x9e, - 0xc2, 0x58, 0x62, 0xc6, 0x32, 0xa9, 0x9b, 0x74, 0xa8, 0x41, 0xe4, 0x19, 0x4c, 0xb9, 0x08, 0x45, - 0x55, 0x60, 0xd9, 0xfc, 0x49, 0x71, 0x71, 0xad, 0x20, 0xf9, 0x0c, 0x8e, 0xc5, 0x96, 0x5d, 0x7c, - 0xf9, 0xd5, 0x2e, 0xfd, 0x48, 0xc7, 0x2e, 0x6a, 0xba, 0xc9, 0xfd, 0xfd, 0xeb, 0x5f, 0x3e, 0xbf, - 0xe3, 0x72, 0x5b, 0x6d, 0x94, 0x33, 0x9c, 0xd7, 0x0b, 0x78, 0xc5, 0x73, 0xf3, 0xeb, 0x9c, 0x67, - 0x52, 0xd5, 0x9c, 0x9c, 0xeb, 0x9d, 0x9c, 0x2b, 0x7f, 0x2e, 0x36, 0x9b, 0xb1, 0x46, 0xaf, 0xff, - 0x0d, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x28, 0x7e, 0xad, 0xf2, 0x07, 0x00, 0x00, + // 1000 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcb, 0x6e, 0x23, 0x45, + 0x14, 0x9d, 0x76, 0xfb, 0xd5, 0xd7, 0x8f, 0xd8, 0xc5, 0x4c, 0xd4, 0x13, 0x66, 0xa0, 0xc7, 0x30, + 0xd0, 0x1a, 0x69, 0x12, 0x91, 0xf0, 0xda, 0x80, 0x80, 0xb4, 0x46, 0xb2, 0x80, 0x91, 0xd5, 0x89, + 0xb2, 0x60, 0xd3, 0x2a, 0x77, 0x57, 0xec, 0x42, 0xfd, 0x52, 0x57, 0x39, 0x90, 0x3f, 0xe0, 0x4f, + 0xf8, 0x04, 0xf8, 0x01, 0xbe, 0x86, 0x35, 0x7b, 0x54, 0x55, 0xfd, 0xb6, 0xc3, 0x92, 0x9d, 0xef, + 0xe9, 0xba, 0xb7, 0xee, 0xb9, 0xf7, 0xd4, 0x31, 0x1c, 0x11, 0xee, 0x07, 0x5e, 0x44, 0x38, 0x3e, + 0x4d, 0xb3, 0x84, 0x27, 0x68, 0x1e, 0xd1, 0xf0, 0x6e, 0xc7, 0x54, 0x74, 0x2a, 0xbe, 0x9e, 0x8c, + 0xfd, 0x24, 0x8a, 0x92, 0x58, 0x41, 0x27, 0x63, 0xe6, 0x6f, 0x49, 0x94, 0x1f, 0x5f, 0xfc, 0xa5, + 0x81, 0xb1, 0x8c, 0x03, 0xf2, 0xeb, 0x32, 0xbe, 0x4d, 0xd0, 0x73, 0x00, 0x2a, 0x02, 0x2f, 0xc6, + 0x11, 0x31, 0x35, 0x4b, 0xb3, 0x0d, 0xd7, 
0x90, 0xc8, 0x5b, 0x1c, 0x11, 0x64, 0xc2, 0x40, 0x06, + 0x4b, 0xc7, 0xec, 0x58, 0x9a, 0xad, 0xbb, 0x45, 0x88, 0x1c, 0x18, 0xab, 0xc4, 0x14, 0x67, 0x38, + 0x62, 0xa6, 0x6e, 0xe9, 0xf6, 0xe8, 0xfc, 0xc5, 0x69, 0xa3, 0x99, 0xbc, 0x8d, 0xef, 0xc9, 0xfd, + 0x0d, 0x0e, 0x77, 0x64, 0x85, 0x69, 0xe6, 0x8e, 0x64, 0xda, 0x4a, 0x66, 0x89, 0xfa, 0x01, 0x09, + 0x09, 0x27, 0x81, 0xd9, 0xb5, 0x34, 0x7b, 0xe8, 0x16, 0x21, 0x7a, 0x1f, 0x46, 0x7e, 0x46, 0x30, + 0x27, 0x1e, 0xa7, 0x11, 0x31, 0x7b, 0x96, 0x66, 0x77, 0x5d, 0x50, 0xd0, 0x35, 0x8d, 0xc8, 0xc2, + 0x81, 0xe9, 0x1b, 0x4a, 0xc2, 0xa0, 0xe2, 0x62, 0xc2, 0xe0, 0x96, 0x86, 0x24, 0x58, 0x3a, 0x92, + 0x88, 0xee, 0x16, 0xe1, 0xc3, 0x34, 0x16, 0x7f, 0xf6, 0x60, 0x7a, 0x99, 0x84, 0x21, 0xf1, 0x39, + 0x4d, 0x62, 0x59, 0x66, 0x0a, 0x9d, 0xb2, 0x42, 0x67, 0xe9, 0xa0, 0xaf, 0xa0, 0xaf, 0x06, 0x28, + 0x73, 0x47, 0xe7, 0x2f, 0x9b, 0x1c, 0xf3, 0xe1, 0x56, 0x45, 0xae, 0x24, 0xe0, 0xe6, 0x49, 0x6d, + 0x22, 0x7a, 0x9b, 0x08, 0x5a, 0xc0, 0x38, 0xc5, 0x19, 0xa7, 0xb2, 0x01, 0x87, 0x99, 0x5d, 0x4b, + 0xb7, 0x75, 0xb7, 0x81, 0xa1, 0x8f, 0x60, 0x5a, 0xc6, 0x62, 0x31, 0xcc, 0xec, 0x59, 0xba, 0x6d, + 0xb8, 0x2d, 0x14, 0xbd, 0x81, 0xc9, 0xad, 0x18, 0x8a, 0x27, 0xf9, 0x11, 0x66, 0xf6, 0x0f, 0xad, + 0x45, 0x68, 0xe4, 0xb4, 0x39, 0x3c, 0x77, 0x7c, 0x5b, 0xc6, 0x84, 0xa1, 0x73, 0x78, 0x72, 0x47, + 0x33, 0xbe, 0xc3, 0xa1, 0xe7, 0x6f, 0x71, 0x1c, 0x93, 0x50, 0x0a, 0x84, 0x99, 0x03, 0x79, 0xed, + 0x3b, 0xf9, 0xc7, 0x4b, 0xf5, 0x4d, 0xdd, 0xfd, 0x29, 0x1c, 0xa7, 0xdb, 0x7b, 0x46, 0xfd, 0xbd, + 0xa4, 0xa1, 0x4c, 0x7a, 0x5c, 0x7c, 0x6d, 0x64, 0x7d, 0x03, 0xcf, 0x4a, 0x0e, 0x9e, 0x9a, 0x4a, + 0x20, 0x27, 0xc5, 0x38, 0x8e, 0x52, 0x66, 0x1a, 0x96, 0x6e, 0x77, 0xdd, 0x93, 0xf2, 0xcc, 0xa5, + 0x3a, 0x72, 0x5d, 0x9e, 0x10, 0x12, 0x66, 0x5b, 0x9c, 0x05, 0xcc, 0x8b, 0x77, 0x91, 0x09, 0x96, + 0x66, 0xf7, 0x5c, 0x43, 0x21, 0x6f, 0x77, 0x11, 0x5a, 0xc2, 0x11, 0xe3, 0x38, 0xe3, 0x5e, 0x9a, + 0x30, 0x59, 0x81, 0x99, 0x23, 0x39, 0x14, 0xeb, 0x21, 0xad, 0x3a, 0x98, 0x63, 0x29, 0xd5, 0xa9, + 0x4c, 0x5c, 0x15, 0x79, 0xc8, 0x85, 0xb9, 0x9f, 0xc4, 0x8c, 0x32, 0x4e, 0x62, 0xff, 0xde, 0x0b, + 0xc9, 0x1d, 0x09, 0xcd, 0xb1, 0xa5, 0xd9, 0xd3, 0xb6, 0x28, 0xf2, 0x62, 0x97, 0xd5, 0xe9, 0x1f, + 0xc4, 0x61, 0x77, 0xe6, 0xb7, 0x10, 0xf4, 0x25, 0xf4, 0x18, 0xc7, 0x9c, 0x98, 0x13, 0x59, 0x67, + 0x71, 0x60, 0x53, 0x35, 0x69, 0x89, 0x93, 0xae, 0x4a, 0x58, 0xfc, 0xa3, 0xc1, 0x64, 0x55, 0x8a, + 0x44, 0x28, 0xd7, 0x82, 0x51, 0x4d, 0x35, 0xb9, 0x84, 0xeb, 0x10, 0xfa, 0x10, 0x26, 0x0d, 0xc5, + 0x48, 0x49, 0x1b, 0x6e, 0x13, 0x44, 0x5f, 0xc3, 0xbb, 0xff, 0xb1, 0x93, 0x5c, 0xc2, 0x4f, 0x1f, + 0x5c, 0x09, 0xfa, 0x00, 0x26, 0x7e, 0xd9, 0xb3, 0x47, 0xd5, 0xdb, 0xd6, 0xdd, 0x71, 0x05, 0x2e, + 0x03, 0xf4, 0x45, 0x41, 0xbc, 0x27, 0x89, 0x1f, 0x92, 0x68, 0xc9, 0xae, 0xc1, 0xfb, 0x77, 0x0d, + 0x8c, 0x6f, 0x43, 0x8a, 0x59, 0x61, 0x60, 0x58, 0x04, 0x0d, 0x03, 0x93, 0x88, 0xa4, 0xb2, 0xd7, + 0x4a, 0xe7, 0x40, 0x2b, 0x2f, 0x60, 0x5c, 0x67, 0x99, 0x13, 0xcc, 0x9f, 0xad, 0xe4, 0x85, 0x2e, + 0x8a, 0x6e, 0xbb, 0xb2, 0xdb, 0xe7, 0x07, 0xba, 0x95, 0x3d, 0x35, 0x3a, 0xfd, 0xad, 0x03, 0xb3, + 0x2b, 0xb2, 0x89, 0x48, 0xcc, 0x2b, 0x97, 0x5a, 0x40, 0xfd, 0xf2, 0x62, 0x4b, 0x0d, 0xac, 0xbd, + 0xc8, 0xce, 0xfe, 0x22, 0x9f, 0x81, 0xc1, 0xf2, 0xca, 0x8e, 0xec, 0x57, 0x77, 0x2b, 0x40, 0x39, + 0xa1, 0x78, 0xce, 0x4e, 0x3e, 0xfa, 0x22, 0xac, 0x3b, 0x61, 0xaf, 0x69, 0xe8, 0x26, 0x0c, 0xd6, + 0x3b, 0x2a, 0x73, 0xfa, 0xea, 0x4b, 0x1e, 0x8a, 0xf1, 0x90, 0x18, 0xaf, 0x43, 0xa2, 0x5c, 0xc5, + 0x1c, 0x48, 0xa7, 0x1e, 0x29, 0x4c, 0x12, 0x6b, 0x9b, 0xdc, 0x70, 
0xcf, 0xad, 0xff, 0xd6, 0xea, + 0x3e, 0xfb, 0x23, 0xe1, 0xf8, 0x7f, 0xf7, 0xd9, 0xf7, 0x00, 0xca, 0x09, 0x15, 0x2e, 0x5b, 0x43, + 0xd0, 0xcb, 0x9a, 0xc7, 0x7a, 0x1c, 0x6f, 0x0a, 0x8f, 0xad, 0x1e, 0xc7, 0x35, 0xde, 0xb0, 0x3d, + 0xbb, 0xee, 0xef, 0xdb, 0xf5, 0xe2, 0x0f, 0xc1, 0x36, 0x23, 0x01, 0x89, 0x39, 0xc5, 0xa1, 0x5c, + 0xfb, 0x09, 0x0c, 0x77, 0x8c, 0x64, 0x35, 0x95, 0x96, 0x31, 0x7a, 0x0d, 0x88, 0xc4, 0x7e, 0x76, + 0x9f, 0x0a, 0x05, 0xa6, 0x98, 0xb1, 0x5f, 0x92, 0x2c, 0xc8, 0x9f, 0xe6, 0xbc, 0xfc, 0xb2, 0xca, + 0x3f, 0xa0, 0x63, 0xe8, 0x73, 0x12, 0xe3, 0x98, 0x4b, 0x92, 0x86, 0x9b, 0x47, 0xe8, 0x29, 0x0c, + 0x29, 0xf3, 0xd8, 0x2e, 0x25, 0x59, 0xf1, 0x6f, 0x4a, 0xd9, 0x95, 0x08, 0xd1, 0xc7, 0x70, 0xc4, + 0xb6, 0xf8, 0xfc, 0xb3, 0xcf, 0xab, 0xf2, 0x3d, 0x99, 0x3b, 0x55, 0x70, 0x51, 0xfb, 0x55, 0x02, + 0x47, 0x2d, 0xbb, 0x41, 0x4f, 0x60, 0x5e, 0x41, 0xf9, 0x5b, 0x9f, 0x3d, 0x42, 0xc7, 0x80, 0x5a, + 0x30, 0x8d, 0x37, 0x33, 0xad, 0x89, 0x3b, 0x59, 0x92, 0xa6, 0x02, 0xef, 0x34, 0xcb, 0x48, 0x9c, + 0x04, 0x33, 0xfd, 0xd5, 0xcf, 0x30, 0x6d, 0x3e, 0x73, 0xf4, 0x18, 0x66, 0xab, 0x96, 0xb5, 0xcc, + 0x1e, 0x89, 0xf4, 0x26, 0xaa, 0x6e, 0xab, 0xc3, 0xb5, 0xcb, 0xea, 0x35, 0xaa, 0xbb, 0x6e, 0x00, + 0xaa, 0x47, 0x8a, 0x66, 0x30, 0x96, 0x51, 0x75, 0xc7, 0x1c, 0x26, 0x15, 0xa2, 0xea, 0x17, 0x50, + 0xad, 0x76, 0x91, 0x57, 0xd6, 0xfd, 0xee, 0xe2, 0xa7, 0x4f, 0x36, 0x94, 0x6f, 0x77, 0x6b, 0xe1, + 0xfb, 0x67, 0x4a, 0xb5, 0xaf, 0x69, 0x92, 0xff, 0x3a, 0xa3, 0x31, 0x17, 0x8b, 0x0e, 0xcf, 0xa4, + 0x90, 0xcf, 0x84, 0x59, 0xa4, 0xeb, 0x75, 0x5f, 0x46, 0x17, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, + 0x43, 0x11, 0x13, 0x31, 0xd0, 0x09, 0x00, 0x00, } diff --git a/internal/proto/proxy.proto b/internal/proto/proxy.proto index 178e39bf3a..4b0ddd5f6e 100644 --- a/internal/proto/proxy.proto +++ b/internal/proto/proxy.proto @@ -14,11 +14,6 @@ service Proxy { rpc InvalidateCollectionMetaCache(InvalidateCollMetaCacheRequest) returns (common.Status) {} rpc GetDdChannel(internal.GetDdChannelRequest) returns (milvus.StringResponse) {} - rpc ReleaseDQLMessageStream(ReleaseDQLMessageStreamRequest) returns (common.Status) {} - - rpc SendSearchResult(internal.SearchResults) returns (common.Status) {} - rpc SendRetrieveResult(internal.RetrieveResults) returns (common.Status) {} - rpc InvalidateCredentialCache(InvalidateCredCacheRequest) returns (common.Status) {} rpc UpdateCredentialCache(UpdateCredCacheRequest) returns (common.Status) {} @@ -32,12 +27,6 @@ message InvalidateCollMetaCacheRequest { int64 collectionID = 4; } -message ReleaseDQLMessageStreamRequest { - common.MsgBase base = 1; - int64 dbID = 2; - int64 collectionID = 3; -} - message InvalidateCredCacheRequest { common.MsgBase base = 1; string username = 2; diff --git a/internal/proto/proxypb/proxy.pb.go b/internal/proto/proxypb/proxy.pb.go index 576184e880..c75e891df2 100644 --- a/internal/proto/proxypb/proxy.pb.go +++ b/internal/proto/proxypb/proxy.pb.go @@ -90,61 +90,6 @@ func (m *InvalidateCollMetaCacheRequest) GetCollectionID() int64 { return 0 } -type ReleaseDQLMessageStreamRequest struct { - Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"` - DbID int64 `protobuf:"varint,2,opt,name=dbID,proto3" json:"dbID,omitempty"` - CollectionID int64 `protobuf:"varint,3,opt,name=collectionID,proto3" json:"collectionID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReleaseDQLMessageStreamRequest) Reset() { *m = ReleaseDQLMessageStreamRequest{} } 
-func (m *ReleaseDQLMessageStreamRequest) String() string { return proto.CompactTextString(m) } -func (*ReleaseDQLMessageStreamRequest) ProtoMessage() {} -func (*ReleaseDQLMessageStreamRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_700b50b08ed8dbaf, []int{1} -} - -func (m *ReleaseDQLMessageStreamRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReleaseDQLMessageStreamRequest.Unmarshal(m, b) -} -func (m *ReleaseDQLMessageStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReleaseDQLMessageStreamRequest.Marshal(b, m, deterministic) -} -func (m *ReleaseDQLMessageStreamRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReleaseDQLMessageStreamRequest.Merge(m, src) -} -func (m *ReleaseDQLMessageStreamRequest) XXX_Size() int { - return xxx_messageInfo_ReleaseDQLMessageStreamRequest.Size(m) -} -func (m *ReleaseDQLMessageStreamRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReleaseDQLMessageStreamRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReleaseDQLMessageStreamRequest proto.InternalMessageInfo - -func (m *ReleaseDQLMessageStreamRequest) GetBase() *commonpb.MsgBase { - if m != nil { - return m.Base - } - return nil -} - -func (m *ReleaseDQLMessageStreamRequest) GetDbID() int64 { - if m != nil { - return m.DbID - } - return 0 -} - -func (m *ReleaseDQLMessageStreamRequest) GetCollectionID() int64 { - if m != nil { - return m.CollectionID - } - return 0 -} - type InvalidateCredCacheRequest struct { Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"` Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` @@ -157,7 +102,7 @@ func (m *InvalidateCredCacheRequest) Reset() { *m = InvalidateCredCacheR func (m *InvalidateCredCacheRequest) String() string { return proto.CompactTextString(m) } func (*InvalidateCredCacheRequest) ProtoMessage() {} func (*InvalidateCredCacheRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_700b50b08ed8dbaf, []int{2} + return fileDescriptor_700b50b08ed8dbaf, []int{1} } func (m *InvalidateCredCacheRequest) XXX_Unmarshal(b []byte) error { @@ -206,7 +151,7 @@ func (m *UpdateCredCacheRequest) Reset() { *m = UpdateCredCacheRequest{} func (m *UpdateCredCacheRequest) String() string { return proto.CompactTextString(m) } func (*UpdateCredCacheRequest) ProtoMessage() {} func (*UpdateCredCacheRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_700b50b08ed8dbaf, []int{3} + return fileDescriptor_700b50b08ed8dbaf, []int{2} } func (m *UpdateCredCacheRequest) XXX_Unmarshal(b []byte) error { @@ -261,7 +206,7 @@ func (m *RefreshPolicyInfoCacheRequest) Reset() { *m = RefreshPolicyInfo func (m *RefreshPolicyInfoCacheRequest) String() string { return proto.CompactTextString(m) } func (*RefreshPolicyInfoCacheRequest) ProtoMessage() {} func (*RefreshPolicyInfoCacheRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_700b50b08ed8dbaf, []int{4} + return fileDescriptor_700b50b08ed8dbaf, []int{3} } func (m *RefreshPolicyInfoCacheRequest) XXX_Unmarshal(b []byte) error { @@ -305,7 +250,6 @@ func (m *RefreshPolicyInfoCacheRequest) GetOpKey() string { func init() { proto.RegisterType((*InvalidateCollMetaCacheRequest)(nil), "milvus.proto.proxy.InvalidateCollMetaCacheRequest") - proto.RegisterType((*ReleaseDQLMessageStreamRequest)(nil), "milvus.proto.proxy.ReleaseDQLMessageStreamRequest") proto.RegisterType((*InvalidateCredCacheRequest)(nil), "milvus.proto.proxy.InvalidateCredCacheRequest") 
proto.RegisterType((*UpdateCredCacheRequest)(nil), "milvus.proto.proxy.UpdateCredCacheRequest") proto.RegisterType((*RefreshPolicyInfoCacheRequest)(nil), "milvus.proto.proxy.RefreshPolicyInfoCacheRequest") @@ -314,45 +258,39 @@ func init() { func init() { proto.RegisterFile("proxy.proto", fileDescriptor_700b50b08ed8dbaf) } var fileDescriptor_700b50b08ed8dbaf = []byte{ - // 603 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xdd, 0x6e, 0xd3, 0x30, - 0x14, 0x5e, 0xe8, 0x36, 0xc6, 0x59, 0x35, 0x90, 0x35, 0xb6, 0x11, 0xd8, 0x34, 0x05, 0x04, 0xd3, - 0x24, 0xda, 0x51, 0x78, 0x82, 0xb5, 0xd2, 0x54, 0xc1, 0xd0, 0x48, 0x41, 0x48, 0x70, 0x81, 0x9c, - 0xe4, 0xac, 0xf5, 0xe4, 0xd8, 0x59, 0xec, 0x0c, 0x7a, 0x85, 0xc4, 0x25, 0xaf, 0xc1, 0x4b, 0xf0, - 0x78, 0x28, 0x3f, 0x4b, 0x9b, 0x36, 0x69, 0x04, 0x13, 0x77, 0xfe, 0xec, 0xcf, 0xe7, 0xfb, 0x8e, - 0x8f, 0xcf, 0x81, 0xf5, 0x20, 0x94, 0xdf, 0xc6, 0xad, 0x20, 0x94, 0x5a, 0x12, 0xe2, 0x33, 0x7e, - 0x15, 0xa9, 0x14, 0xb5, 0x92, 0x13, 0xb3, 0xe9, 0x4a, 0xdf, 0x97, 0x22, 0xdd, 0x33, 0x37, 0x98, - 0xd0, 0x18, 0x0a, 0xca, 0x33, 0xdc, 0x9c, 0xbe, 0x61, 0xfd, 0x36, 0x60, 0xaf, 0x2f, 0xae, 0x28, - 0x67, 0x1e, 0xd5, 0xd8, 0x95, 0x9c, 0x9f, 0xa2, 0xa6, 0x5d, 0xea, 0x8e, 0xd0, 0xc6, 0xcb, 0x08, - 0x95, 0x26, 0x47, 0xb0, 0xec, 0x50, 0x85, 0x3b, 0xc6, 0xbe, 0x71, 0xb0, 0xde, 0x79, 0xd4, 0x2a, - 0x28, 0x66, 0x52, 0xa7, 0x6a, 0x78, 0x4c, 0x15, 0xda, 0x09, 0x93, 0x6c, 0xc3, 0x6d, 0xcf, 0xf9, - 0x22, 0xa8, 0x8f, 0x3b, 0xb7, 0xf6, 0x8d, 0x83, 0x3b, 0xf6, 0xaa, 0xe7, 0xbc, 0xa5, 0x3e, 0x92, - 0x67, 0x70, 0xd7, 0x95, 0x9c, 0xa3, 0xab, 0x99, 0x14, 0x29, 0xa1, 0x91, 0x10, 0x36, 0x26, 0xdb, - 0x09, 0xd1, 0x82, 0xe6, 0x64, 0xa7, 0xdf, 0xdb, 0x59, 0xde, 0x37, 0x0e, 0x1a, 0x76, 0x61, 0xcf, - 0xfa, 0x69, 0xc0, 0x9e, 0x8d, 0x1c, 0xa9, 0xc2, 0xde, 0xbb, 0x37, 0xa7, 0xa8, 0x14, 0x1d, 0xe2, - 0x40, 0x87, 0x48, 0xfd, 0x7f, 0xb7, 0x4e, 0x60, 0xd9, 0x73, 0xfa, 0xbd, 0xc4, 0x77, 0xc3, 0x4e, - 0xd6, 0x73, 0x66, 0x1a, 0x25, 0x66, 0x2e, 0xc0, 0x9c, 0x7a, 0xc6, 0x10, 0xbd, 0x1b, 0x3e, 0xa1, - 0x09, 0x6b, 0x91, 0x8a, 0xcb, 0x96, 0xbf, 0x61, 0x8e, 0xad, 0x1f, 0x06, 0x6c, 0x7d, 0x08, 0xfe, - 0xbf, 0x50, 0x7c, 0x16, 0x50, 0xa5, 0xbe, 0xca, 0xd0, 0xcb, 0xea, 0x94, 0x63, 0xeb, 0x3b, 0xec, - 0xda, 0x78, 0x1e, 0xa2, 0x1a, 0x9d, 0x49, 0xce, 0xdc, 0x71, 0x5f, 0x9c, 0xcb, 0x1b, 0x5a, 0xd9, - 0x82, 0x55, 0x19, 0xbc, 0x1f, 0x07, 0xa9, 0x91, 0x15, 0x3b, 0x43, 0x64, 0x13, 0x56, 0x64, 0xf0, - 0x1a, 0xc7, 0x99, 0x87, 0x14, 0x74, 0x7e, 0xad, 0xc1, 0xca, 0x59, 0xfc, 0xdf, 0x49, 0x00, 0xe4, - 0x04, 0x75, 0x57, 0xfa, 0x81, 0x14, 0x28, 0xf4, 0x40, 0x53, 0x8d, 0x8a, 0x1c, 0x15, 0x15, 0xf3, - 0x2e, 0x98, 0xa7, 0x66, 0x8e, 0xcd, 0xa7, 0x15, 0x37, 0x66, 0xe8, 0xd6, 0x12, 0xb9, 0x84, 0xcd, - 0x13, 0x4c, 0x20, 0x53, 0x9a, 0xb9, 0xaa, 0x3b, 0xa2, 0x42, 0x20, 0x27, 0x9d, 0x6a, 0xcd, 0x39, - 0xf2, 0xb5, 0xea, 0xe3, 0xe2, 0x9d, 0x0c, 0x0c, 0x74, 0xc8, 0xc4, 0xd0, 0x46, 0x15, 0x48, 0xa1, - 0xd0, 0x5a, 0x22, 0x21, 0xec, 0x16, 0xfb, 0x34, 0xfd, 0x7a, 0x79, 0xb7, 0xce, 0x6a, 0xa7, 0x43, - 0x62, 0x71, 0x6b, 0x9b, 0x0f, 0x4b, 0xab, 0x12, 0x5b, 0x8d, 0xe2, 0x34, 0x29, 0x34, 0x4f, 0x50, - 0xf7, 0xbc, 0xeb, 0xf4, 0x0e, 0xab, 0xd3, 0xcb, 0x49, 0x7f, 0x99, 0x16, 0x87, 0xed, 0x8a, 0x1e, - 0x2e, 0x4f, 0x68, 0x71, 0xc3, 0xd7, 0x25, 0xf4, 0x11, 0xee, 0x0d, 0x50, 0x78, 0x03, 0xa4, 0xa1, - 0x3b, 0xb2, 0x51, 0x45, 0x5c, 0x93, 0x27, 0x15, 0x49, 0x4d, 0x93, 0x54, 0x5d, 0xe0, 0xcf, 0x40, - 0xe2, 0xc0, 0x36, 0xea, 0x90, 0xe1, 0x15, 0x66, 0xa1, 0xab, 0x3e, 0x54, 0x91, 0x56, 0x1b, 0xfc, 
- 0x02, 0x1e, 0x14, 0x67, 0x0b, 0x0a, 0xcd, 0x28, 0x4f, 0xcb, 0xde, 0xaa, 0x29, 0xfb, 0xcc, 0x84, - 0xa8, 0xd3, 0x72, 0xe0, 0xfe, 0x64, 0xb4, 0x4c, 0xeb, 0x1c, 0x96, 0xe9, 0x94, 0x4f, 0xa1, 0xfa, - 0x7c, 0xb6, 0xca, 0x47, 0x07, 0x79, 0x51, 0x5e, 0xf2, 0x05, 0x63, 0xa6, 0x46, 0xeb, 0xf8, 0xd5, - 0xa7, 0xce, 0x90, 0xe9, 0x51, 0xe4, 0xc4, 0x27, 0xed, 0x94, 0xfa, 0x9c, 0xc9, 0x6c, 0xd5, 0xbe, - 0x2e, 0x49, 0x3b, 0xb9, 0xdd, 0x4e, 0x04, 0x03, 0xc7, 0x59, 0x4d, 0xe0, 0xcb, 0x3f, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x61, 0xb0, 0x0e, 0xba, 0x6b, 0x07, 0x00, 0x00, + // 504 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0x5d, 0x6f, 0xd3, 0x30, + 0x14, 0x5d, 0xd8, 0x5a, 0xe0, 0xae, 0x1a, 0x92, 0x35, 0x4a, 0x09, 0x0c, 0x55, 0x41, 0x82, 0x6a, + 0x12, 0xed, 0x28, 0xfc, 0x82, 0x75, 0x52, 0x55, 0xa1, 0xa1, 0x29, 0x83, 0x17, 0x5e, 0x90, 0x93, + 0xdc, 0xb5, 0xae, 0x1c, 0xdb, 0x8b, 0x9d, 0x41, 0x9f, 0x90, 0xf8, 0x55, 0xfc, 0x3b, 0x50, 0x3e, + 0x9a, 0x36, 0x5d, 0xba, 0x0a, 0x55, 0x7b, 0xcb, 0xb1, 0xcf, 0xd5, 0x39, 0x27, 0xbe, 0x07, 0xf6, + 0x55, 0x24, 0x7f, 0xce, 0xba, 0x2a, 0x92, 0x46, 0x12, 0x12, 0x32, 0x7e, 0x13, 0xeb, 0x0c, 0x75, + 0xd3, 0x1b, 0xbb, 0xe1, 0xcb, 0x30, 0x94, 0x22, 0x3b, 0xb3, 0x0f, 0x98, 0x30, 0x18, 0x09, 0xca, + 0x73, 0xdc, 0x58, 0x9e, 0x70, 0xfe, 0x58, 0xf0, 0x6a, 0x24, 0x6e, 0x28, 0x67, 0x01, 0x35, 0x38, + 0x90, 0x9c, 0x9f, 0xa3, 0xa1, 0x03, 0xea, 0x4f, 0xd0, 0xc5, 0xeb, 0x18, 0xb5, 0x21, 0x27, 0xb0, + 0xe7, 0x51, 0x8d, 0x2d, 0xab, 0x6d, 0x75, 0xf6, 0xfb, 0x2f, 0xbb, 0x25, 0xc5, 0x5c, 0xea, 0x5c, + 0x8f, 0x4f, 0xa9, 0x46, 0x37, 0x65, 0x92, 0x67, 0xf0, 0x30, 0xf0, 0xbe, 0x0b, 0x1a, 0x62, 0xeb, + 0x41, 0xdb, 0xea, 0x3c, 0x76, 0xeb, 0x81, 0xf7, 0x99, 0x86, 0x48, 0xde, 0xc2, 0x13, 0x5f, 0x72, + 0x8e, 0xbe, 0x61, 0x52, 0x64, 0x84, 0xdd, 0x94, 0x70, 0xb0, 0x38, 0x4e, 0x89, 0x0e, 0x34, 0x16, + 0x27, 0xa3, 0xb3, 0xd6, 0x5e, 0xdb, 0xea, 0xec, 0xba, 0xa5, 0x33, 0x67, 0x0a, 0xf6, 0x92, 0xf3, + 0x08, 0x83, 0x2d, 0x5d, 0xdb, 0xf0, 0x28, 0xd6, 0xc9, 0x9f, 0x2a, 0x6c, 0x17, 0xd8, 0xf9, 0x6d, + 0x41, 0xf3, 0xab, 0xba, 0x7f, 0xa1, 0xe4, 0x4e, 0x51, 0xad, 0x7f, 0xc8, 0x28, 0xc8, 0x7f, 0x4d, + 0x81, 0x9d, 0x5f, 0x70, 0xe4, 0xe2, 0x55, 0x84, 0x7a, 0x72, 0x21, 0x39, 0xf3, 0x67, 0x23, 0x71, + 0x25, 0xb7, 0xb4, 0xd2, 0x84, 0xba, 0x54, 0x5f, 0x66, 0x2a, 0x33, 0x52, 0x73, 0x73, 0x44, 0x0e, + 0xa1, 0x26, 0xd5, 0x27, 0x9c, 0xe5, 0x1e, 0x32, 0xd0, 0xff, 0x5b, 0x83, 0xda, 0x45, 0xb2, 0x62, + 0x44, 0x01, 0x19, 0xa2, 0x19, 0xc8, 0x50, 0x49, 0x81, 0xc2, 0x5c, 0x1a, 0x6a, 0x50, 0x93, 0x93, + 0xb2, 0x62, 0xb1, 0x78, 0xb7, 0xa9, 0xb9, 0x63, 0xfb, 0xcd, 0x9a, 0x89, 0x15, 0xba, 0xb3, 0x43, + 0xae, 0xe1, 0x70, 0x88, 0x29, 0x64, 0xda, 0x30, 0x5f, 0x0f, 0x26, 0x54, 0x08, 0xe4, 0xa4, 0xbf, + 0x5e, 0xf3, 0x16, 0x79, 0xae, 0xfa, 0xba, 0x3c, 0x93, 0x83, 0x4b, 0x13, 0x31, 0x31, 0x76, 0x51, + 0x2b, 0x29, 0x34, 0x3a, 0x3b, 0x24, 0x82, 0xa3, 0x72, 0x35, 0xb2, 0xd5, 0x2b, 0x0a, 0xb2, 0xaa, + 0x9d, 0xf5, 0xf2, 0xee, 0x36, 0xd9, 0x2f, 0x2a, 0x5f, 0x25, 0xb1, 0x1a, 0x27, 0x31, 0x29, 0x34, + 0x86, 0x68, 0xce, 0x82, 0x79, 0xbc, 0xe3, 0xf5, 0xf1, 0x0a, 0xd2, 0x7f, 0xc6, 0x9a, 0xc2, 0xf3, + 0x72, 0x6f, 0x50, 0x18, 0x46, 0x79, 0x16, 0xa9, 0xbb, 0x21, 0xd2, 0xca, 0xf6, 0x6f, 0x8a, 0xe3, + 0xc1, 0xd3, 0x45, 0x6d, 0x96, 0x75, 0x8e, 0xab, 0x74, 0xaa, 0x1b, 0xb6, 0x49, 0x63, 0x0a, 0xcd, + 0xea, 0x5a, 0x90, 0xf7, 0x55, 0x22, 0x77, 0x56, 0x68, 0x83, 0xd6, 0xe9, 0xc7, 0x6f, 0xfd, 0x31, + 0x33, 0x93, 0xd8, 0x4b, 0x6e, 0x7a, 0x19, 0xf5, 0x1d, 0x93, 0xf9, 0x57, 0x6f, 0xfe, 0x3c, 0xbd, + 0x74, 
0xba, 0x97, 0x0a, 0x2a, 0xcf, 0xab, 0xa7, 0xf0, 0xc3, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x33, 0xad, 0xd0, 0x9b, 0xba, 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -371,9 +309,6 @@ type ProxyClient interface { GetStatisticsChannel(ctx context.Context, in *internalpb.GetStatisticsChannelRequest, opts ...grpc.CallOption) (*milvuspb.StringResponse, error) InvalidateCollectionMetaCache(ctx context.Context, in *InvalidateCollMetaCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) GetDdChannel(ctx context.Context, in *internalpb.GetDdChannelRequest, opts ...grpc.CallOption) (*milvuspb.StringResponse, error) - ReleaseDQLMessageStream(ctx context.Context, in *ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error) - SendSearchResult(ctx context.Context, in *internalpb.SearchResults, opts ...grpc.CallOption) (*commonpb.Status, error) - SendRetrieveResult(ctx context.Context, in *internalpb.RetrieveResults, opts ...grpc.CallOption) (*commonpb.Status, error) InvalidateCredentialCache(ctx context.Context, in *InvalidateCredCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) UpdateCredentialCache(ctx context.Context, in *UpdateCredCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) RefreshPolicyInfoCache(ctx context.Context, in *RefreshPolicyInfoCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) @@ -423,33 +358,6 @@ func (c *proxyClient) GetDdChannel(ctx context.Context, in *internalpb.GetDdChan return out, nil } -func (c *proxyClient) ReleaseDQLMessageStream(ctx context.Context, in *ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { - out := new(commonpb.Status) - err := c.cc.Invoke(ctx, "/milvus.proto.proxy.Proxy/ReleaseDQLMessageStream", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *proxyClient) SendSearchResult(ctx context.Context, in *internalpb.SearchResults, opts ...grpc.CallOption) (*commonpb.Status, error) { - out := new(commonpb.Status) - err := c.cc.Invoke(ctx, "/milvus.proto.proxy.Proxy/SendSearchResult", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *proxyClient) SendRetrieveResult(ctx context.Context, in *internalpb.RetrieveResults, opts ...grpc.CallOption) (*commonpb.Status, error) { - out := new(commonpb.Status) - err := c.cc.Invoke(ctx, "/milvus.proto.proxy.Proxy/SendRetrieveResult", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *proxyClient) InvalidateCredentialCache(ctx context.Context, in *InvalidateCredCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { out := new(commonpb.Status) err := c.cc.Invoke(ctx, "/milvus.proto.proxy.Proxy/InvalidateCredentialCache", in, out, opts...) 
@@ -483,9 +391,6 @@ type ProxyServer interface { GetStatisticsChannel(context.Context, *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error) InvalidateCollectionMetaCache(context.Context, *InvalidateCollMetaCacheRequest) (*commonpb.Status, error) GetDdChannel(context.Context, *internalpb.GetDdChannelRequest) (*milvuspb.StringResponse, error) - ReleaseDQLMessageStream(context.Context, *ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) - SendSearchResult(context.Context, *internalpb.SearchResults) (*commonpb.Status, error) - SendRetrieveResult(context.Context, *internalpb.RetrieveResults) (*commonpb.Status, error) InvalidateCredentialCache(context.Context, *InvalidateCredCacheRequest) (*commonpb.Status, error) UpdateCredentialCache(context.Context, *UpdateCredCacheRequest) (*commonpb.Status, error) RefreshPolicyInfoCache(context.Context, *RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) @@ -507,15 +412,6 @@ func (*UnimplementedProxyServer) InvalidateCollectionMetaCache(ctx context.Conte func (*UnimplementedProxyServer) GetDdChannel(ctx context.Context, req *internalpb.GetDdChannelRequest) (*milvuspb.StringResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetDdChannel not implemented") } -func (*UnimplementedProxyServer) ReleaseDQLMessageStream(ctx context.Context, req *ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReleaseDQLMessageStream not implemented") -} -func (*UnimplementedProxyServer) SendSearchResult(ctx context.Context, req *internalpb.SearchResults) (*commonpb.Status, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendSearchResult not implemented") -} -func (*UnimplementedProxyServer) SendRetrieveResult(ctx context.Context, req *internalpb.RetrieveResults) (*commonpb.Status, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendRetrieveResult not implemented") -} func (*UnimplementedProxyServer) InvalidateCredentialCache(ctx context.Context, req *InvalidateCredCacheRequest) (*commonpb.Status, error) { return nil, status.Errorf(codes.Unimplemented, "method InvalidateCredentialCache not implemented") } @@ -602,60 +498,6 @@ func _Proxy_GetDdChannel_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } -func _Proxy_ReleaseDQLMessageStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReleaseDQLMessageStreamRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProxyServer).ReleaseDQLMessageStream(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/milvus.proto.proxy.Proxy/ReleaseDQLMessageStream", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProxyServer).ReleaseDQLMessageStream(ctx, req.(*ReleaseDQLMessageStreamRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Proxy_SendSearchResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(internalpb.SearchResults) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProxyServer).SendSearchResult(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/milvus.proto.proxy.Proxy/SendSearchResult", - } - handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { - return srv.(ProxyServer).SendSearchResult(ctx, req.(*internalpb.SearchResults)) - } - return interceptor(ctx, in, info, handler) -} - -func _Proxy_SendRetrieveResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(internalpb.RetrieveResults) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProxyServer).SendRetrieveResult(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/milvus.proto.proxy.Proxy/SendRetrieveResult", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProxyServer).SendRetrieveResult(ctx, req.(*internalpb.RetrieveResults)) - } - return interceptor(ctx, in, info, handler) -} - func _Proxy_InvalidateCredentialCache_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(InvalidateCredCacheRequest) if err := dec(in); err != nil { @@ -730,18 +572,6 @@ var _Proxy_serviceDesc = grpc.ServiceDesc{ MethodName: "GetDdChannel", Handler: _Proxy_GetDdChannel_Handler, }, - { - MethodName: "ReleaseDQLMessageStream", - Handler: _Proxy_ReleaseDQLMessageStream_Handler, - }, - { - MethodName: "SendSearchResult", - Handler: _Proxy_SendSearchResult_Handler, - }, - { - MethodName: "SendRetrieveResult", - Handler: _Proxy_SendRetrieveResult_Handler, - }, { MethodName: "InvalidateCredentialCache", Handler: _Proxy_InvalidateCredentialCache_Handler, diff --git a/internal/proto/root_coord.proto b/internal/proto/root_coord.proto index 82dab4bd7f..15f85b5e16 100644 --- a/internal/proto/root_coord.proto +++ b/internal/proto/root_coord.proto @@ -103,7 +103,6 @@ service RootCoord { rpc AllocTimestamp(AllocTimestampRequest) returns (AllocTimestampResponse) {} rpc AllocID(AllocIDRequest) returns (AllocIDResponse) {} rpc UpdateChannelTimeTick(internal.ChannelTimeTickMsg) returns (common.Status) {} - rpc ReleaseDQLMessageStream(proxy.ReleaseDQLMessageStreamRequest) returns (common.Status) {} rpc InvalidateCollectionMetaCache(proxy.InvalidateCollMetaCacheRequest) returns (common.Status) {} // rpc SegmentFlushCompleted(data.SegmentFlushCompletedMsg) returns (common.Status) {} @@ -209,4 +208,5 @@ message GetCredentialResponse { string username = 2; // password stored in etcd/mysql string password = 3; -} \ No newline at end of file +} + diff --git a/internal/proto/rootcoordpb/root_coord.pb.go b/internal/proto/rootcoordpb/root_coord.pb.go index 20efc03b64..6b6a3e3081 100644 --- a/internal/proto/rootcoordpb/root_coord.pb.go +++ b/internal/proto/rootcoordpb/root_coord.pb.go @@ -674,103 +674,101 @@ func init() { func init() { proto.RegisterFile("root_coord.proto", fileDescriptor_4513485a144f6b06) } var fileDescriptor_4513485a144f6b06 = []byte{ - // 1526 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xed, 0x72, 0xd3, 0x46, - 0x17, 0xc6, 0x36, 0x49, 0xec, 0x63, 0xc7, 0x0e, 0x3b, 0x7c, 0xf8, 0x35, 0xbc, 0x2f, 0xc6, 0x2f, - 0x05, 0xf3, 0xe5, 0xd0, 0x30, 0x43, 0x29, 0xff, 0x88, 0xcd, 0x04, 0x4f, 0xc9, 0x10, 0x64, 0xe8, - 0xd0, 0x0f, 0xc6, 0x5d, 0x4b, 0x07, 0x47, 0x13, 0x59, 0x6b, 0xb4, 0xeb, 0x7c, 0x4c, 0x7f, 0x75, - 0xa6, 0xff, 0x7b, 0x27, 0xbd, 0x88, 0xf6, 0x52, 0x7a, 0x23, 0x9d, 0xd5, 0x4a, 0xb2, 0x24, 0x4b, - 0x8e, 0x02, 0xfc, 0xf3, 0xae, 0x9e, 0x7d, 0x9e, 0xb3, 0xcf, 0xee, 0x9e, 0xb3, 
0x6b, 0xd8, 0x70, - 0x18, 0x13, 0x43, 0x9d, 0x31, 0xc7, 0xe8, 0x4c, 0x1d, 0x26, 0x18, 0xb9, 0x3c, 0x31, 0xad, 0xc3, - 0x19, 0x57, 0xad, 0x8e, 0xfc, 0xec, 0x7e, 0x6d, 0x54, 0x74, 0x36, 0x99, 0x30, 0x5b, 0xf5, 0x37, - 0x2a, 0x61, 0x54, 0xa3, 0x6a, 0xda, 0x02, 0x1d, 0x9b, 0x5a, 0x5e, 0xbb, 0x3c, 0x75, 0xd8, 0xf1, - 0x89, 0xd7, 0xa8, 0xa1, 0xd0, 0x8d, 0xe1, 0x04, 0x05, 0x55, 0x1d, 0xad, 0x21, 0x5c, 0x7a, 0x66, - 0x59, 0x4c, 0x7f, 0x63, 0x4e, 0x90, 0x0b, 0x3a, 0x99, 0x6a, 0xf8, 0x71, 0x86, 0x5c, 0x90, 0x87, - 0x70, 0x7e, 0x44, 0x39, 0xd6, 0x73, 0xcd, 0x5c, 0xbb, 0xbc, 0x75, 0xad, 0x13, 0x89, 0xc4, 0x93, - 0xdf, 0xe5, 0xe3, 0x6d, 0xca, 0x51, 0x73, 0x91, 0xe4, 0x22, 0xac, 0xe8, 0x6c, 0x66, 0x8b, 0x7a, - 0xa1, 0x99, 0x6b, 0xaf, 0x6b, 0xaa, 0xd1, 0xfa, 0x2d, 0x07, 0x97, 0xe3, 0x0a, 0x7c, 0xca, 0x6c, - 0x8e, 0xe4, 0x11, 0xac, 0x72, 0x41, 0xc5, 0x8c, 0x7b, 0x22, 0x57, 0x13, 0x45, 0x06, 0x2e, 0x44, - 0xf3, 0xa0, 0xe4, 0x1a, 0x94, 0x84, 0xcf, 0x54, 0xcf, 0x37, 0x73, 0xed, 0xf3, 0xda, 0xbc, 0x23, - 0x25, 0x86, 0x77, 0x50, 0x75, 0x43, 0xe8, 0xf7, 0xbe, 0xc0, 0xec, 0xf2, 0x61, 0x66, 0x0b, 0x6a, - 0x01, 0xf3, 0xe7, 0xcc, 0xaa, 0x0a, 0xf9, 0x7e, 0xcf, 0xa5, 0x2e, 0x68, 0xf9, 0x7e, 0x2f, 0x65, - 0x1e, 0x7f, 0xe5, 0xa1, 0xd2, 0x9f, 0x4c, 0x99, 0x23, 0x34, 0xe4, 0x33, 0x4b, 0x7c, 0x9a, 0xd6, - 0x15, 0x58, 0x13, 0x94, 0x1f, 0x0c, 0x4d, 0xc3, 0x13, 0x5c, 0x95, 0xcd, 0xbe, 0x41, 0xae, 0x43, - 0xd9, 0xa0, 0x82, 0xda, 0xcc, 0x40, 0xf9, 0xb1, 0xe0, 0x7e, 0x04, 0xbf, 0xab, 0x6f, 0x90, 0xc7, - 0xb0, 0x22, 0x39, 0xb0, 0x7e, 0xbe, 0x99, 0x6b, 0x57, 0xb7, 0x9a, 0x89, 0x6a, 0x2a, 0x40, 0xa9, - 0x89, 0x9a, 0x82, 0x93, 0x06, 0x14, 0x39, 0x8e, 0x27, 0x68, 0x0b, 0x5e, 0x5f, 0x69, 0x16, 0xda, - 0x05, 0x2d, 0x68, 0x93, 0xff, 0x40, 0x91, 0xce, 0x04, 0x1b, 0x9a, 0x06, 0xaf, 0xaf, 0xba, 0xdf, - 0xd6, 0x64, 0xbb, 0x6f, 0x70, 0x72, 0x15, 0x4a, 0x0e, 0x3b, 0x1a, 0x2a, 0x23, 0xd6, 0xdc, 0x68, - 0x8a, 0x0e, 0x3b, 0xea, 0xca, 0x36, 0xf9, 0x06, 0x56, 0x4c, 0xfb, 0x03, 0xe3, 0xf5, 0x62, 0xb3, - 0xd0, 0x2e, 0x6f, 0xdd, 0x48, 0x8c, 0xe5, 0x3b, 0x3c, 0xf9, 0x9e, 0x5a, 0x33, 0xdc, 0xa3, 0xa6, - 0xa3, 0x29, 0x7c, 0xeb, 0x8f, 0x1c, 0x5c, 0xe9, 0x21, 0xd7, 0x1d, 0x73, 0x84, 0x03, 0x2f, 0x8a, - 0x4f, 0xdf, 0x16, 0x2d, 0xa8, 0xe8, 0xcc, 0xb2, 0x50, 0x17, 0x26, 0xb3, 0x83, 0x25, 0x8c, 0xf4, - 0x91, 0xff, 0x01, 0x78, 0xd3, 0xed, 0xf7, 0x78, 0xbd, 0xe0, 0x4e, 0x32, 0xd4, 0xd3, 0x9a, 0x41, - 0xcd, 0x0b, 0x44, 0x12, 0xf7, 0xed, 0x0f, 0x6c, 0x81, 0x36, 0x97, 0x40, 0xdb, 0x84, 0xf2, 0x94, - 0x3a, 0xc2, 0x8c, 0x28, 0x87, 0xbb, 0xe4, 0x59, 0x09, 0x64, 0xbc, 0xe5, 0x9c, 0x77, 0xb4, 0xfe, - 0xc9, 0x43, 0xc5, 0xd3, 0x95, 0x9a, 0x9c, 0xf4, 0xa0, 0x24, 0xe7, 0x34, 0x94, 0x3e, 0x79, 0x16, - 0xdc, 0xee, 0x24, 0x67, 0xa0, 0x4e, 0x2c, 0x60, 0xad, 0x38, 0xf2, 0x43, 0xef, 0x41, 0xd9, 0xb4, - 0x0d, 0x3c, 0x1e, 0xaa, 0xe5, 0xc9, 0xbb, 0xcb, 0xf3, 0xff, 0x28, 0x8f, 0xcc, 0x42, 0x9d, 0x40, - 0xdb, 0xc0, 0x63, 0x97, 0x03, 0x4c, 0xff, 0x27, 0x27, 0x08, 0x17, 0xf0, 0x58, 0x38, 0x74, 0x18, - 0xe6, 0x2a, 0xb8, 0x5c, 0xdf, 0x9e, 0x12, 0x93, 0x4b, 0xd0, 0x79, 0x2e, 0x47, 0x07, 0xdc, 0xfc, - 0xb9, 0x2d, 0x9c, 0x13, 0xad, 0x86, 0xd1, 0xde, 0xc6, 0x2f, 0x70, 0x31, 0x09, 0x48, 0x36, 0xa0, - 0x70, 0x80, 0x27, 0x9e, 0xed, 0xf2, 0x27, 0xd9, 0x82, 0x95, 0x43, 0xb9, 0x95, 0x5c, 0x9f, 0x17, - 0xf6, 0x86, 0x3b, 0xa1, 0xf9, 0x4c, 0x14, 0xf4, 0x69, 0xfe, 0x49, 0xae, 0xf5, 0x77, 0x1e, 0xea, - 0x8b, 0xdb, 0xed, 0x73, 0x72, 0x45, 0x96, 0x2d, 0x37, 0x86, 0x75, 0x6f, 0xa1, 0x23, 0xd6, 0x6d, - 0xa7, 0x59, 0x97, 0x16, 0x61, 0xc4, 0x53, 0xe5, 0x61, 0x85, 0x87, 0xba, 0x1a, 0x08, 0x17, 0x16, - 0x20, 
0x09, 0xee, 0x3d, 0x8d, 0xba, 0x77, 0x33, 0xcb, 0x12, 0x86, 0x5d, 0x34, 0xe0, 0xe2, 0x0e, - 0x8a, 0xae, 0x83, 0x06, 0xda, 0xc2, 0xa4, 0xd6, 0xa7, 0x1f, 0xd8, 0x06, 0x14, 0x67, 0x5c, 0xd6, - 0xc7, 0x89, 0x0a, 0xa6, 0xa4, 0x05, 0xed, 0xd6, 0xef, 0x39, 0xb8, 0x14, 0x93, 0xf9, 0x9c, 0x85, - 0x5a, 0x22, 0x25, 0xbf, 0x4d, 0x29, 0xe7, 0x47, 0xcc, 0x51, 0x89, 0xb6, 0xa4, 0x05, 0xed, 0xad, - 0x3f, 0xaf, 0x43, 0x49, 0x63, 0x4c, 0x74, 0xa5, 0x25, 0x64, 0x0a, 0x44, 0xc6, 0xc4, 0x26, 0x53, - 0x66, 0xa3, 0xad, 0x12, 0x2b, 0x27, 0x0f, 0xa3, 0x01, 0x04, 0x35, 0x7f, 0x11, 0xea, 0x59, 0xd5, - 0xb8, 0x95, 0x32, 0x22, 0x06, 0x6f, 0x9d, 0x23, 0x13, 0x57, 0x51, 0xd6, 0xeb, 0x37, 0xa6, 0x7e, - 0xd0, 0xdd, 0xa7, 0xb6, 0x8d, 0xd6, 0x32, 0xc5, 0x18, 0xd4, 0x57, 0x8c, 0x1d, 0x7a, 0xaf, 0x31, - 0x10, 0x8e, 0x69, 0x8f, 0x7d, 0x67, 0x5b, 0xe7, 0xc8, 0x47, 0x77, 0x6d, 0xa5, 0xba, 0xc9, 0x85, - 0xa9, 0x73, 0x5f, 0x70, 0x2b, 0x5d, 0x70, 0x01, 0x7c, 0x46, 0xc9, 0x21, 0x6c, 0x74, 0x1d, 0xa4, - 0x02, 0xbb, 0xc1, 0xa1, 0x21, 0xf7, 0x13, 0x87, 0xc6, 0x61, 0xbe, 0xd0, 0xb2, 0x0d, 0xd0, 0x3a, - 0x47, 0x7e, 0x82, 0x6a, 0xcf, 0x61, 0xd3, 0x10, 0xfd, 0xdd, 0x44, 0xfa, 0x28, 0x28, 0x23, 0xf9, - 0x10, 0xd6, 0x5f, 0x50, 0x1e, 0xe2, 0xbe, 0x93, 0xc8, 0x1d, 0xc1, 0xf8, 0xd4, 0x37, 0x12, 0xa1, - 0xdb, 0x8c, 0x59, 0x21, 0x7b, 0x8e, 0x80, 0xf8, 0x09, 0x21, 0xa4, 0xd2, 0x49, 0x9e, 0xc1, 0x02, - 0xd0, 0x97, 0xda, 0xcc, 0x8c, 0x0f, 0x84, 0xdf, 0x42, 0x59, 0x19, 0xfe, 0xcc, 0x32, 0x29, 0x27, - 0xb7, 0x97, 0x2c, 0x89, 0x8b, 0xc8, 0x68, 0xd8, 0x6b, 0x28, 0x49, 0xa3, 0x15, 0xe9, 0x57, 0xa9, - 0x0b, 0x71, 0x16, 0xca, 0x01, 0xc0, 0x33, 0x4b, 0xa0, 0xa3, 0x38, 0x6f, 0x25, 0x72, 0xce, 0x01, - 0x19, 0x49, 0x6d, 0xa8, 0x0d, 0xf6, 0xe5, 0x05, 0xc7, 0xb7, 0x86, 0x93, 0x7b, 0xc9, 0x1b, 0x3a, - 0x8a, 0xf2, 0xe9, 0xef, 0x67, 0x03, 0x07, 0x76, 0xbf, 0x87, 0x9a, 0x32, 0x73, 0xcf, 0xbf, 0x34, - 0xa4, 0xe8, 0xc5, 0x50, 0x19, 0xa7, 0xf3, 0x03, 0xac, 0x4b, 0x5b, 0xe7, 0xe4, 0x77, 0x52, 0xad, - 0x3f, 0x2b, 0xf5, 0x7b, 0xa8, 0xbc, 0xa0, 0x7c, 0xce, 0xdc, 0x4e, 0x3b, 0x01, 0x0b, 0xc4, 0x99, - 0x0e, 0xc0, 0x01, 0x54, 0xa5, 0x6b, 0xc1, 0x60, 0x9e, 0x72, 0x7c, 0xa3, 0x20, 0x5f, 0xe2, 0x5e, - 0x26, 0x6c, 0x20, 0x86, 0x50, 0x91, 0xdf, 0xfc, 0xd2, 0x9b, 0x32, 0x97, 0x30, 0xc4, 0x17, 0xba, - 0x93, 0x01, 0x19, 0x4a, 0xb3, 0xd5, 0xe8, 0x3b, 0x8c, 0x3c, 0x48, 0xab, 0xc2, 0x89, 0x2f, 0xc2, - 0x46, 0x27, 0x2b, 0x3c, 0x90, 0xfc, 0x19, 0xd6, 0xbc, 0xd7, 0x51, 0xfc, 0x84, 0xc4, 0x06, 0x07, - 0x0f, 0xb3, 0xc6, 0xed, 0x53, 0x71, 0x01, 0x3b, 0x85, 0x4b, 0x6f, 0xa7, 0x86, 0xcc, 0xce, 0xaa, - 0x06, 0xf8, 0x55, 0x28, 0xbe, 0xcd, 0xe6, 0x95, 0x2e, 0x8a, 0xdb, 0xe5, 0xe3, 0xd3, 0xb6, 0x99, - 0x05, 0x57, 0x34, 0xb4, 0x90, 0x72, 0xec, 0xbd, 0x7e, 0xb9, 0x8b, 0x9c, 0xd3, 0x31, 0x0e, 0x84, - 0x83, 0x74, 0x12, 0xaf, 0x4e, 0xea, 0x91, 0x9d, 0x02, 0xce, 0xb8, 0xa9, 0x1d, 0xf8, 0x6f, 0xdf, - 0x3e, 0xa4, 0x96, 0x69, 0x44, 0x4a, 0xce, 0x2e, 0x0a, 0xda, 0xa5, 0xfa, 0x3e, 0x26, 0x6b, 0x46, - 0x87, 0x04, 0xe0, 0x8c, 0x9a, 0xbf, 0x02, 0x51, 0xf9, 0xc1, 0xfe, 0x60, 0x8e, 0x67, 0x0e, 0x55, - 0xbb, 0x3d, 0xad, 0xd6, 0x2f, 0x42, 0x7d, 0x99, 0xaf, 0xcf, 0x30, 0x22, 0x54, 0x86, 0x61, 0x07, - 0xc5, 0x2e, 0x0a, 0xc7, 0xd4, 0xd3, 0x92, 0xe8, 0x1c, 0x90, 0xb2, 0x45, 0x12, 0x70, 0x81, 0xc0, - 0x00, 0x56, 0xd5, 0x73, 0x94, 0xb4, 0x12, 0x07, 0xf9, 0x8f, 0xe9, 0x65, 0x97, 0x87, 0xe0, 0xc1, - 0x1d, 0x4a, 0x0e, 0x3b, 0x28, 0x42, 0xcf, 0xdc, 0x94, 0xe4, 0x10, 0x05, 0x2d, 0x4f, 0x0e, 0x71, - 0x6c, 0x20, 0x66, 0x43, 0xed, 0xa5, 0xc9, 0xbd, 0x8f, 0x6f, 0x28, 0x3f, 0x48, 0x2b, 0x09, 0x31, - 0xd4, 0xf2, 0x92, 0xb0, 0x00, 
0x0e, 0x39, 0x56, 0xd1, 0x50, 0x7e, 0xf0, 0x7c, 0x4b, 0xbd, 0xa9, - 0x87, 0xff, 0x87, 0x38, 0x6d, 0x93, 0xbd, 0x0b, 0xae, 0x5b, 0xc1, 0xcd, 0x3a, 0x5e, 0x86, 0xe7, - 0x87, 0x34, 0x80, 0xc8, 0x47, 0x40, 0x06, 0x66, 0x2f, 0x07, 0x7c, 0x69, 0xe6, 0x21, 0x6c, 0xf4, - 0xd0, 0xc2, 0x08, 0xf3, 0xfd, 0x94, 0x1b, 0x4d, 0x14, 0x96, 0xf1, 0xe4, 0xed, 0xc3, 0xba, 0x5c, - 0x06, 0x39, 0xee, 0x2d, 0x47, 0x87, 0xa7, 0x54, 0xc7, 0x08, 0xc6, 0xa7, 0xbe, 0x9b, 0x05, 0x1a, - 0xda, 0x43, 0xeb, 0x91, 0x57, 0x4d, 0x7c, 0x1e, 0xf3, 0x45, 0x4d, 0x7a, 0x63, 0x35, 0x1e, 0x64, - 0x44, 0x87, 0xf6, 0x10, 0xa8, 0xe5, 0xd6, 0x98, 0x85, 0x29, 0xc7, 0x7a, 0x0e, 0xc8, 0x68, 0xd7, - 0x2b, 0x28, 0xca, 0x8b, 0x82, 0x4b, 0x79, 0x33, 0xf5, 0x1e, 0x71, 0x06, 0xc2, 0xf7, 0x50, 0x7b, - 0x35, 0x45, 0x87, 0x0a, 0x94, 0x7e, 0xb9, 0xbc, 0xc9, 0x27, 0x2b, 0x86, 0xca, 0x7c, 0x49, 0x87, - 0x01, 0xca, 0x0c, 0xbe, 0xc4, 0x84, 0x39, 0x60, 0x79, 0x6e, 0x0b, 0xe3, 0xc2, 0xc9, 0x53, 0xf5, - 0xcb, 0xc0, 0x96, 0x0a, 0xb8, 0x91, 0x67, 0x10, 0x50, 0xb8, 0xf0, 0x23, 0xc9, 0x9b, 0xfa, 0x9e, - 0x63, 0x1e, 0x9a, 0x16, 0x8e, 0x31, 0xe5, 0x04, 0xc4, 0x61, 0x19, 0x2d, 0x1a, 0x41, 0x59, 0x09, - 0xef, 0x38, 0xd4, 0x16, 0x64, 0x59, 0x68, 0x2e, 0xc2, 0xa7, 0x6d, 0x9f, 0x0e, 0x0c, 0x26, 0xa1, - 0x03, 0xc8, 0x63, 0xb1, 0xc7, 0x2c, 0x53, 0x3f, 0x89, 0x5f, 0xad, 0x82, 0xd4, 0x30, 0x87, 0xa4, - 0x5c, 0xad, 0x12, 0x91, 0xbe, 0xc8, 0xf6, 0x93, 0x1f, 0x1f, 0x8f, 0x4d, 0xb1, 0x3f, 0x1b, 0xc9, - 0x29, 0x6e, 0xaa, 0x81, 0x0f, 0x4c, 0xe6, 0xfd, 0xda, 0xf4, 0x07, 0x6f, 0xba, 0x5c, 0x9b, 0xc1, - 0x01, 0x9a, 0x8e, 0x46, 0xab, 0x6e, 0xd7, 0xa3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x68, 0x8b, - 0x7b, 0x42, 0xfb, 0x17, 0x00, 0x00, + // 1496 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0x5b, 0x73, 0x13, 0x37, + 0x14, 0xc6, 0x36, 0x49, 0xec, 0x63, 0xc7, 0x0e, 0x1a, 0x2e, 0xae, 0xa1, 0xd4, 0xb8, 0x14, 0xcc, + 0xcd, 0xa1, 0x61, 0x86, 0x52, 0xde, 0x88, 0xcd, 0x04, 0x4f, 0x9b, 0x21, 0x5d, 0x43, 0x87, 0x5e, + 0x18, 0x57, 0xde, 0x15, 0x8e, 0x26, 0xeb, 0x95, 0x59, 0xc9, 0xb9, 0x4c, 0x9f, 0x3a, 0xd3, 0xf7, + 0xfe, 0xa5, 0x4e, 0xfb, 0x53, 0xfa, 0x47, 0x3a, 0x5a, 0xed, 0xca, 0xbb, 0xeb, 0x5d, 0x67, 0x03, + 0xbc, 0x59, 0xda, 0x4f, 0xdf, 0x77, 0xf4, 0x49, 0x47, 0x47, 0x32, 0x6c, 0xb8, 0x8c, 0x89, 0xa1, + 0xc9, 0x98, 0x6b, 0x75, 0xa6, 0x2e, 0x13, 0x0c, 0x5d, 0x9e, 0x50, 0xfb, 0x70, 0xc6, 0x55, 0xab, + 0x23, 0x3f, 0x7b, 0x5f, 0x1b, 0x15, 0x93, 0x4d, 0x26, 0xcc, 0x51, 0xfd, 0x8d, 0x4a, 0x18, 0xd5, + 0xa8, 0x52, 0x47, 0x10, 0xd7, 0xc1, 0xb6, 0xdf, 0x2e, 0x4f, 0x5d, 0x76, 0x7c, 0xe2, 0x37, 0x6a, + 0x44, 0x98, 0xd6, 0x70, 0x42, 0x04, 0x56, 0x1d, 0xad, 0x21, 0x5c, 0x7a, 0x66, 0xdb, 0xcc, 0x7c, + 0x45, 0x27, 0x84, 0x0b, 0x3c, 0x99, 0x1a, 0xe4, 0xfd, 0x8c, 0x70, 0x81, 0x1e, 0xc2, 0xf9, 0x11, + 0xe6, 0xa4, 0x9e, 0x6b, 0xe6, 0xda, 0xe5, 0xad, 0x6b, 0x9d, 0x48, 0x24, 0xbe, 0xfc, 0x2e, 0x1f, + 0x6f, 0x63, 0x4e, 0x0c, 0x0f, 0x89, 0x2e, 0xc2, 0x8a, 0xc9, 0x66, 0x8e, 0xa8, 0x17, 0x9a, 0xb9, + 0xf6, 0xba, 0xa1, 0x1a, 0xad, 0x3f, 0x72, 0x70, 0x39, 0xae, 0xc0, 0xa7, 0xcc, 0xe1, 0x04, 0x3d, + 0x82, 0x55, 0x2e, 0xb0, 0x98, 0x71, 0x5f, 0xe4, 0x6a, 0xa2, 0xc8, 0xc0, 0x83, 0x18, 0x3e, 0x14, + 0x5d, 0x83, 0x92, 0x08, 0x98, 0xea, 0xf9, 0x66, 0xae, 0x7d, 0xde, 0x98, 0x77, 0xa4, 0xc4, 0xf0, + 0x06, 0xaa, 0x5e, 0x08, 0xfd, 0xde, 0x27, 0x98, 0x5d, 0x3e, 0xcc, 0x6c, 0x43, 0x4d, 0x33, 0x7f, + 0xcc, 0xac, 0xaa, 0x90, 0xef, 0xf7, 0x3c, 0xea, 0x82, 0x91, 0xef, 0xf7, 0x52, 0xe6, 0xf1, 0x4f, + 0x1e, 0x2a, 0xfd, 0xc9, 0x94, 0xb9, 0xc2, 0x20, 0x7c, 0x66, 0x8b, 
0x0f, 0xd3, 0xba, 0x02, 0x6b, + 0x02, 0xf3, 0x83, 0x21, 0xb5, 0x7c, 0xc1, 0x55, 0xd9, 0xec, 0x5b, 0xe8, 0x0b, 0x28, 0x5b, 0x58, + 0x60, 0x87, 0x59, 0x44, 0x7e, 0x2c, 0x78, 0x1f, 0x21, 0xe8, 0xea, 0x5b, 0xe8, 0x31, 0xac, 0x48, + 0x0e, 0x52, 0x3f, 0xdf, 0xcc, 0xb5, 0xab, 0x5b, 0xcd, 0x44, 0x35, 0x15, 0xa0, 0xd4, 0x24, 0x86, + 0x82, 0xa3, 0x06, 0x14, 0x39, 0x19, 0x4f, 0x88, 0x23, 0x78, 0x7d, 0xa5, 0x59, 0x68, 0x17, 0x0c, + 0xdd, 0x46, 0x9f, 0x41, 0x11, 0xcf, 0x04, 0x1b, 0x52, 0x8b, 0xd7, 0x57, 0xbd, 0x6f, 0x6b, 0xb2, + 0xdd, 0xb7, 0x38, 0xba, 0x0a, 0x25, 0x97, 0x1d, 0x0d, 0x95, 0x11, 0x6b, 0x5e, 0x34, 0x45, 0x97, + 0x1d, 0x75, 0x65, 0x1b, 0x7d, 0x03, 0x2b, 0xd4, 0x79, 0xc7, 0x78, 0xbd, 0xd8, 0x2c, 0xb4, 0xcb, + 0x5b, 0x37, 0x12, 0x63, 0xf9, 0x8e, 0x9c, 0xfc, 0x88, 0xed, 0x19, 0xd9, 0xc3, 0xd4, 0x35, 0x14, + 0xbe, 0xf5, 0x57, 0x0e, 0xae, 0xf4, 0x08, 0x37, 0x5d, 0x3a, 0x22, 0x03, 0x3f, 0x8a, 0x0f, 0xdf, + 0x16, 0x2d, 0xa8, 0x98, 0xcc, 0xb6, 0x89, 0x29, 0x28, 0x73, 0xf4, 0x12, 0x46, 0xfa, 0xd0, 0x75, + 0x00, 0x7f, 0xba, 0xfd, 0x1e, 0xaf, 0x17, 0xbc, 0x49, 0x86, 0x7a, 0x5a, 0x33, 0xa8, 0xf9, 0x81, + 0x48, 0xe2, 0xbe, 0xf3, 0x8e, 0x2d, 0xd0, 0xe6, 0x12, 0x68, 0x9b, 0x50, 0x9e, 0x62, 0x57, 0xd0, + 0x88, 0x72, 0xb8, 0x4b, 0xe6, 0x8a, 0x96, 0xf1, 0x97, 0x73, 0xde, 0xd1, 0xfa, 0x2f, 0x0f, 0x15, + 0x5f, 0x57, 0x6a, 0x72, 0xd4, 0x83, 0x92, 0x9c, 0xd3, 0x50, 0xfa, 0xe4, 0x5b, 0x70, 0xbb, 0x93, + 0x7c, 0x02, 0x75, 0x62, 0x01, 0x1b, 0xc5, 0x51, 0x10, 0x7a, 0x0f, 0xca, 0xd4, 0xb1, 0xc8, 0xf1, + 0x50, 0x2d, 0x4f, 0xde, 0x5b, 0x9e, 0x2f, 0xa3, 0x3c, 0xf2, 0x14, 0xea, 0x68, 0x6d, 0x8b, 0x1c, + 0x7b, 0x1c, 0x40, 0x83, 0x9f, 0x1c, 0x11, 0xb8, 0x40, 0x8e, 0x85, 0x8b, 0x87, 0x61, 0xae, 0x82, + 0xc7, 0xf5, 0xed, 0x29, 0x31, 0x79, 0x04, 0x9d, 0xe7, 0x72, 0xb4, 0xe6, 0xe6, 0xcf, 0x1d, 0xe1, + 0x9e, 0x18, 0x35, 0x12, 0xed, 0x6d, 0xfc, 0x06, 0x17, 0x93, 0x80, 0x68, 0x03, 0x0a, 0x07, 0xe4, + 0xc4, 0xb7, 0x5d, 0xfe, 0x44, 0x5b, 0xb0, 0x72, 0x28, 0xb7, 0x92, 0xe7, 0xf3, 0xc2, 0xde, 0xf0, + 0x26, 0x34, 0x9f, 0x89, 0x82, 0x3e, 0xcd, 0x3f, 0xc9, 0xb5, 0xfe, 0xcd, 0x43, 0x7d, 0x71, 0xbb, + 0x7d, 0xcc, 0x59, 0x91, 0x65, 0xcb, 0x8d, 0x61, 0xdd, 0x5f, 0xe8, 0x88, 0x75, 0xdb, 0x69, 0xd6, + 0xa5, 0x45, 0x18, 0xf1, 0x54, 0x79, 0x58, 0xe1, 0xa1, 0xae, 0x06, 0x81, 0x0b, 0x0b, 0x90, 0x04, + 0xf7, 0x9e, 0x46, 0xdd, 0xbb, 0x99, 0x65, 0x09, 0xc3, 0x2e, 0x5a, 0x70, 0x71, 0x87, 0x88, 0xae, + 0x4b, 0x2c, 0xe2, 0x08, 0x8a, 0xed, 0x0f, 0x4f, 0xd8, 0x06, 0x14, 0x67, 0x5c, 0xd6, 0xc7, 0x89, + 0x0a, 0xa6, 0x64, 0xe8, 0x76, 0xeb, 0xcf, 0x1c, 0x5c, 0x8a, 0xc9, 0x7c, 0xcc, 0x42, 0x2d, 0x91, + 0x92, 0xdf, 0xa6, 0x98, 0xf3, 0x23, 0xe6, 0xaa, 0x83, 0xb6, 0x64, 0xe8, 0xf6, 0xd6, 0xdf, 0xd7, + 0xa1, 0x64, 0x30, 0x26, 0xba, 0xd2, 0x12, 0x34, 0x05, 0x24, 0x63, 0x62, 0x93, 0x29, 0x73, 0x88, + 0xa3, 0x0e, 0x56, 0x8e, 0x1e, 0x46, 0x03, 0xd0, 0x35, 0x7f, 0x11, 0xea, 0x5b, 0xd5, 0xb8, 0x95, + 0x32, 0x22, 0x06, 0x6f, 0x9d, 0x43, 0x13, 0x4f, 0x51, 0xd6, 0xeb, 0x57, 0xd4, 0x3c, 0xe8, 0xee, + 0x63, 0xc7, 0x21, 0xf6, 0x32, 0xc5, 0x18, 0x34, 0x50, 0x8c, 0x25, 0xbd, 0xdf, 0x18, 0x08, 0x97, + 0x3a, 0xe3, 0xc0, 0xd9, 0xd6, 0x39, 0xf4, 0xde, 0x5b, 0x5b, 0xa9, 0x4e, 0xb9, 0xa0, 0x26, 0x0f, + 0x04, 0xb7, 0xd2, 0x05, 0x17, 0xc0, 0x67, 0x94, 0x1c, 0xc2, 0x46, 0xd7, 0x25, 0x58, 0x90, 0xae, + 0x4e, 0x1a, 0x74, 0x3f, 0x71, 0x68, 0x1c, 0x16, 0x08, 0x2d, 0xdb, 0x00, 0xad, 0x73, 0xe8, 0x17, + 0xa8, 0xf6, 0x5c, 0x36, 0x0d, 0xd1, 0xdf, 0x4d, 0xa4, 0x8f, 0x82, 0x32, 0x92, 0x0f, 0x61, 0xfd, + 0x05, 0xe6, 0x21, 0xee, 0x3b, 0x89, 0xdc, 0x11, 0x4c, 0x40, 0x7d, 0x23, 0x11, 0xba, 0xcd, 
0x98, + 0x1d, 0xb2, 0xe7, 0x08, 0x50, 0x70, 0x20, 0x84, 0x54, 0x3a, 0xc9, 0x33, 0x58, 0x00, 0x06, 0x52, + 0x9b, 0x99, 0xf1, 0x5a, 0xf8, 0x35, 0x94, 0x95, 0xe1, 0xcf, 0x6c, 0x8a, 0x39, 0xba, 0xbd, 0x64, + 0x49, 0x3c, 0x44, 0x46, 0xc3, 0x7e, 0x80, 0x92, 0x34, 0x5a, 0x91, 0x7e, 0x95, 0xba, 0x10, 0x67, + 0xa1, 0x1c, 0x00, 0x3c, 0xb3, 0x05, 0x71, 0x15, 0xe7, 0xad, 0x44, 0xce, 0x39, 0x20, 0x23, 0xa9, + 0x03, 0xb5, 0xc1, 0xbe, 0xbc, 0xe0, 0x04, 0xd6, 0x70, 0x74, 0x2f, 0x79, 0x43, 0x47, 0x51, 0x01, + 0xfd, 0xfd, 0x6c, 0x60, 0x6d, 0xf7, 0x5b, 0xa8, 0x29, 0x33, 0xf7, 0x82, 0x4b, 0x43, 0x8a, 0x5e, + 0x0c, 0x95, 0x71, 0x3a, 0x3f, 0xc1, 0xba, 0xb4, 0x75, 0x4e, 0x7e, 0x27, 0xd5, 0xfa, 0xb3, 0x52, + 0xbf, 0x85, 0xca, 0x0b, 0xcc, 0xe7, 0xcc, 0xed, 0xb4, 0x0c, 0x58, 0x20, 0xce, 0x94, 0x00, 0x07, + 0x50, 0x95, 0xae, 0xe9, 0xc1, 0x3c, 0x25, 0x7d, 0xa3, 0xa0, 0x40, 0xe2, 0x5e, 0x26, 0xac, 0x16, + 0x23, 0x50, 0x91, 0xdf, 0x82, 0xd2, 0x9b, 0x32, 0x97, 0x30, 0x24, 0x10, 0xba, 0x93, 0x01, 0x19, + 0x3a, 0x66, 0xab, 0xd1, 0x77, 0x18, 0x7a, 0x90, 0x56, 0x85, 0x13, 0x5f, 0x84, 0x8d, 0x4e, 0x56, + 0xb8, 0x96, 0xfc, 0x15, 0xd6, 0xfc, 0xd7, 0x51, 0x3c, 0x43, 0x62, 0x83, 0xf5, 0xc3, 0xac, 0x71, + 0xfb, 0x54, 0x9c, 0x66, 0xc7, 0x70, 0xe9, 0xf5, 0xd4, 0x92, 0xa7, 0xb3, 0xaa, 0x01, 0x41, 0x15, + 0x8a, 0x6f, 0xb3, 0x79, 0xa5, 0x8b, 0xe2, 0x76, 0xf9, 0xf8, 0xb4, 0x6d, 0xe6, 0xc2, 0xe7, 0x7d, + 0xe7, 0x10, 0xdb, 0xd4, 0x8a, 0x14, 0x81, 0x5d, 0x22, 0x70, 0x17, 0x9b, 0xfb, 0x24, 0x5e, 0xa3, + 0xd4, 0x53, 0x3b, 0x3a, 0x44, 0x83, 0x33, 0x6e, 0xed, 0xdf, 0x01, 0xa9, 0x8c, 0x75, 0xde, 0xd1, + 0xf1, 0xcc, 0xc5, 0x6a, 0xff, 0xa5, 0x55, 0xdf, 0x45, 0x68, 0x20, 0xf3, 0xf5, 0x19, 0x46, 0x84, + 0x0a, 0x23, 0xec, 0x10, 0xb1, 0x4b, 0x84, 0x4b, 0xcd, 0xb4, 0x63, 0x6d, 0x0e, 0x48, 0x59, 0xb4, + 0x04, 0x9c, 0x16, 0x18, 0xc0, 0xaa, 0x7a, 0x20, 0xa2, 0x56, 0xe2, 0xa0, 0xe0, 0x79, 0xbb, 0xac, + 0x9c, 0xeb, 0x27, 0x70, 0x28, 0x5d, 0x77, 0x88, 0x08, 0x3d, 0x3c, 0x53, 0xd2, 0x35, 0x0a, 0x5a, + 0x9e, 0xae, 0x71, 0xac, 0x16, 0x73, 0xa0, 0xf6, 0x3d, 0xe5, 0xfe, 0xc7, 0x57, 0x98, 0x1f, 0xa4, + 0x1d, 0xd2, 0x31, 0xd4, 0xf2, 0x43, 0x7a, 0x01, 0x1c, 0x72, 0xac, 0x62, 0x10, 0xf9, 0xc1, 0xf7, + 0x2d, 0xf5, 0xee, 0x1c, 0xfe, 0x67, 0xe0, 0xb4, 0x4d, 0xf6, 0x46, 0x5f, 0x80, 0xf4, 0x5d, 0x37, + 0x5e, 0x18, 0xe7, 0x69, 0xa3, 0x21, 0xf2, 0x5a, 0x9e, 0x81, 0xd9, 0xcf, 0xca, 0x4f, 0xcd, 0x3c, + 0x84, 0x8d, 0x1e, 0xb1, 0x49, 0x84, 0xf9, 0x7e, 0xca, 0x1d, 0x23, 0x0a, 0xcb, 0x98, 0x79, 0xfb, + 0xb0, 0x2e, 0x97, 0x41, 0x8e, 0x7b, 0xcd, 0x89, 0xcb, 0x53, 0xea, 0x55, 0x04, 0x13, 0x50, 0xdf, + 0xcd, 0x02, 0x0d, 0xed, 0xa1, 0xf5, 0xc8, 0x3b, 0x23, 0x3e, 0x8f, 0xf9, 0xa2, 0x26, 0xbd, 0x7a, + 0x1a, 0x0f, 0x32, 0xa2, 0x43, 0x7b, 0x08, 0xd4, 0x72, 0x1b, 0xcc, 0x26, 0x29, 0x69, 0x3d, 0x07, + 0x64, 0xb4, 0xeb, 0x25, 0x14, 0x65, 0xe9, 0xf6, 0x28, 0x6f, 0xa6, 0x56, 0xf6, 0x33, 0x10, 0xbe, + 0x85, 0xda, 0xcb, 0x29, 0x71, 0xb1, 0x20, 0xd2, 0x2f, 0x8f, 0x37, 0x39, 0xb3, 0x62, 0xa8, 0xcc, + 0xd7, 0x66, 0x18, 0x10, 0x79, 0x82, 0x2f, 0x31, 0x61, 0x0e, 0x58, 0x7e, 0xb6, 0x85, 0x71, 0xe1, + 0xc3, 0x53, 0xf5, 0xcb, 0xc0, 0x96, 0x0a, 0x78, 0x91, 0x67, 0x10, 0x50, 0xb8, 0xf0, 0xb3, 0xc5, + 0x9f, 0xfa, 0x9e, 0x4b, 0x0f, 0xa9, 0x4d, 0xc6, 0x24, 0x25, 0x03, 0xe2, 0xb0, 0x8c, 0x16, 0x8d, + 0xa0, 0xac, 0x84, 0x77, 0x5c, 0xec, 0x08, 0xb4, 0x2c, 0x34, 0x0f, 0x11, 0xd0, 0xb6, 0x4f, 0x07, + 0xea, 0x49, 0x98, 0x00, 0x32, 0x2d, 0xf6, 0x98, 0x4d, 0xcd, 0x93, 0xf8, 0x65, 0x47, 0x1f, 0x0d, + 0x73, 0x48, 0xca, 0x65, 0x27, 0x11, 0x19, 0x88, 0x6c, 0x3f, 0xf9, 0xf9, 0xf1, 0x98, 0x8a, 0xfd, + 0xd9, 0x48, 0x4e, 
0x71, 0x53, 0x0d, 0x7c, 0x40, 0x99, 0xff, 0x6b, 0x33, 0x18, 0xbc, 0xe9, 0x71, + 0x6d, 0xea, 0x04, 0x9a, 0x8e, 0x46, 0xab, 0x5e, 0xd7, 0xa3, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, + 0x19, 0x92, 0xda, 0x17, 0x8d, 0x17, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -851,7 +849,6 @@ type RootCoordClient interface { AllocTimestamp(ctx context.Context, in *AllocTimestampRequest, opts ...grpc.CallOption) (*AllocTimestampResponse, error) AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc.CallOption) (*AllocIDResponse, error) UpdateChannelTimeTick(ctx context.Context, in *internalpb.ChannelTimeTickMsg, opts ...grpc.CallOption) (*commonpb.Status, error) - ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) ShowConfigurations(ctx context.Context, in *internalpb.ShowConfigurationsRequest, opts ...grpc.CallOption) (*internalpb.ShowConfigurationsResponse, error) // https://wiki.lfaidata.foundation/display/MIL/MEP+8+--+Add+metrics+for+proxy @@ -1058,15 +1055,6 @@ func (c *rootCoordClient) UpdateChannelTimeTick(ctx context.Context, in *interna return out, nil } -func (c *rootCoordClient) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { - out := new(commonpb.Status) - err := c.cc.Invoke(ctx, "/milvus.proto.rootcoord.RootCoord/ReleaseDQLMessageStream", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *rootCoordClient) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { out := new(commonpb.Status) err := c.cc.Invoke(ctx, "/milvus.proto.rootcoord.RootCoord/InvalidateCollectionMetaCache", in, out, opts...) 
@@ -1315,7 +1303,6 @@ type RootCoordServer interface { AllocTimestamp(context.Context, *AllocTimestampRequest) (*AllocTimestampResponse, error) AllocID(context.Context, *AllocIDRequest) (*AllocIDResponse, error) UpdateChannelTimeTick(context.Context, *internalpb.ChannelTimeTickMsg) (*commonpb.Status, error) - ReleaseDQLMessageStream(context.Context, *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) InvalidateCollectionMetaCache(context.Context, *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) ShowConfigurations(context.Context, *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) // https://wiki.lfaidata.foundation/display/MIL/MEP+8+--+Add+metrics+for+proxy @@ -1404,9 +1391,6 @@ func (*UnimplementedRootCoordServer) AllocID(ctx context.Context, req *AllocIDRe func (*UnimplementedRootCoordServer) UpdateChannelTimeTick(ctx context.Context, req *internalpb.ChannelTimeTickMsg) (*commonpb.Status, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateChannelTimeTick not implemented") } -func (*UnimplementedRootCoordServer) ReleaseDQLMessageStream(ctx context.Context, req *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReleaseDQLMessageStream not implemented") -} func (*UnimplementedRootCoordServer) InvalidateCollectionMetaCache(ctx context.Context, req *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { return nil, status.Errorf(codes.Unimplemented, "method InvalidateCollectionMetaCache not implemented") } @@ -1814,24 +1798,6 @@ func _RootCoord_UpdateChannelTimeTick_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } -func _RootCoord_ReleaseDQLMessageStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(proxypb.ReleaseDQLMessageStreamRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RootCoordServer).ReleaseDQLMessageStream(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/milvus.proto.rootcoord.RootCoord/ReleaseDQLMessageStream", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RootCoordServer).ReleaseDQLMessageStream(ctx, req.(*proxypb.ReleaseDQLMessageStreamRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _RootCoord_InvalidateCollectionMetaCache_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(proxypb.InvalidateCollMetaCacheRequest) if err := dec(in); err != nil { @@ -2272,10 +2238,6 @@ var _RootCoord_serviceDesc = grpc.ServiceDesc{ MethodName: "UpdateChannelTimeTick", Handler: _RootCoord_UpdateChannelTimeTick_Handler, }, - { - MethodName: "ReleaseDQLMessageStream", - Handler: _RootCoord_ReleaseDQLMessageStream_Handler, - }, { MethodName: "InvalidateCollectionMetaCache", Handler: _RootCoord_InvalidateCollectionMetaCache_Handler, diff --git a/internal/proto/schema.proto b/internal/proto/schema.proto index 96ba20a4a2..7dd1f45594 100644 --- a/internal/proto/schema.proto +++ b/internal/proto/schema.proto @@ -31,6 +31,13 @@ enum DataType { FloatVector = 101; } +enum FieldState { + FieldCreated = 0; + FieldCreating = 1; + FieldDropping = 2; + FieldDropped = 3; +} + /** * @brief Field schema */ @@ -43,6 +50,7 @@ message FieldSchema { repeated 
common.KeyValuePair type_params = 6; repeated common.KeyValuePair index_params = 7; bool autoID = 8; + FieldState state = 9; // To keep compatible with older version, the default state is `Created`. } /** diff --git a/internal/proto/schemapb/schema.pb.go b/internal/proto/schemapb/schema.pb.go index e59abf68d3..26e50b52ae 100644 --- a/internal/proto/schemapb/schema.pb.go +++ b/internal/proto/schemapb/schema.pb.go @@ -78,6 +78,37 @@ func (DataType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_1c5fb4d8cc22d66a, []int{0} } +type FieldState int32 + +const ( + FieldState_FieldCreated FieldState = 0 + FieldState_FieldCreating FieldState = 1 + FieldState_FieldDropping FieldState = 2 + FieldState_FieldDropped FieldState = 3 +) + +var FieldState_name = map[int32]string{ + 0: "FieldCreated", + 1: "FieldCreating", + 2: "FieldDropping", + 3: "FieldDropped", +} + +var FieldState_value = map[string]int32{ + "FieldCreated": 0, + "FieldCreating": 1, + "FieldDropping": 2, + "FieldDropped": 3, +} + +func (x FieldState) String() string { + return proto.EnumName(FieldState_name, int32(x)) +} + +func (FieldState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c5fb4d8cc22d66a, []int{1} +} + //* // @brief Field schema type FieldSchema struct { @@ -89,6 +120,7 @@ type FieldSchema struct { TypeParams []*commonpb.KeyValuePair `protobuf:"bytes,6,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` IndexParams []*commonpb.KeyValuePair `protobuf:"bytes,7,rep,name=index_params,json=indexParams,proto3" json:"index_params,omitempty"` AutoID bool `protobuf:"varint,8,opt,name=autoID,proto3" json:"autoID,omitempty"` + State FieldState `protobuf:"varint,9,opt,name=state,proto3,enum=milvus.proto.schema.FieldState" json:"state,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -175,6 +207,13 @@ func (m *FieldSchema) GetAutoID() bool { return false } +func (m *FieldSchema) GetState() FieldState { + if m != nil { + return m.State + } + return FieldState_FieldCreated +} + //* // @brief Collection schema type CollectionSchema struct { @@ -1022,6 +1061,7 @@ func (m *SearchResultData) GetTopks() []int64 { func init() { proto.RegisterEnum("milvus.proto.schema.DataType", DataType_name, DataType_value) + proto.RegisterEnum("milvus.proto.schema.FieldState", FieldState_name, FieldState_value) proto.RegisterType((*FieldSchema)(nil), "milvus.proto.schema.FieldSchema") proto.RegisterType((*CollectionSchema)(nil), "milvus.proto.schema.CollectionSchema") proto.RegisterType((*BoolArray)(nil), "milvus.proto.schema.BoolArray") @@ -1041,68 +1081,71 @@ func init() { func init() { proto.RegisterFile("schema.proto", fileDescriptor_1c5fb4d8cc22d66a) } var fileDescriptor_1c5fb4d8cc22d66a = []byte{ - // 995 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x5d, 0x6f, 0xe3, 0x44, - 0x17, 0xce, 0xc4, 0xf9, 0xb0, 0x8f, 0xf3, 0xf6, 0xb5, 0x66, 0x17, 0x64, 0x90, 0x76, 0x9b, 0x8d, - 0x40, 0x8a, 0x56, 0xa2, 0xd5, 0xb6, 0x68, 0x59, 0x56, 0xac, 0x80, 0x34, 0xaa, 0x12, 0x15, 0xad, - 0x82, 0x8b, 0x8a, 0xc4, 0x4d, 0x34, 0x89, 0x67, 0xdb, 0x51, 0x6d, 0x8f, 0x99, 0x99, 0xac, 0xc8, - 0x0f, 0xe0, 0x1f, 0x70, 0x85, 0xb8, 0xe0, 0x8f, 0x71, 0x81, 0xf8, 0x1d, 0x48, 0x68, 0x3e, 0x92, - 0x18, 0xe2, 0xad, 0x7a, 0x77, 0x66, 0x7c, 0x9e, 0x67, 0xce, 0x79, 0xce, 0x87, 0xa1, 0x27, 0x97, - 0x37, 0x34, 0x27, 0x47, 0xa5, 0xe0, 0x8a, 0xe3, 0x07, 0x39, 0xcb, 0xde, 0xae, 0xa4, 0x3d, 0x1d, - 0xd9, 0x4f, 0x1f, 
0xf6, 0x96, 0x3c, 0xcf, 0x79, 0x61, 0x2f, 0x07, 0x7f, 0x36, 0x21, 0x3c, 0x67, - 0x34, 0x4b, 0x2f, 0xcd, 0x57, 0x1c, 0x43, 0xf7, 0x8d, 0x3e, 0x4e, 0xc7, 0x31, 0xea, 0xa3, 0xa1, - 0x97, 0x6c, 0x8e, 0x18, 0x43, 0xab, 0x20, 0x39, 0x8d, 0x9b, 0x7d, 0x34, 0x0c, 0x12, 0x63, 0xe3, - 0x8f, 0xe0, 0x80, 0xc9, 0x79, 0x29, 0x58, 0x4e, 0xc4, 0x7a, 0x7e, 0x4b, 0xd7, 0xb1, 0xd7, 0x47, - 0x43, 0x3f, 0xe9, 0x31, 0x39, 0xb3, 0x97, 0x17, 0x74, 0x8d, 0xfb, 0x10, 0xa6, 0x54, 0x2e, 0x05, - 0x2b, 0x15, 0xe3, 0x45, 0xdc, 0x32, 0x04, 0xd5, 0x2b, 0xfc, 0x12, 0x82, 0x94, 0x28, 0x32, 0x57, - 0xeb, 0x92, 0xc6, 0xed, 0x3e, 0x1a, 0x1e, 0x9c, 0x3c, 0x3a, 0xaa, 0x09, 0xfe, 0x68, 0x4c, 0x14, - 0xf9, 0x6e, 0x5d, 0xd2, 0xc4, 0x4f, 0x9d, 0x85, 0x47, 0x10, 0x6a, 0xd8, 0xbc, 0x24, 0x82, 0xe4, - 0x32, 0xee, 0xf4, 0xbd, 0x61, 0x78, 0xf2, 0xe4, 0xdf, 0x68, 0x97, 0xf2, 0x05, 0x5d, 0x5f, 0x91, - 0x6c, 0x45, 0x67, 0x84, 0x89, 0x04, 0x34, 0x6a, 0x66, 0x40, 0x78, 0x0c, 0x3d, 0x56, 0xa4, 0xf4, - 0xa7, 0x0d, 0x49, 0xf7, 0xbe, 0x24, 0xa1, 0x81, 0x39, 0x96, 0xf7, 0xa1, 0x43, 0x56, 0x8a, 0x4f, - 0xc7, 0xb1, 0x6f, 0x54, 0x70, 0xa7, 0xc1, 0xaf, 0x08, 0xa2, 0x33, 0x9e, 0x65, 0x74, 0xa9, 0x93, - 0x75, 0x42, 0x6f, 0xe4, 0x44, 0x15, 0x39, 0xff, 0x23, 0x54, 0x73, 0x5f, 0xa8, 0xdd, 0x13, 0x5e, - 0xf5, 0x09, 0xfc, 0x02, 0x3a, 0xa6, 0x4e, 0x32, 0x6e, 0x99, 0xd0, 0xfb, 0xb5, 0xea, 0x55, 0x0a, - 0x9d, 0x38, 0xff, 0xc1, 0x21, 0x04, 0x23, 0xce, 0xb3, 0xaf, 0x85, 0x20, 0x6b, 0x1d, 0x94, 0xd6, - 0x35, 0x46, 0x7d, 0x6f, 0xe8, 0x27, 0xc6, 0x1e, 0x3c, 0x06, 0x7f, 0x5a, 0xa8, 0xfd, 0xef, 0x6d, - 0xf7, 0xfd, 0x10, 0x82, 0x6f, 0x78, 0x71, 0xbd, 0xef, 0xe0, 0x39, 0x87, 0x3e, 0xc0, 0x79, 0xc6, - 0x49, 0x0d, 0x45, 0xd3, 0x79, 0x3c, 0x81, 0x70, 0xcc, 0x57, 0x8b, 0x8c, 0xee, 0xbb, 0xa0, 0x1d, - 0xc9, 0x68, 0xad, 0xa8, 0xdc, 0xf7, 0xe8, 0xed, 0x48, 0x2e, 0x95, 0x60, 0x75, 0x91, 0x04, 0xce, - 0xe5, 0x0f, 0x0f, 0xc2, 0xcb, 0x25, 0xc9, 0x88, 0x30, 0x4a, 0xe0, 0x57, 0x10, 0x2c, 0x38, 0xcf, - 0xe6, 0xce, 0x11, 0x0d, 0xc3, 0x93, 0xc7, 0xb5, 0xc2, 0x6d, 0x15, 0x9a, 0x34, 0x12, 0x5f, 0x43, - 0x74, 0x1f, 0xe2, 0x97, 0xe0, 0xb3, 0x42, 0x59, 0x74, 0xd3, 0xa0, 0xeb, 0x9b, 0x76, 0x23, 0xdf, - 0xa4, 0x91, 0x74, 0x59, 0xa1, 0x0c, 0xf6, 0x15, 0x04, 0x19, 0x2f, 0xae, 0x2d, 0xd8, 0xbb, 0xe3, - 0xe9, 0xad, 0xb6, 0xfa, 0x69, 0x0d, 0x31, 0xf0, 0xaf, 0x00, 0xde, 0x68, 0x4d, 0x2d, 0xbe, 0x65, - 0xf0, 0x87, 0xf5, 0x35, 0xdf, 0x4a, 0x3f, 0x69, 0x24, 0x81, 0x01, 0x19, 0x86, 0x33, 0x08, 0x53, - 0xa3, 0xb9, 0xa5, 0x68, 0x1b, 0x8a, 0xfa, 0xb6, 0xa9, 0xd4, 0x66, 0xd2, 0x48, 0xc0, 0xc2, 0x36, - 0x24, 0xd2, 0x68, 0x6e, 0x49, 0x3a, 0x77, 0x90, 0x54, 0x6a, 0xa3, 0x49, 0x2c, 0x6c, 0x93, 0xcb, - 0x42, 0x97, 0xd6, 0x72, 0x74, 0xef, 0xc8, 0x65, 0xd7, 0x01, 0x3a, 0x17, 0x03, 0xd2, 0x0c, 0xa3, - 0x8e, 0xad, 0xf5, 0xe0, 0x17, 0x04, 0xe1, 0x15, 0x5d, 0x2a, 0xee, 0xea, 0x1b, 0x81, 0x97, 0xb2, - 0xdc, 0x2d, 0x32, 0x6d, 0xea, 0x41, 0xb7, 0xba, 0xbd, 0x35, 0x6e, 0xae, 0x6c, 0xf7, 0x50, 0x2e, - 0x34, 0x30, 0x4b, 0x8e, 0x3f, 0x86, 0xff, 0x2d, 0x58, 0xa1, 0x57, 0x9e, 0xa3, 0xd1, 0x05, 0xec, - 0x4d, 0x1a, 0x49, 0xcf, 0x5e, 0x5b, 0xb7, 0x6d, 0x58, 0x7f, 0x23, 0x08, 0x4c, 0x40, 0x26, 0xdd, - 0x67, 0xd0, 0x32, 0x6b, 0x0e, 0xdd, 0x67, 0xcd, 0x19, 0x57, 0xfc, 0x08, 0xc0, 0x4c, 0xeb, 0xbc, - 0xb2, 0x80, 0x03, 0x73, 0xf3, 0x5a, 0xaf, 0x8d, 0x2f, 0xa0, 0x2b, 0x4d, 0x57, 0x4b, 0xd7, 0x49, - 0xef, 0xa8, 0xc0, 0xae, 0xf3, 0x75, 0x27, 0x3a, 0x88, 0x46, 0xdb, 0x2c, 0xa4, 0xeb, 0xa3, 0x7a, - 0x74, 0x45, 0x57, 0x8d, 0x76, 0x10, 0xfc, 0x01, 0xf8, 0x36, 0x34, 0x96, 0x9a, 0x1e, 0xda, 0xfe, - 0x30, 0xd2, 0x51, 0x17, 0xda, 0xc6, 0x1c, 
0xfc, 0x8c, 0xc0, 0x9b, 0x8e, 0x25, 0xfe, 0x0c, 0x3a, - 0x7a, 0x5e, 0x58, 0x7a, 0xe7, 0xac, 0x55, 0x1b, 0xbe, 0xcd, 0x0a, 0x35, 0x4d, 0xf1, 0xe7, 0xd0, - 0x91, 0x4a, 0x68, 0x60, 0xf3, 0xde, 0x1d, 0xd6, 0x96, 0x4a, 0x4c, 0xd3, 0x11, 0x80, 0xcf, 0xd2, - 0xb9, 0x8d, 0xe3, 0x2f, 0x04, 0xd1, 0x25, 0x25, 0x62, 0x79, 0x93, 0x50, 0xb9, 0xca, 0xec, 0x1c, - 0x1c, 0x42, 0x58, 0xac, 0xf2, 0xf9, 0x8f, 0x2b, 0x2a, 0x18, 0x95, 0xae, 0x57, 0xa0, 0x58, 0xe5, - 0xdf, 0xda, 0x1b, 0xfc, 0x00, 0xda, 0x8a, 0x97, 0xf3, 0x5b, 0xf3, 0xb6, 0x97, 0xb4, 0x14, 0x2f, - 0x2f, 0xf0, 0x97, 0x10, 0xda, 0xfd, 0xb9, 0x19, 0x60, 0xef, 0x9d, 0xf9, 0x6c, 0x2b, 0x9f, 0xd8, - 0x22, 0x9a, 0x96, 0xd5, 0x8b, 0x5c, 0x2e, 0xb9, 0xa0, 0x76, 0x61, 0x37, 0x13, 0x77, 0xc2, 0x4f, - 0xc1, 0x63, 0xa9, 0x74, 0xe3, 0x18, 0xd7, 0xaf, 0x93, 0xb1, 0x4c, 0xb4, 0x13, 0x7e, 0x68, 0x22, - 0xbb, 0xb5, 0xff, 0x3c, 0x2f, 0xb1, 0x87, 0xa7, 0xbf, 0x21, 0xf0, 0x37, 0xfd, 0x83, 0x7d, 0x68, - 0xbd, 0xe6, 0x05, 0x8d, 0x1a, 0xda, 0xd2, 0x5b, 0x2c, 0x42, 0xda, 0x9a, 0x16, 0xea, 0x45, 0xd4, - 0xc4, 0x01, 0xb4, 0xa7, 0x85, 0x7a, 0xf6, 0x3c, 0xf2, 0x9c, 0x79, 0x7a, 0x12, 0xb5, 0x9c, 0xf9, - 0xfc, 0xd3, 0xa8, 0xad, 0x4d, 0x33, 0x05, 0x11, 0x60, 0x80, 0x8e, 0xdd, 0x03, 0x51, 0xa8, 0x6d, - 0x2b, 0x76, 0xf4, 0x10, 0x87, 0xd0, 0xbd, 0x22, 0xe2, 0xec, 0x86, 0x88, 0xe8, 0x3d, 0x1c, 0x41, - 0x6f, 0x54, 0x99, 0x80, 0x28, 0xc5, 0xff, 0x87, 0xf0, 0x7c, 0x37, 0x39, 0x11, 0x1d, 0x7d, 0x0f, - 0x07, 0x8c, 0x6f, 0xf2, 0xba, 0x16, 0xe5, 0x72, 0x14, 0xda, 0x3f, 0xd2, 0x4c, 0xe7, 0x38, 0x43, - 0x3f, 0x9c, 0x5e, 0x33, 0x75, 0xb3, 0x5a, 0xe8, 0xdf, 0xed, 0xb1, 0x75, 0xfb, 0x84, 0x71, 0x67, - 0x1d, 0xb3, 0x42, 0x51, 0x51, 0x90, 0xec, 0xd8, 0x28, 0x72, 0x6c, 0x15, 0x29, 0x17, 0xbf, 0x23, - 0xb4, 0xe8, 0x98, 0xab, 0xd3, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd1, 0xcc, 0x21, 0x94, 0x03, - 0x09, 0x00, 0x00, + // 1051 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x6f, 0x6f, 0x1b, 0xc5, + 0x13, 0xf6, 0xfa, 0xfc, 0xe7, 0x6e, 0xce, 0xed, 0xef, 0x7e, 0xdb, 0x82, 0x0e, 0xa4, 0x36, 0xae, + 0x05, 0x92, 0x55, 0x89, 0x44, 0x4d, 0xa0, 0x94, 0x8a, 0x0a, 0x70, 0xac, 0x28, 0x56, 0x50, 0x15, + 0x2e, 0x28, 0x48, 0xbc, 0xb1, 0xd6, 0xbe, 0x6d, 0xb2, 0xca, 0xf9, 0xf6, 0xd8, 0x5d, 0x57, 0xf8, + 0x03, 0xf0, 0x0d, 0x78, 0x81, 0x10, 0x2f, 0xf8, 0x62, 0xbc, 0xe2, 0x73, 0x20, 0xa1, 0x9d, 0x5d, + 0xff, 0x29, 0x71, 0xa2, 0xbc, 0x9b, 0x9d, 0x9b, 0xe7, 0xb9, 0x99, 0x67, 0x66, 0x67, 0xa1, 0xa3, + 0xa7, 0x97, 0x7c, 0xc6, 0x76, 0x2b, 0x25, 0x8d, 0xa4, 0x0f, 0x66, 0xa2, 0x78, 0x3b, 0xd7, 0xee, + 0xb4, 0xeb, 0x3e, 0x7d, 0xd8, 0x99, 0xca, 0xd9, 0x4c, 0x96, 0xce, 0xd9, 0xfb, 0x2d, 0x80, 0xf8, + 0x48, 0xf0, 0x22, 0x3f, 0xc3, 0xaf, 0x34, 0x85, 0xf6, 0x1b, 0x7b, 0x1c, 0x0d, 0x53, 0xd2, 0x25, + 0xfd, 0x20, 0x5b, 0x1e, 0x29, 0x85, 0x46, 0xc9, 0x66, 0x3c, 0xad, 0x77, 0x49, 0x3f, 0xca, 0xd0, + 0xa6, 0x1f, 0xc1, 0x7d, 0xa1, 0xc7, 0x95, 0x12, 0x33, 0xa6, 0x16, 0xe3, 0x2b, 0xbe, 0x48, 0x83, + 0x2e, 0xe9, 0x87, 0x59, 0x47, 0xe8, 0x53, 0xe7, 0x3c, 0xe1, 0x0b, 0xda, 0x85, 0x38, 0xe7, 0x7a, + 0xaa, 0x44, 0x65, 0x84, 0x2c, 0xd3, 0x06, 0x12, 0x6c, 0xba, 0xe8, 0x4b, 0x88, 0x72, 0x66, 0xd8, + 0xd8, 0x2c, 0x2a, 0x9e, 0x36, 0xbb, 0xa4, 0x7f, 0x7f, 0xff, 0xd1, 0xee, 0x96, 0xe4, 0x77, 0x87, + 0xcc, 0xb0, 0xef, 0x17, 0x15, 0xcf, 0xc2, 0xdc, 0x5b, 0x74, 0x00, 0xb1, 0x85, 0x8d, 0x2b, 0xa6, + 0xd8, 0x4c, 0xa7, 0xad, 0x6e, 0xd0, 0x8f, 0xf7, 0x9f, 0xbc, 0x8b, 0xf6, 0x25, 0x9f, 0xf0, 0xc5, + 0x39, 0x2b, 0xe6, 0xfc, 0x94, 0x09, 0x95, 0x81, 0x45, 0x9d, 0x22, 0x88, 0x0e, 0xa1, 0x23, 0xca, + 
0x9c, 0xff, 0xbc, 0x24, 0x69, 0xdf, 0x95, 0x24, 0x46, 0x98, 0x67, 0x79, 0x1f, 0x5a, 0x6c, 0x6e, + 0xe4, 0x68, 0x98, 0x86, 0xa8, 0x82, 0x3f, 0xd1, 0xcf, 0xa0, 0xa9, 0x0d, 0x33, 0x3c, 0x8d, 0xb0, + 0xb2, 0x9d, 0xad, 0x95, 0xb9, 0x26, 0xd8, 0xb0, 0xcc, 0x45, 0xf7, 0x7e, 0x27, 0x90, 0x1c, 0xca, + 0xa2, 0xe0, 0x53, 0xab, 0x91, 0xef, 0xcf, 0xb2, 0x0b, 0x64, 0xa3, 0x0b, 0xff, 0xd1, 0xb7, 0x7e, + 0x5d, 0xdf, 0x75, 0x66, 0xc1, 0x3b, 0x99, 0xbd, 0x80, 0x16, 0xb6, 0x57, 0xa7, 0x0d, 0xac, 0xb8, + 0x7b, 0x4b, 0x6a, 0x68, 0x67, 0x3e, 0xbe, 0xb7, 0x03, 0xd1, 0x40, 0xca, 0xe2, 0x1b, 0xa5, 0xd8, + 0xc2, 0x26, 0x65, 0xdb, 0x91, 0x92, 0x6e, 0xd0, 0x0f, 0x33, 0xb4, 0x7b, 0x8f, 0x21, 0x1c, 0x95, + 0xe6, 0xfa, 0xf7, 0xa6, 0xff, 0xbe, 0x03, 0xd1, 0xb7, 0xb2, 0xbc, 0xb8, 0x1e, 0x10, 0xf8, 0x80, + 0x2e, 0xc0, 0x51, 0x21, 0xd9, 0x16, 0x8a, 0xba, 0x8f, 0x78, 0x02, 0xf1, 0x50, 0xce, 0x27, 0x05, + 0xbf, 0x1e, 0x42, 0xd6, 0x24, 0x83, 0x85, 0xe1, 0xfa, 0x7a, 0x44, 0x67, 0x4d, 0x72, 0x66, 0x94, + 0xd8, 0x96, 0x49, 0xe4, 0x43, 0xfe, 0x0a, 0x20, 0x3e, 0x9b, 0xb2, 0x82, 0x29, 0x54, 0x82, 0xbe, + 0x82, 0x68, 0x22, 0x65, 0x31, 0xf6, 0x81, 0xa4, 0x1f, 0xef, 0x3f, 0xde, 0x2a, 0xdc, 0x4a, 0xa1, + 0xe3, 0x5a, 0x16, 0x5a, 0x88, 0x1d, 0x5f, 0xfa, 0x12, 0x42, 0x51, 0x1a, 0x87, 0xae, 0x23, 0x7a, + 0xfb, 0xac, 0x2f, 0xe5, 0x3b, 0xae, 0x65, 0x6d, 0x51, 0x1a, 0xc4, 0xbe, 0x82, 0xa8, 0x90, 0xe5, + 0x85, 0x03, 0x07, 0xb7, 0xfc, 0x7a, 0xa5, 0xad, 0xfd, 0xb5, 0x85, 0x20, 0xfc, 0x6b, 0x80, 0x37, + 0x56, 0x53, 0x87, 0x6f, 0x20, 0xfe, 0x86, 0x71, 0x5c, 0x49, 0x7f, 0x5c, 0xcb, 0x22, 0x04, 0x21, + 0xc3, 0x21, 0xc4, 0x39, 0x6a, 0xee, 0x28, 0x9a, 0x48, 0xb1, 0x7d, 0x6c, 0x36, 0x7a, 0x73, 0x5c, + 0xcb, 0xc0, 0xc1, 0x96, 0x24, 0x1a, 0x35, 0x77, 0x24, 0xad, 0x5b, 0x48, 0x36, 0x7a, 0x63, 0x49, + 0x1c, 0x6c, 0x59, 0xcb, 0xc4, 0xb6, 0xd6, 0x71, 0xb4, 0x6f, 0xa9, 0x65, 0x3d, 0x01, 0xb6, 0x16, + 0x04, 0x59, 0x86, 0x41, 0xcb, 0xf5, 0xba, 0xf7, 0x2b, 0x81, 0xf8, 0x9c, 0x4f, 0x8d, 0xf4, 0xfd, + 0x4d, 0x20, 0xc8, 0xc5, 0xcc, 0xef, 0x3f, 0x6b, 0xda, 0xfd, 0xe0, 0x74, 0x7b, 0x8b, 0x61, 0xbe, + 0x6d, 0x77, 0x50, 0x2e, 0x46, 0x98, 0x23, 0xa7, 0x1f, 0xc3, 0xbd, 0x89, 0x28, 0xed, 0xa6, 0xf4, + 0x34, 0xb6, 0x81, 0x9d, 0xe3, 0x5a, 0xd6, 0x71, 0x6e, 0x17, 0xb6, 0x4a, 0xeb, 0x1f, 0x02, 0x11, + 0x26, 0x84, 0xe5, 0x3e, 0x83, 0x06, 0x6e, 0x47, 0x72, 0x97, 0xed, 0x88, 0xa1, 0xf4, 0x11, 0x00, + 0xde, 0xd6, 0xf1, 0xc6, 0xde, 0x8e, 0xd0, 0xf3, 0xda, 0xae, 0x8d, 0x2f, 0xa1, 0xad, 0x71, 0xaa, + 0xb5, 0x9f, 0xa4, 0x1b, 0x3a, 0xb0, 0x9e, 0x7c, 0x3b, 0x89, 0x1e, 0x62, 0xd1, 0xae, 0x0a, 0xed, + 0xe7, 0x68, 0x3b, 0x7a, 0x43, 0x57, 0x8b, 0xf6, 0x10, 0xfa, 0x01, 0x84, 0x2e, 0x35, 0x91, 0xe3, + 0x0c, 0xad, 0xde, 0x99, 0x7c, 0xd0, 0x86, 0x26, 0x9a, 0xbd, 0x5f, 0x08, 0x04, 0xa3, 0xa1, 0xa6, + 0x9f, 0x43, 0xcb, 0xde, 0x17, 0x91, 0xdf, 0x7a, 0xd7, 0x36, 0x07, 0xbe, 0x29, 0x4a, 0x33, 0xca, + 0xe9, 0x17, 0xd0, 0xd2, 0x46, 0x59, 0x60, 0xfd, 0xce, 0x13, 0xd6, 0xd4, 0x46, 0x8d, 0xf2, 0x01, + 0x40, 0x28, 0xf2, 0xb1, 0xcb, 0xe3, 0x6f, 0x02, 0xc9, 0x19, 0x67, 0x6a, 0x7a, 0x99, 0x71, 0x3d, + 0x2f, 0xdc, 0x3d, 0xd8, 0x81, 0xb8, 0x9c, 0xcf, 0xc6, 0x3f, 0xcd, 0xb9, 0x12, 0x5c, 0xfb, 0x59, + 0x81, 0x72, 0x3e, 0xfb, 0xce, 0x79, 0xe8, 0x03, 0x68, 0x1a, 0x59, 0x8d, 0xaf, 0xf0, 0xdf, 0x41, + 0xd6, 0x30, 0xb2, 0x3a, 0xa1, 0x5f, 0x41, 0xec, 0xf6, 0xe7, 0xf2, 0x02, 0x07, 0x37, 0xd6, 0xb3, + 0xea, 0x7c, 0xe6, 0x9a, 0x88, 0x23, 0x6b, 0x17, 0xb9, 0x9e, 0x4a, 0xc5, 0xdd, 0xc2, 0xae, 0x67, + 0xfe, 0x44, 0x9f, 0x42, 0x20, 0x72, 0xed, 0xaf, 0x63, 0xba, 0x7d, 0x9d, 0x0c, 0x75, 0x66, 0x83, + 0xe8, 0x43, 0xcc, 0xec, 
0xca, 0x3d, 0x95, 0x41, 0xe6, 0x0e, 0x4f, 0xff, 0x20, 0x10, 0x2e, 0xe7, + 0x87, 0x86, 0xd0, 0x78, 0x2d, 0x4b, 0x9e, 0xd4, 0xac, 0x65, 0xb7, 0x58, 0x42, 0xac, 0x35, 0x2a, + 0xcd, 0x8b, 0xa4, 0x4e, 0x23, 0x68, 0x8e, 0x4a, 0xf3, 0xec, 0x79, 0x12, 0x78, 0xf3, 0x60, 0x3f, + 0x69, 0x78, 0xf3, 0xf9, 0xa7, 0x49, 0xd3, 0x9a, 0x78, 0x0b, 0x12, 0xa0, 0x00, 0x2d, 0xb7, 0x07, + 0x92, 0xd8, 0xda, 0x4e, 0xec, 0xe4, 0x21, 0x8d, 0xa1, 0x7d, 0xce, 0xd4, 0xe1, 0x25, 0x53, 0xc9, + 0x7b, 0x34, 0x81, 0xce, 0x60, 0xe3, 0x06, 0x24, 0x39, 0xfd, 0x1f, 0xc4, 0x47, 0xeb, 0x9b, 0x93, + 0xf0, 0xa7, 0xe7, 0x00, 0xeb, 0x17, 0xd2, 0x02, 0xf0, 0x74, 0xa8, 0x38, 0x33, 0x3c, 0x4f, 0x6a, + 0xf4, 0xff, 0x70, 0x6f, 0xed, 0xb1, 0xbf, 0x20, 0x2b, 0xd7, 0x50, 0xc9, 0xaa, 0xb2, 0xae, 0xfa, + 0x0a, 0x87, 0x2e, 0x9e, 0x27, 0xc1, 0xe0, 0x07, 0xb8, 0x2f, 0xe4, 0x52, 0xaf, 0x0b, 0x55, 0x4d, + 0x07, 0xb1, 0x7b, 0xe9, 0x4e, 0xad, 0x76, 0xa7, 0xe4, 0xc7, 0x83, 0x0b, 0x61, 0x2e, 0xe7, 0x13, + 0xfb, 0xfa, 0xef, 0xb9, 0xb0, 0x4f, 0x84, 0xf4, 0xd6, 0x9e, 0x28, 0x0d, 0x57, 0x25, 0x2b, 0xf6, + 0x50, 0xe9, 0x3d, 0xa7, 0x74, 0x35, 0xf9, 0x93, 0x90, 0x49, 0x0b, 0x5d, 0x07, 0xff, 0x06, 0x00, + 0x00, 0xff, 0xff, 0x31, 0x59, 0x18, 0x2e, 0x92, 0x09, 0x00, 0x00, } diff --git a/internal/proxy/impl.go b/internal/proxy/impl.go index 4ba2f8bfdb..fb06292ec2 100644 --- a/internal/proxy/impl.go +++ b/internal/proxy/impl.go @@ -129,29 +129,6 @@ func (node *Proxy) InvalidateCollectionMetaCache(ctx context.Context, request *p }, nil } -// ReleaseDQLMessageStream release the query message stream of specific collection. -func (node *Proxy) ReleaseDQLMessageStream(ctx context.Context, request *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - ctx = logutil.WithModule(ctx, moduleName) - logutil.Logger(ctx).Debug("received request to release DQL message strem", - zap.Any("role", typeutil.ProxyRole), - zap.Any("db", request.DbID), - zap.Any("collection", request.CollectionID)) - - if !node.checkHealthy() { - return unhealthyStatus(), nil - } - - logutil.Logger(ctx).Debug("complete to release DQL message stream", - zap.Any("role", typeutil.ProxyRole), - zap.Any("db", request.DbID), - zap.Any("collection", request.CollectionID)) - - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, nil -} - // CreateCollection create a collection by the schema. // TODO(dragondriver): add more detailed ut for ConsistencyLevel, should we support multiple consistency level in Proxy? 
func (node *Proxy) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) { @@ -4021,22 +3998,6 @@ func (node *Proxy) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUser }, nil } -// SendSearchResult needs to be removed TODO -func (node *Proxy) SendSearchResult(ctx context.Context, req *internalpb.SearchResults) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_UnexpectedError, - Reason: "Not implemented", - }, nil -} - -// SendRetrieveResult needs to be removed TODO -func (node *Proxy) SendRetrieveResult(ctx context.Context, req *internalpb.RetrieveResults) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_UnexpectedError, - Reason: "Not implemented", - }, nil -} - func (node *Proxy) CreateRole(ctx context.Context, req *milvuspb.CreateRoleRequest) (*commonpb.Status, error) { logger.Debug("CreateRole", zap.Any("req", req)) if code, ok := node.checkHealthyAndReturnCode(); !ok { diff --git a/internal/proxy/proxy_test.go b/internal/proxy/proxy_test.go index 0f3fcf166b..25cd2940f5 100644 --- a/internal/proxy/proxy_test.go +++ b/internal/proxy/proxy_test.go @@ -718,12 +718,6 @@ func TestProxy(t *testing.T) { assert.NoError(t, err) assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) - // recreate -> fail - req2 := constructCreateCollectionRequest() - resp, err = proxy.CreateCollection(ctx, req2) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode) - reqInvalidField := constructCreateCollectionRequest() schema := constructCollectionSchema() schema.Fields = append(schema.Fields, &schemapb.FieldSchema{ @@ -764,16 +758,6 @@ func TestProxy(t *testing.T) { DbName: dbName, CollectionName: collectionName, }) - - sameAliasReq := &milvuspb.CreateAliasRequest{ - Base: nil, - CollectionName: collectionName, - Alias: "alias", - } - - resp, err = proxy.CreateAlias(ctx, sameAliasReq) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode) }) wg.Add(1) @@ -833,15 +817,6 @@ func TestProxy(t *testing.T) { CollectionName: collectionName, }) - sameDropReq := &milvuspb.DropAliasRequest{ - Base: nil, - Alias: "alias", - } - - // Can't drop non-existing alias - resp, err = proxy.DropAlias(ctx, sameDropReq) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode) }) wg.Add(1) @@ -950,16 +925,6 @@ func TestProxy(t *testing.T) { assert.NoError(t, err) assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) - // recreate -> fail - resp, err = proxy.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{ - Base: nil, - DbName: dbName, - CollectionName: collectionName, - PartitionName: partitionName, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode) - // create partition with non-exist collection -> fail resp, err = proxy.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{ Base: nil, @@ -1683,7 +1648,7 @@ func TestProxy(t *testing.T) { wg.Add(1) t.Run("release collection", func(t *testing.T) { defer wg.Done() - collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName) + _, err := globalMetaCache.GetCollectionID(ctx, collectionName) assert.NoError(t, err) resp, err := proxy.ReleaseCollection(ctx, &milvuspb.ReleaseCollectionRequest{ @@ -1695,16 +1660,6 @@ func TestProxy(t *testing.T) { assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) assert.Equal(t, "", resp.Reason) - // release dql message stream - resp, 
err = proxy.ReleaseDQLMessageStream(ctx, &proxypb.ReleaseDQLMessageStreamRequest{ - Base: nil, - DbID: 0, - CollectionID: collectionID, - }) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) - assert.Equal(t, "", resp.Reason) - // release collection cache resp, err = proxy.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{ Base: nil, @@ -1995,24 +1950,6 @@ func TestProxy(t *testing.T) { // drop non-exist partition -> fail - resp, err = proxy.DropPartition(ctx, &milvuspb.DropPartitionRequest{ - Base: nil, - DbName: dbName, - CollectionName: collectionName, - PartitionName: partitionName, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode) - - resp, err = proxy.DropPartition(ctx, &milvuspb.DropPartitionRequest{ - Base: nil, - DbName: dbName, - CollectionName: collectionName, - PartitionName: otherCollectionName, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode) - resp, err = proxy.DropPartition(ctx, &milvuspb.DropPartitionRequest{ Base: nil, DbName: dbName, @@ -2087,7 +2024,7 @@ func TestProxy(t *testing.T) { wg.Add(1) t.Run("drop collection", func(t *testing.T) { defer wg.Done() - collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName) + _, err := globalMetaCache.GetCollectionID(ctx, collectionName) assert.NoError(t, err) resp, err := proxy.DropCollection(ctx, &milvuspb.DropCollectionRequest{ @@ -2107,15 +2044,6 @@ func TestProxy(t *testing.T) { assert.NoError(t, err) assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) - // release dql stream - resp, err = proxy.ReleaseDQLMessageStream(ctx, &proxypb.ReleaseDQLMessageStreamRequest{ - Base: nil, - DbID: 0, - CollectionID: collectionID, - }) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) - // release collection load cache resp, err = proxy.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{ Base: nil, @@ -2328,14 +2256,6 @@ func TestProxy(t *testing.T) { proxy.UpdateStateCode(internalpb.StateCode_Abnormal) - wg.Add(1) - t.Run("ReleaseDQLMessageStream fail, unhealthy", func(t *testing.T) { - defer wg.Done() - resp, err := proxy.ReleaseDQLMessageStream(ctx, &proxypb.ReleaseDQLMessageStreamRequest{}) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode) - }) - wg.Add(1) t.Run("CreateCollection fail, unhealthy", func(t *testing.T) { defer wg.Done() diff --git a/internal/proxy/rootcoord_mock_test.go b/internal/proxy/rootcoord_mock_test.go index c9f672172a..fc7f602be4 100644 --- a/internal/proxy/rootcoord_mock_test.go +++ b/internal/proxy/rootcoord_mock_test.go @@ -899,20 +899,6 @@ func (coord *RootCoordMock) DescribeSegments(ctx context.Context, req *rootcoord panic("implement me") } -func (coord *RootCoordMock) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - code := coord.state.Load().(internalpb.StateCode) - if code != internalpb.StateCode_Healthy { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_UnexpectedError, - Reason: fmt.Sprintf("state code = %s", internalpb.StateCode_name[int32(code)]), - }, nil - } - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, nil -} - func (coord *RootCoordMock) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { code := 
coord.state.Load().(internalpb.StateCode) if code != internalpb.StateCode_Healthy { diff --git a/internal/proxy/task.go b/internal/proxy/task.go index 291116c1aa..81a7b85c19 100644 --- a/internal/proxy/task.go +++ b/internal/proxy/task.go @@ -314,7 +314,9 @@ func (dct *dropCollectionTask) PreExecute(ctx context.Context) error { func (dct *dropCollectionTask) Execute(ctx context.Context) error { collID, err := globalMetaCache.GetCollectionID(ctx, dct.CollectionName) if err != nil { - return err + // make dropping collection idempotent. + dct.result = &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success} + return nil } dct.result, err = dct.rootCoord.DropCollection(ctx, dct.DropCollectionRequest) diff --git a/internal/proxy/task_test.go b/internal/proxy/task_test.go index 31aeb693bb..7476567a4a 100644 --- a/internal/proxy/task_test.go +++ b/internal/proxy/task_test.go @@ -799,7 +799,7 @@ func TestDropCollectionTask(t *testing.T) { return 0, errors.New("mock") }) err = task.Execute(ctx) - assert.Error(t, err) + assert.NoError(t, err) cache.setGetIDFunc(func(ctx context.Context, collectionName string) (typeutil.UniqueID, error) { return 0, nil }) diff --git a/internal/querycoord/mock_3rd_component_test.go b/internal/querycoord/mock_3rd_component_test.go index f19e2d50cc..9b187f1e30 100644 --- a/internal/querycoord/mock_3rd_component_test.go +++ b/internal/querycoord/mock_3rd_component_test.go @@ -224,23 +224,6 @@ func (rc *rootCoordMock) ShowPartitions(ctx context.Context, in *milvuspb.ShowPa }, nil } -func (rc *rootCoordMock) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - if rc.returnGrpcError { - return nil, errors.New("release DQLMessage stream failed") - } - - if rc.returnError { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_UnexpectedError, - Reason: "release DQLMessage stream failed", - }, nil - } - - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, nil -} - func (rc *rootCoordMock) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { if rc.returnGrpcError { return nil, errors.New("InvalidateCollectionMetaCache failed") diff --git a/internal/rootcoord/alter_alias_task.go b/internal/rootcoord/alter_alias_task.go new file mode 100644 index 0000000000..56b036d15a --- /dev/null +++ b/internal/rootcoord/alter_alias_task.go @@ -0,0 +1,28 @@ +package rootcoord + +import ( + "context" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +type alterAliasTask struct { + baseTaskV2 + Req *milvuspb.AlterAliasRequest +} + +func (t *alterAliasTask) Prepare(ctx context.Context) error { + if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_AlterAlias); err != nil { + return err + } + return nil +} + +func (t *alterAliasTask) Execute(ctx context.Context) error { + if err := t.core.ExpireMetaCache(ctx, []string{t.Req.GetAlias()}, InvalidCollectionID, t.GetTs()); err != nil { + return err + } + // alter alias is atomic enough. 
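// Expiring the proxies' meta cache for the alias before rewriting the mapping makes the
// proxies re-resolve the alias after it points at the new collection; the metastore update
// itself is a single write, so no compensating (undo) step is registered for this task.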
+ return t.core.meta.AlterAlias(ctx, t.Req.GetAlias(), t.Req.GetCollectionName(), t.GetTs()) +} diff --git a/internal/rootcoord/alter_alias_task_test.go b/internal/rootcoord/alter_alias_task_test.go new file mode 100644 index 0000000000..1cfe9b0aa5 --- /dev/null +++ b/internal/rootcoord/alter_alias_task_test.go @@ -0,0 +1,54 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +func Test_alterAliasTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &alterAliasTask{Req: &milvuspb.AlterAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}}} + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + task := &alterAliasTask{Req: &milvuspb.AlterAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_AlterAlias}}} + err := task.Prepare(context.Background()) + assert.NoError(t, err) + }) +} + +func Test_alterAliasTask_Execute(t *testing.T) { + t.Run("failed to expire cache", func(t *testing.T) { + core := newTestCore(withInvalidProxyManager()) + task := &alterAliasTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.AlterAliasRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_AlterAlias}, + Alias: "test", + }, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("failed to alter alias", func(t *testing.T) { + core := newTestCore(withValidProxyManager(), withInvalidMeta()) + task := &alterAliasTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.AlterAliasRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_AlterAlias}, + Alias: "test", + }, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) +} diff --git a/internal/rootcoord/broker.go b/internal/rootcoord/broker.go new file mode 100644 index 0000000000..2b98e98389 --- /dev/null +++ b/internal/rootcoord/broker.go @@ -0,0 +1,226 @@ +package rootcoord + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/milvus-io/milvus/internal/proto/indexpb" + + "github.com/milvus-io/milvus/internal/proto/datapb" + + "github.com/milvus-io/milvus/internal/log" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/querypb" + "github.com/milvus-io/milvus/internal/util/funcutil" + "go.uber.org/zap" +) + +type watchInfo struct { + ts Timestamp + collectionID UniqueID + partitionID UniqueID + vChannels []string + startPositions []*commonpb.KeyDataPair +} + +// Broker communicates with other components. 
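// Broker narrows RootCoord's dependencies on the other coordinators to one interface:
// ServerBroker forwards to the live QueryCoord/DataCoord/IndexCoord clients, while the
// tests swap in a mock broker (see withBroker in the rootcoord test helpers), so task
// code never has to call the coordinator clients directly.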
+type Broker interface { + ReleaseCollection(ctx context.Context, collectionID UniqueID) error + GetQuerySegmentInfo(ctx context.Context, collectionID int64, segIDs []int64) (retResp *querypb.GetSegmentInfoResponse, retErr error) + + WatchChannels(ctx context.Context, info *watchInfo) error + UnwatchChannels(ctx context.Context, info *watchInfo) error + AddSegRefLock(ctx context.Context, taskID int64, segIDs []int64) error + ReleaseSegRefLock(ctx context.Context, taskID int64, segIDs []int64) error + Flush(ctx context.Context, cID int64, segIDs []int64) error + Import(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) + + DropCollectionIndex(ctx context.Context, collID UniqueID) error + GetSegmentIndexState(ctx context.Context, collID UniqueID, indexName string, segIDs []UniqueID) ([]*indexpb.SegmentIndexState, error) +} + +type ServerBroker struct { + s *Core +} + +func newServerBroker(s *Core) *ServerBroker { + return &ServerBroker{s: s} +} + +func (b *ServerBroker) ReleaseCollection(ctx context.Context, collectionID UniqueID) error { + log.Info("releasing collection", zap.Int64("collection", collectionID)) + + if err := funcutil.WaitForComponentHealthy(ctx, b.s.queryCoord, "QueryCoord", 100, time.Millisecond*200); err != nil { + log.Error("failed to release collection, querycoord not healthy", zap.Error(err), zap.Int64("collection", collectionID)) + return err + } + + resp, err := b.s.queryCoord.ReleaseCollection(ctx, &querypb.ReleaseCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_ReleaseCollection}, + CollectionID: collectionID, + NodeID: b.s.session.ServerID, + }) + if err != nil { + return err + } + + if resp.GetErrorCode() != commonpb.ErrorCode_Success { + return fmt.Errorf("failed to release collection, code: %s, reason: %s", resp.GetErrorCode(), resp.GetReason()) + } + + log.Info("done to release collection", zap.Int64("collection", collectionID)) + return nil +} + +func (b *ServerBroker) GetQuerySegmentInfo(ctx context.Context, collectionID int64, segIDs []int64) (retResp *querypb.GetSegmentInfoResponse, retErr error) { + resp, err := b.s.queryCoord.GetSegmentInfo(ctx, &querypb.GetSegmentInfoRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_GetSegmentState, + SourceID: b.s.session.ServerID, + }, + CollectionID: collectionID, + SegmentIDs: segIDs, + }) + return resp, err +} + +func toKeyDataPairs(m map[string][]byte) []*commonpb.KeyDataPair { + ret := make([]*commonpb.KeyDataPair, 0, len(m)) + for k, data := range m { + ret = append(ret, &commonpb.KeyDataPair{ + Key: k, + Data: data, + }) + } + return ret +} + +func (b *ServerBroker) WatchChannels(ctx context.Context, info *watchInfo) error { + log.Info("watching channels", zap.Uint64("ts", info.ts), zap.Int64("collection", info.collectionID), zap.Strings("vChannels", info.vChannels)) + + if err := funcutil.WaitForComponentHealthy(ctx, b.s.dataCoord, "DataCoord", 100, time.Millisecond*200); err != nil { + return err + } + + resp, err := b.s.dataCoord.WatchChannels(ctx, &datapb.WatchChannelsRequest{ + CollectionID: info.collectionID, + ChannelNames: info.vChannels, + StartPositions: info.startPositions, + }) + if err != nil { + return err + } + + if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success { + return fmt.Errorf("failed to watch channels, code: %s, reason: %s", resp.GetStatus().GetErrorCode(), resp.GetStatus().GetReason()) + } + + log.Info("done to watch channels", zap.Uint64("ts", info.ts), zap.Int64("collection", info.collectionID), 
zap.Strings("vChannels", info.vChannels)) + return nil +} + +func (b *ServerBroker) UnwatchChannels(ctx context.Context, info *watchInfo) error { + // TODO: release flowgraph on datanodes. + return nil +} + +func (b *ServerBroker) AddSegRefLock(ctx context.Context, taskID int64, segIDs []int64) error { + log.Info("acquiring seg lock", + zap.Int64s("segment IDs", segIDs), + zap.Int64("node ID", b.s.session.ServerID)) + resp, err := b.s.dataCoord.AcquireSegmentLock(ctx, &datapb.AcquireSegmentLockRequest{ + SegmentIDs: segIDs, + NodeID: b.s.session.ServerID, + TaskID: taskID, + }) + if err != nil { + return err + } + if resp.GetErrorCode() != commonpb.ErrorCode_Success { + return fmt.Errorf("failed to acquire segment lock %s", resp.GetReason()) + } + log.Info("acquire seg lock succeed", + zap.Int64s("segment IDs", segIDs), + zap.Int64("node ID", b.s.session.ServerID)) + return nil +} + +func (b *ServerBroker) ReleaseSegRefLock(ctx context.Context, taskID int64, segIDs []int64) error { + log.Info("releasing seg lock", + zap.Int64s("segment IDs", segIDs), + zap.Int64("node ID", b.s.session.ServerID)) + resp, err := b.s.dataCoord.ReleaseSegmentLock(ctx, &datapb.ReleaseSegmentLockRequest{ + SegmentIDs: segIDs, + NodeID: b.s.session.ServerID, + TaskID: taskID, + }) + if err != nil { + return err + } + if resp.GetErrorCode() != commonpb.ErrorCode_Success { + return fmt.Errorf("failed to release segment lock %s", resp.GetReason()) + } + log.Info("release seg lock succeed", + zap.Int64s("segment IDs", segIDs), + zap.Int64("node ID", b.s.session.ServerID)) + return nil +} + +func (b *ServerBroker) Flush(ctx context.Context, cID int64, segIDs []int64) error { + resp, err := b.s.dataCoord.Flush(ctx, &datapb.FlushRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_Flush, + SourceID: b.s.session.ServerID, + }, + DbID: 0, + SegmentIDs: segIDs, + CollectionID: cID, + }) + if err != nil { + return errors.New("failed to call flush to data coordinator: " + err.Error()) + } + if resp.Status.ErrorCode != commonpb.ErrorCode_Success { + return errors.New(resp.Status.Reason) + } + log.Info("flush on collection succeed", zap.Int64("collection ID", cID)) + return nil +} + +func (b *ServerBroker) Import(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) { + return b.s.dataCoord.Import(ctx, req) +} + +func (b *ServerBroker) DropCollectionIndex(ctx context.Context, collID UniqueID) error { + if err := funcutil.WaitForComponentHealthy(ctx, b.s.indexCoord, "IndexCoord", 100, time.Millisecond*100); err != nil { + return err + } + rsp, err := b.s.indexCoord.DropIndex(ctx, &indexpb.DropIndexRequest{ + CollectionID: collID, + IndexName: "", + }) + if err != nil { + return err + } + if rsp.ErrorCode != commonpb.ErrorCode_Success { + return fmt.Errorf(rsp.Reason) + } + return nil +} + +func (b *ServerBroker) GetSegmentIndexState(ctx context.Context, collID UniqueID, indexName string, segIDs []UniqueID) ([]*indexpb.SegmentIndexState, error) { + resp, err := b.s.indexCoord.GetSegmentIndexState(ctx, &indexpb.GetSegmentIndexStateRequest{ + CollectionID: collID, + IndexName: indexName, + SegmentIDs: segIDs, + }) + if err != nil { + return nil, err + } + if resp.Status.ErrorCode != commonpb.ErrorCode_Success { + return nil, errors.New(resp.Status.Reason) + } + + return resp.GetStates(), nil +} diff --git a/internal/rootcoord/broker_test.go b/internal/rootcoord/broker_test.go new file mode 100644 index 0000000000..75cd975072 --- /dev/null +++ b/internal/rootcoord/broker_test.go @@ -0,0 
+1,302 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/milvus-io/milvus/internal/proto/indexpb" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/datapb" + "github.com/stretchr/testify/assert" +) + +func TestServerBroker_ReleaseCollection(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withUnhealthyQueryCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.ReleaseCollection(ctx, 1) + assert.Error(t, err) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withInvalidQueryCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.ReleaseCollection(ctx, 1) + assert.Error(t, err) + }) + + t.Run("non success error code on execute", func(t *testing.T) { + c := newTestCore(withFailedQueryCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.ReleaseCollection(ctx, 1) + assert.Error(t, err) + }) + + t.Run("success", func(t *testing.T) { + c := newTestCore(withValidQueryCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.ReleaseCollection(ctx, 1) + assert.NoError(t, err) + }) +} + +func TestServerBroker_GetSegmentInfo(t *testing.T) { + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withInvalidQueryCoord()) + b := newServerBroker(c) + ctx := context.Background() + _, err := b.GetQuerySegmentInfo(ctx, 1, []int64{1, 2}) + assert.Error(t, err) + }) + + t.Run("non success error code on execute", func(t *testing.T) { + c := newTestCore(withFailedQueryCoord()) + b := newServerBroker(c) + ctx := context.Background() + resp, err := b.GetQuerySegmentInfo(ctx, 1, []int64{1, 2}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode()) + }) + + t.Run("success", func(t *testing.T) { + c := newTestCore(withValidQueryCoord()) + b := newServerBroker(c) + ctx := context.Background() + resp, err := b.GetQuerySegmentInfo(ctx, 1, []int64{1, 2}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) +} + +func TestServerBroker_WatchChannels(t *testing.T) { + t.Run("unhealthy", func(t *testing.T) { + c := newTestCore(withUnhealthyDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.WatchChannels(ctx, &watchInfo{}) + assert.Error(t, err) + }) + + t.Run("failed to execute", func(t *testing.T) { + defer cleanTestEnv() + + c := newTestCore(withInvalidDataCoord(), withRocksMqTtSynchronizer()) + b := newServerBroker(c) + ctx := context.Background() + err := b.WatchChannels(ctx, &watchInfo{}) + assert.Error(t, err) + }) + + t.Run("non success error code on execute", func(t *testing.T) { + defer cleanTestEnv() + + c := newTestCore(withFailedDataCoord(), withRocksMqTtSynchronizer()) + b := newServerBroker(c) + ctx := context.Background() + err := b.WatchChannels(ctx, &watchInfo{}) + assert.Error(t, err) + }) + + t.Run("success", func(t *testing.T) { + defer cleanTestEnv() + + c := newTestCore(withValidDataCoord(), withRocksMqTtSynchronizer()) + b := newServerBroker(c) + ctx := context.Background() + err := b.WatchChannels(ctx, &watchInfo{}) + assert.NoError(t, err) + }) +} + +func TestServerBroker_UnwatchChannels(t *testing.T) { + // TODO: implement + b := newServerBroker(newTestCore()) + ctx := context.Background() + b.UnwatchChannels(ctx, &watchInfo{}) +} + +func TestServerBroker_AddSegRefLock(t *testing.T) { + t.Run("failed to execute", func(t *testing.T) { 
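// Naming convention used throughout these broker tests: withInvalid* mocks return an RPC
// error, withFailed* mocks return a non-success status code, and withValid* mocks succeed.
// "failed to execute" therefore covers the transport-error path, while the following
// subtest covers the non-success status path.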
+ c := newTestCore(withInvalidDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.AddSegRefLock(ctx, 1, []int64{1, 2}) + assert.Error(t, err) + }) + + t.Run("non success error code on execute", func(t *testing.T) { + c := newTestCore(withFailedDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.AddSegRefLock(ctx, 1, []int64{1, 2}) + assert.Error(t, err) + }) + + t.Run("success", func(t *testing.T) { + c := newTestCore(withValidDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.AddSegRefLock(ctx, 1, []int64{1, 2}) + assert.NoError(t, err) + }) +} + +func TestServerBroker_ReleaseSegRefLock(t *testing.T) { + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withInvalidDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.ReleaseSegRefLock(ctx, 1, []int64{1, 2}) + assert.Error(t, err) + }) + + t.Run("non success error code on execute", func(t *testing.T) { + c := newTestCore(withFailedDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.ReleaseSegRefLock(ctx, 1, []int64{1, 2}) + assert.Error(t, err) + }) + + t.Run("success", func(t *testing.T) { + c := newTestCore(withValidDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.ReleaseSegRefLock(ctx, 1, []int64{1, 2}) + assert.NoError(t, err) + }) +} + +func TestServerBroker_Flush(t *testing.T) { + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withInvalidDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.Flush(ctx, 1, []int64{1, 2}) + assert.Error(t, err) + }) + + t.Run("non success error code on execute", func(t *testing.T) { + c := newTestCore(withFailedDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.Flush(ctx, 1, []int64{1, 2}) + assert.Error(t, err) + }) + + t.Run("success", func(t *testing.T) { + c := newTestCore(withValidDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.Flush(ctx, 1, []int64{1, 2}) + assert.NoError(t, err) + }) +} + +func TestServerBroker_Import(t *testing.T) { + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withInvalidDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + resp, err := b.Import(ctx, &datapb.ImportTaskRequest{}) + assert.Error(t, err) + assert.Nil(t, resp) + }) + + t.Run("non success error code on execute", func(t *testing.T) { + c := newTestCore(withFailedDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + resp, err := b.Import(ctx, &datapb.ImportTaskRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode()) + }) + + t.Run("success", func(t *testing.T) { + c := newTestCore(withValidDataCoord()) + b := newServerBroker(c) + ctx := context.Background() + resp, err := b.Import(ctx, &datapb.ImportTaskRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) +} + +func TestServerBroker_DropCollectionIndex(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withUnhealthyIndexCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.DropCollectionIndex(ctx, 1) + assert.Error(t, err) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withInvalidIndexCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.DropCollectionIndex(ctx, 1) + assert.Error(t, err) + }) + 
+ t.Run("non success error code on execute", func(t *testing.T) { + c := newTestCore(withFailedIndexCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.DropCollectionIndex(ctx, 1) + assert.Error(t, err) + }) + + t.Run("success", func(t *testing.T) { + c := newTestCore(withValidIndexCoord()) + b := newServerBroker(c) + ctx := context.Background() + err := b.DropCollectionIndex(ctx, 1) + assert.NoError(t, err) + }) +} + +func TestServerBroker_GetSegmentIndexState(t *testing.T) { + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withInvalidIndexCoord()) + b := newServerBroker(c) + ctx := context.Background() + _, err := b.GetSegmentIndexState(ctx, 1, "index_name", []UniqueID{1, 2}) + assert.Error(t, err) + }) + + t.Run("non success error code on execute", func(t *testing.T) { + c := newTestCore(withFailedIndexCoord()) + b := newServerBroker(c) + ctx := context.Background() + _, err := b.GetSegmentIndexState(ctx, 1, "index_name", []UniqueID{1, 2}) + assert.Error(t, err) + }) + + t.Run("success", func(t *testing.T) { + c := newTestCore(withValidIndexCoord()) + c.indexCoord.(*mockIndexCoord).GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) { + return &indexpb.GetSegmentIndexStateResponse{ + Status: succStatus(), + States: []*indexpb.SegmentIndexState{ + { + SegmentID: 1, + State: commonpb.IndexState_Finished, + FailReason: "", + }, + }, + }, nil + } + b := newServerBroker(c) + ctx := context.Background() + states, err := b.GetSegmentIndexState(ctx, 1, "index_name", []UniqueID{1}) + assert.NoError(t, err) + assert.Equal(t, 1, len(states)) + assert.Equal(t, commonpb.IndexState_Finished, states[0].GetState()) + }) +} diff --git a/internal/rootcoord/constrant.go b/internal/rootcoord/constrant.go new file mode 100644 index 0000000000..bf09dacbcb --- /dev/null +++ b/internal/rootcoord/constrant.go @@ -0,0 +1,11 @@ +package rootcoord + +const ( + // TODO: better to make them configurable, use default value if no config was set since we never explode these before. + snapshotsSep = "_ts" + snapshotPrefix = "snapshots" + globalIDAllocatorKey = "idTimestamp" + globalIDAllocatorSubPath = "gid" + globalTSOAllocatorKey = "timestamp" + globalTSOAllocatorSubPath = "tso" +) diff --git a/internal/rootcoord/create_alias_task.go b/internal/rootcoord/create_alias_task.go new file mode 100644 index 0000000000..89eb90af33 --- /dev/null +++ b/internal/rootcoord/create_alias_task.go @@ -0,0 +1,26 @@ +package rootcoord + +import ( + "context" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +type createAliasTask struct { + baseTaskV2 + Req *milvuspb.CreateAliasRequest +} + +func (t *createAliasTask) Prepare(ctx context.Context) error { + if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_CreateAlias); err != nil { + return err + } + return nil +} + +func (t *createAliasTask) Execute(ctx context.Context) error { + // create alias is atomic enough. 
+ return t.core.meta.CreateAlias(ctx, t.Req.GetAlias(), t.Req.GetCollectionName(), t.GetTs()) +} diff --git a/internal/rootcoord/create_alias_task_test.go b/internal/rootcoord/create_alias_task_test.go new file mode 100644 index 0000000000..eea45f9b9e --- /dev/null +++ b/internal/rootcoord/create_alias_task_test.go @@ -0,0 +1,40 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/stretchr/testify/assert" + + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +func Test_createAliasTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &createAliasTask{Req: &milvuspb.CreateAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}}} + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + task := &createAliasTask{Req: &milvuspb.CreateAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateAlias}}} + err := task.Prepare(context.Background()) + assert.NoError(t, err) + }) +} + +func Test_createAliasTask_Execute(t *testing.T) { + t.Run("failed to create alias", func(t *testing.T) { + core := newTestCore(withInvalidMeta()) + task := &createAliasTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.CreateAliasRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateAlias}, + Alias: "test", + }, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) +} diff --git a/internal/rootcoord/create_collection_task.go b/internal/rootcoord/create_collection_task.go new file mode 100644 index 0000000000..1405e42d69 --- /dev/null +++ b/internal/rootcoord/create_collection_task.go @@ -0,0 +1,294 @@ +package rootcoord + +import ( + "context" + "errors" + "fmt" + + ms "github.com/milvus-io/milvus/internal/mq/msgstream" + "github.com/milvus-io/milvus/internal/proto/internalpb" + + "github.com/milvus-io/milvus/internal/util/funcutil" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + + pb "github.com/milvus-io/milvus/internal/proto/etcdpb" + + "github.com/milvus-io/milvus/internal/metastore/model" + + "github.com/milvus-io/milvus/internal/log" + "github.com/milvus-io/milvus/internal/util/typeutil" + "go.uber.org/zap" + + "github.com/golang/protobuf/proto" + + "github.com/milvus-io/milvus/internal/proto/schemapb" + + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +type collectionChannels struct { + virtualChannels []string + physicalChannels []string +} + +type createCollectionTask struct { + baseTaskV2 + Req *milvuspb.CreateCollectionRequest + schema *schemapb.CollectionSchema + collID UniqueID + partID UniqueID + channels collectionChannels +} + +func (t *createCollectionTask) validate() error { + if t.Req == nil { + return errors.New("empty requests") + } + + if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_CreateCollection); err != nil { + return err + } + + return nil +} + +func hasSystemFields(schema *schemapb.CollectionSchema, systemFields []string) bool { + for _, f := range schema.GetFields() { + if funcutil.SliceContain(systemFields, f.GetName()) { + return true + } + } + return false +} + +func (t *createCollectionTask) validateSchema(schema *schemapb.CollectionSchema) error { + if t.Req.GetCollectionName() != schema.GetName() { + return fmt.Errorf("collection name = %s, schema.Name=%s", t.Req.GetCollectionName(), schema.Name) + } + if hasSystemFields(schema, []string{RowIDFieldName, TimeStampFieldName}) { + return 
fmt.Errorf("schema contains system field: %s, %s", RowIDFieldName, TimeStampFieldName) + } + return nil +} + +func (t *createCollectionTask) assignFieldID(schema *schemapb.CollectionSchema) { + for idx := range schema.GetFields() { + schema.Fields[idx].FieldID = int64(idx + StartOfUserFieldID) + } +} + +func (t *createCollectionTask) appendSysFields(schema *schemapb.CollectionSchema) { + schema.Fields = append(schema.Fields, &schemapb.FieldSchema{ + FieldID: int64(RowIDField), + Name: RowIDFieldName, + IsPrimaryKey: false, + Description: "row id", + DataType: schemapb.DataType_Int64, + }) + schema.Fields = append(schema.Fields, &schemapb.FieldSchema{ + FieldID: int64(TimeStampField), + Name: TimeStampFieldName, + IsPrimaryKey: false, + Description: "time stamp", + DataType: schemapb.DataType_Int64, + }) +} + +func (t *createCollectionTask) prepareSchema() error { + var schema schemapb.CollectionSchema + if err := proto.Unmarshal(t.Req.GetSchema(), &schema); err != nil { + return err + } + if err := t.validateSchema(&schema); err != nil { + return err + } + t.assignFieldID(&schema) + t.appendSysFields(&schema) + t.schema = &schema + return nil +} + +func (t *createCollectionTask) assignShardsNum() { + if t.Req.GetShardsNum() <= 0 { + t.Req.ShardsNum = 2 + } +} + +func (t *createCollectionTask) assignCollectionID() error { + var err error + t.collID, err = t.core.idAllocator.AllocOne() + return err +} + +func (t *createCollectionTask) assignPartitionID() error { + var err error + t.partID, err = t.core.idAllocator.AllocOne() + return err +} + +func (t *createCollectionTask) assignChannels() error { + vchanNames := make([]string, t.Req.ShardsNum) + //physical channel names + chanNames := t.core.chanTimeTick.getDmlChannelNames(int(t.Req.ShardsNum)) + + for i := int32(0); i < t.Req.ShardsNum; i++ { + vchanNames[i] = fmt.Sprintf("%s_%dv%d", chanNames[i], t.collID, i) + } + t.channels = collectionChannels{ + virtualChannels: vchanNames, + physicalChannels: chanNames, + } + return nil +} + +func (t *createCollectionTask) Prepare(ctx context.Context) error { + if err := t.validate(); err != nil { + return err + } + + if err := t.prepareSchema(); err != nil { + return err + } + + t.assignShardsNum() + + if err := t.assignCollectionID(); err != nil { + return err + } + + if err := t.assignPartitionID(); err != nil { + return err + } + + return t.assignChannels() +} + +func (t *createCollectionTask) genCreateCollectionMsg(ctx context.Context) *ms.MsgPack { + ts := t.GetTs() + collectionID := t.collID + partitionID := t.partID + // error won't happen here. + marshaledSchema, _ := proto.Marshal(t.schema) + pChannels := t.channels.physicalChannels + vChannels := t.channels.virtualChannels + + msgPack := ms.MsgPack{} + baseMsg := ms.BaseMsg{ + Ctx: ctx, + BeginTimestamp: ts, + EndTimestamp: ts, + HashValues: []uint32{0}, + } + msg := &ms.CreateCollectionMsg{ + BaseMsg: baseMsg, + CreateCollectionRequest: internalpb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection, Timestamp: ts}, + CollectionID: collectionID, + PartitionID: partitionID, + Schema: marshaledSchema, + VirtualChannelNames: vChannels, + PhysicalChannelNames: pChannels, + }, + } + msgPack.Msgs = append(msgPack.Msgs, msg) + return &msgPack +} + +func (t *createCollectionTask) addChannelsAndGetStartPositions(ctx context.Context) (map[string][]byte, error) { + t.core.chanTimeTick.addDmlChannels(t.channels.physicalChannels...) 
+ msg := t.genCreateCollectionMsg(ctx) + return t.core.chanTimeTick.broadcastMarkDmlChannels(t.channels.physicalChannels, msg) +} + +func (t *createCollectionTask) Execute(ctx context.Context) error { + collID := t.collID + partID := t.partID + ts := t.GetTs() + + vchanNames := t.channels.virtualChannels + chanNames := t.channels.physicalChannels + + startPositions, err := t.addChannelsAndGetStartPositions(ctx) + if err != nil { + // ugly here, since we must get start positions first. + t.core.chanTimeTick.removeDmlChannels(t.channels.physicalChannels...) + return err + } + + collInfo := model.Collection{ + CollectionID: collID, + Name: t.schema.Name, + Description: t.schema.Description, + AutoID: t.schema.AutoID, + Fields: model.UnmarshalFieldModels(t.schema.Fields), + VirtualChannelNames: vchanNames, + PhysicalChannelNames: chanNames, + ShardsNum: t.Req.ShardsNum, + ConsistencyLevel: t.Req.ConsistencyLevel, + StartPositions: toKeyDataPairs(startPositions), + CreateTime: ts, + State: pb.CollectionState_CollectionCreating, + Partitions: []*model.Partition{ + { + PartitionID: partID, + PartitionName: Params.CommonCfg.DefaultPartitionName, + PartitionCreatedTimestamp: ts, + CollectionID: collID, + State: pb.PartitionState_PartitionCreated, + }, + }, + } + + // We cannot check the idempotency inside meta table when adding collection, since we'll execute duplicate steps + // if add collection successfully due to idempotency check. Some steps may be risky to be duplicate executed if they + // are not promised idempotent. + clone := collInfo.Clone() + clone.Partitions = []*model.Partition{{PartitionName: Params.CommonCfg.DefaultPartitionName}} + // need double check in meta table if we can't promise the sequence execution. + existedCollInfo, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), typeutil.MaxTimestamp) + if err == nil { + equal := existedCollInfo.Equal(*clone) + if !equal { + return fmt.Errorf("create duplicate collection with different parameters, collection: %s", t.Req.GetCollectionName()) + } + // make creating collection idempotent. + log.Warn("add duplicate collection", zap.String("collection", t.Req.GetCollectionName()), zap.Uint64("ts", t.GetTs())) + return nil + } + + undoTask := newBaseUndoTask() + undoTask.AddStep(&NullStep{}, &RemoveDmlChannelsStep{ + baseStep: baseStep{core: t.core}, + pchannels: chanNames, + }) // remove dml channels if any error occurs. + undoTask.AddStep(&AddCollectionMetaStep{ + baseStep: baseStep{core: t.core}, + coll: &collInfo, + }, &DeleteCollectionMetaStep{ + baseStep: baseStep{core: t.core}, + collectionID: collID, + ts: ts, + }) + undoTask.AddStep(&WatchChannelsStep{ + baseStep: baseStep{core: t.core}, + info: &watchInfo{ + ts: ts, + collectionID: collID, + vChannels: t.channels.virtualChannels, + startPositions: toKeyDataPairs(startPositions), + }, + }, &UnwatchChannelsStep{ + baseStep: baseStep{core: t.core}, + collectionID: collID, + channels: t.channels, + }) + undoTask.AddStep(&ChangeCollectionStateStep{ + baseStep: baseStep{core: t.core}, + collectionID: collID, + state: pb.CollectionState_CollectionCreated, + ts: ts, + }, &NullStep{}) // We'll remove the whole collection anyway. 
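// Each AddStep call pairs a forward step with its compensating step. The undo task runs
// the forward steps in order; if one fails, the compensations registered for the steps
// that already succeeded are replayed asynchronously to roll the collection back, which
// is exactly what the "partial error, check if undo worked" test below exercises.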
+ + return undoTask.Execute(ctx) +} diff --git a/internal/rootcoord/create_collection_task_test.go b/internal/rootcoord/create_collection_task_test.go new file mode 100644 index 0000000000..34e415b968 --- /dev/null +++ b/internal/rootcoord/create_collection_task_test.go @@ -0,0 +1,502 @@ +package rootcoord + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/milvus-io/milvus/internal/metastore/model" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/datapb" + "github.com/milvus-io/milvus/internal/proto/etcdpb" + "github.com/milvus-io/milvus/internal/proto/internalpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" + "github.com/milvus-io/milvus/internal/proto/schemapb" + "github.com/milvus-io/milvus/internal/util/funcutil" + "github.com/stretchr/testify/assert" +) + +func Test_createCollectionTask_validate(t *testing.T) { + t.Run("empty request", func(t *testing.T) { + task := createCollectionTask{ + Req: nil, + } + err := task.validate() + assert.Error(t, err) + }) + + t.Run("invalid msg type", func(t *testing.T) { + task := createCollectionTask{ + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}, + }, + } + err := task.validate() + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + task := createCollectionTask{ + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + }, + } + err := task.validate() + assert.NoError(t, err) + }) +} + +func Test_createCollectionTask_validateSchema(t *testing.T) { + t.Run("name mismatch", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + otherName := collectionName + "_other" + task := createCollectionTask{ + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + }, + } + schema := &schemapb.CollectionSchema{ + Name: otherName, + } + err := task.validateSchema(schema) + assert.Error(t, err) + }) + + t.Run("has system fields", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + task := createCollectionTask{ + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + }, + } + schema := &schemapb.CollectionSchema{ + Name: collectionName, + Fields: []*schemapb.FieldSchema{ + {Name: RowIDFieldName}, + }, + } + err := task.validateSchema(schema) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + task := createCollectionTask{ + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + }, + } + schema := &schemapb.CollectionSchema{ + Name: collectionName, + Fields: []*schemapb.FieldSchema{}, + } + err := task.validateSchema(schema) + assert.NoError(t, err) + }) +} + +func Test_createCollectionTask_prepareSchema(t *testing.T) { + t.Run("failed to unmarshal", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + task := createCollectionTask{ + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + Schema: []byte("invalid schema"), + }, + } + err := task.prepareSchema() + assert.Error(t, err) + }) + + t.Run("contain system fields", func(t *testing.T) 
{ + collectionName := funcutil.GenRandomStr() + schema := &schemapb.CollectionSchema{ + Name: collectionName, + Description: "", + AutoID: false, + Fields: []*schemapb.FieldSchema{ + {Name: TimeStampFieldName}, + }, + } + marshaledSchema, err := proto.Marshal(schema) + assert.NoError(t, err) + task := createCollectionTask{ + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + Schema: marshaledSchema, + }, + } + err = task.prepareSchema() + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + field1 := funcutil.GenRandomStr() + schema := &schemapb.CollectionSchema{ + Name: collectionName, + Description: "", + AutoID: false, + Fields: []*schemapb.FieldSchema{ + {Name: field1}, + }, + } + marshaledSchema, err := proto.Marshal(schema) + assert.NoError(t, err) + task := createCollectionTask{ + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + Schema: marshaledSchema, + }, + } + err = task.prepareSchema() + assert.NoError(t, err) + }) +} + +func Test_createCollectionTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &createCollectionTask{ + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}, + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("invalid schema", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + task := &createCollectionTask{ + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + Schema: []byte("invalid schema"), + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("failed to assign id", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + field1 := funcutil.GenRandomStr() + schema := &schemapb.CollectionSchema{ + Name: collectionName, + Description: "", + AutoID: false, + Fields: []*schemapb.FieldSchema{ + {Name: field1}, + }, + } + marshaledSchema, err := proto.Marshal(schema) + assert.NoError(t, err) + + core := newTestCore(withInvalidIDAllocator()) + + task := createCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + Schema: marshaledSchema, + }, + } + err = task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("failed to assign channels", func(t *testing.T) { + // TODO: error won't happen here. 
+ }) + + t.Run("normal case", func(t *testing.T) { + defer cleanTestEnv() + + collectionName := funcutil.GenRandomStr() + field1 := funcutil.GenRandomStr() + + ticker := newRocksMqTtSynchronizer() + + core := newTestCore(withValidIDAllocator(), withTtSynchronizer(ticker)) + + schema := &schemapb.CollectionSchema{ + Name: collectionName, + Description: "", + AutoID: false, + Fields: []*schemapb.FieldSchema{ + {Name: field1}, + }, + } + marshaledSchema, err := proto.Marshal(schema) + assert.NoError(t, err) + + task := createCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + Schema: marshaledSchema, + }, + } + err = task.Prepare(context.Background()) + assert.NoError(t, err) + }) +} + +func Test_createCollectionTask_Execute(t *testing.T) { + t.Run("add same collection with different parameters", func(t *testing.T) { + defer cleanTestEnv() + ticker := newRocksMqTtSynchronizer() + + collectionName := funcutil.GenRandomStr() + field1 := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName} + + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return coll, nil + } + + core := newTestCore(withMeta(meta), withTtSynchronizer(ticker)) + + task := &createCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + }, + schema: &schemapb.CollectionSchema{Name: collectionName, Fields: []*schemapb.FieldSchema{{Name: field1}}}, + } + + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("add duplicate collection", func(t *testing.T) { + defer cleanTestEnv() + ticker := newRocksMqTtSynchronizer() + shardNum := 2 + pchans := ticker.getDmlChannelNames(shardNum) + + collectionName := funcutil.GenRandomStr() + field1 := funcutil.GenRandomStr() + collID := UniqueID(1) + schema := &schemapb.CollectionSchema{Name: collectionName, Fields: []*schemapb.FieldSchema{{Name: field1}}} + channels := collectionChannels{ + virtualChannels: []string{funcutil.GenRandomStr(), funcutil.GenRandomStr()}, + physicalChannels: pchans, + } + coll := &model.Collection{ + CollectionID: collID, + Name: schema.Name, + Description: schema.Description, + AutoID: schema.AutoID, + Fields: model.UnmarshalFieldModels(schema.GetFields()), + VirtualChannelNames: channels.virtualChannels, + PhysicalChannelNames: channels.physicalChannels, + Partitions: []*model.Partition{{PartitionName: Params.CommonCfg.DefaultPartitionName}}, + } + + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return coll, nil + } + + core := newTestCore(withMeta(meta), withTtSynchronizer(ticker)) + + task := &createCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + }, + collID: collID, + schema: schema, + channels: channels, + } + + err := task.Execute(context.Background()) + assert.NoError(t, err) + }) + + t.Run("failed to get start positions", func(t *testing.T) { + ticker := newTickerWithMockFailStream() + shardNum := 2 + pchans := ticker.getDmlChannelNames(shardNum) + core := 
newTestCore(withTtSynchronizer(ticker)) + task := &createCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + channels: collectionChannels{ + physicalChannels: pchans, + virtualChannels: []string{funcutil.GenRandomStr(), funcutil.GenRandomStr()}, + }, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + defer cleanTestEnv() + + collectionName := funcutil.GenRandomStr() + field1 := funcutil.GenRandomStr() + shardNum := 2 + + ticker := newRocksMqTtSynchronizer() + pchans := ticker.getDmlChannelNames(shardNum) + + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return nil, errors.New("error mock GetCollectionByName") + } + meta.AddCollectionFunc = func(ctx context.Context, coll *model.Collection) error { + return nil + } + meta.ChangeCollectionStateFunc = func(ctx context.Context, collectionID UniqueID, state etcdpb.CollectionState, ts Timestamp) error { + return nil + } + + dc := newMockDataCoord() + dc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{ + NodeID: TestRootCoordID, + StateCode: internalpb.StateCode_Healthy, + }, + SubcomponentStates: nil, + Status: succStatus(), + }, nil + } + dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) { + return &datapb.WatchChannelsResponse{Status: succStatus()}, nil + } + + core := newTestCore(withValidIDAllocator(), + withMeta(meta), + withTtSynchronizer(ticker), + withDataCoord(dc)) + core.broker = newServerBroker(core) + + schema := &schemapb.CollectionSchema{ + Name: collectionName, + Description: "", + AutoID: false, + Fields: []*schemapb.FieldSchema{ + {Name: field1}, + }, + } + marshaledSchema, err := proto.Marshal(schema) + assert.NoError(t, err) + + task := createCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + Schema: marshaledSchema, + ShardsNum: int32(shardNum), + }, + channels: collectionChannels{physicalChannels: pchans}, + schema: schema, + } + + err = task.Execute(context.Background()) + assert.NoError(t, err) + }) + + t.Run("partial error, check if undo worked", func(t *testing.T) { + defer cleanTestEnv() + + collectionName := funcutil.GenRandomStr() + field1 := funcutil.GenRandomStr() + shardNum := 2 + + ticker := newRocksMqTtSynchronizer() + pchans := ticker.getDmlChannelNames(shardNum) + + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return nil, errors.New("error mock GetCollectionByName") + } + meta.AddCollectionFunc = func(ctx context.Context, coll *model.Collection) error { + return nil + } + // inject error here. 
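// Failing ChangeCollectionState, the last forward step, guarantees that every earlier
// step has already run, so the test can assert that each of their compensations fires:
// the channels are unwatched, the collection meta is removed, and the DML channels are
// released from the ticker.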
+ meta.ChangeCollectionStateFunc = func(ctx context.Context, collectionID UniqueID, state etcdpb.CollectionState, ts Timestamp) error { + return errors.New("error mock ChangeCollectionState") + } + removeCollectionCalled := false + removeCollectionChan := make(chan struct{}, 1) + meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error { + removeCollectionCalled = true + removeCollectionChan <- struct{}{} + return nil + } + + broker := newMockBroker() + broker.WatchChannelsFunc = func(ctx context.Context, info *watchInfo) error { + return nil + } + unwatchChannelsCalled := false + unwatchChannelsChan := make(chan struct{}, 1) + broker.UnwatchChannelsFunc = func(ctx context.Context, info *watchInfo) error { + unwatchChannelsCalled = true + unwatchChannelsChan <- struct{}{} + return nil + } + + core := newTestCore(withValidIDAllocator(), + withMeta(meta), + withTtSynchronizer(ticker), + withBroker(broker)) + + schema := &schemapb.CollectionSchema{ + Name: collectionName, + Description: "", + AutoID: false, + Fields: []*schemapb.FieldSchema{ + {Name: field1}, + }, + } + marshaledSchema, err := proto.Marshal(schema) + assert.NoError(t, err) + + task := createCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.CreateCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, + CollectionName: collectionName, + Schema: marshaledSchema, + ShardsNum: int32(shardNum), + }, + channels: collectionChannels{physicalChannels: pchans}, + schema: schema, + } + + err = task.Execute(context.Background()) + assert.Error(t, err) + + // check if undo worked. + + // undo watch. + <-unwatchChannelsChan + assert.True(t, unwatchChannelsCalled) + + // undo adding collection. + <-removeCollectionChan + assert.True(t, removeCollectionCalled) + + time.Sleep(time.Second * 2) // wait for asynchronous step done. + // undo add channels. 
+ assert.Zero(t, len(ticker.listDmlChannels())) + }) +} diff --git a/internal/rootcoord/create_partition_task.go b/internal/rootcoord/create_partition_task.go new file mode 100644 index 0000000000..7922d3c786 --- /dev/null +++ b/internal/rootcoord/create_partition_task.go @@ -0,0 +1,69 @@ +package rootcoord + +import ( + "context" + + pb "github.com/milvus-io/milvus/internal/proto/etcdpb" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + + "github.com/milvus-io/milvus/internal/log" + "github.com/milvus-io/milvus/internal/metastore/model" + "go.uber.org/zap" + + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +type createPartitionTask struct { + baseTaskV2 + Req *milvuspb.CreatePartitionRequest + collMeta *model.Collection +} + +func (t *createPartitionTask) Prepare(ctx context.Context) error { + if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_CreatePartition); err != nil { + return err + } + collMeta, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), t.GetTs()) + if err != nil { + return err + } + t.collMeta = collMeta + return nil +} + +func (t *createPartitionTask) Execute(ctx context.Context) error { + for _, partition := range t.collMeta.Partitions { + if partition.PartitionName == t.Req.GetPartitionName() { + log.Warn("add duplicate partition", zap.String("collection", t.Req.GetCollectionName()), zap.String("partition", t.Req.GetPartitionName()), zap.Uint64("ts", t.GetTs())) + return nil + } + } + + partID, err := t.core.idAllocator.AllocOne() + if err != nil { + return err + } + partition := &model.Partition{ + PartitionID: partID, + PartitionName: t.Req.GetPartitionName(), + PartitionCreatedTimestamp: t.GetTs(), + Extra: nil, + CollectionID: t.collMeta.CollectionID, + State: pb.PartitionState_PartitionCreated, + } + + undoTask := newBaseUndoTask() + undoTask.AddStep(&ExpireCacheStep{ + baseStep: baseStep{core: t.core}, + collectionNames: []string{t.Req.GetCollectionName()}, + collectionID: t.collMeta.CollectionID, + ts: t.GetTs(), + }, &NullStep{}) + undoTask.AddStep(&AddPartitionMetaStep{ + baseStep: baseStep{core: t.core}, + partition: partition, + }, &NullStep{}) // adding partition is atomic enough. 
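// Both steps take NullStep as their compensation: expiring the proxy cache is harmless to
// repeat even if the meta write never happens, and AddPartitionMeta is a single metastore
// write, so nothing needs rolling back on failure.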
+ + return undoTask.Execute(ctx) +} diff --git a/internal/rootcoord/create_partition_task_test.go b/internal/rootcoord/create_partition_task_test.go new file mode 100644 index 0000000000..9227bd8603 --- /dev/null +++ b/internal/rootcoord/create_partition_task_test.go @@ -0,0 +1,126 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/milvus-io/milvus/internal/util/funcutil" + + "github.com/milvus-io/milvus/internal/metastore/model" + + "github.com/stretchr/testify/assert" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +func Test_createPartitionTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &createPartitionTask{ + Req: &milvuspb.CreatePartitionRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}}, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("failed to get collection meta", func(t *testing.T) { + core := newTestCore(withInvalidMeta()) + task := &createPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.CreatePartitionRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreatePartition}}, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + meta := newMockMetaTable() + collectionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName} + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return coll.Clone(), nil + } + core := newTestCore(withMeta(meta)) + task := &createPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.CreatePartitionRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreatePartition}}, + } + err := task.Prepare(context.Background()) + assert.NoError(t, err) + assert.True(t, coll.Equal(*task.collMeta)) + }) +} + +func Test_createPartitionTask_Execute(t *testing.T) { + t.Run("create duplicate partition", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + partitionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{{PartitionName: partitionName}}} + task := &createPartitionTask{ + collMeta: coll, + Req: &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName}, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + }) + + t.Run("failed to allocate partition id", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + partitionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}} + core := newTestCore(withInvalidIDAllocator()) + task := &createPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + collMeta: coll, + Req: &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName}, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("failed to expire cache", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + partitionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}} + core := newTestCore(withValidIDAllocator(), withInvalidProxyManager()) + task := &createPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + collMeta: coll, + Req: &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: 
partitionName}, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("failed to add partition meta", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + partitionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}} + core := newTestCore(withValidIDAllocator(), withValidProxyManager(), withInvalidMeta()) + task := &createPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + collMeta: coll, + Req: &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName}, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + partitionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}} + meta := newMockMetaTable() + meta.AddPartitionFunc = func(ctx context.Context, partition *model.Partition) error { + return nil + } + core := newTestCore(withValidIDAllocator(), withValidProxyManager(), withMeta(meta)) + task := &createPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + collMeta: coll, + Req: &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName}, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + }) +} diff --git a/internal/rootcoord/describe_collection_task.go b/internal/rootcoord/describe_collection_task.go new file mode 100644 index 0000000000..1d22dc9bd6 --- /dev/null +++ b/internal/rootcoord/describe_collection_task.go @@ -0,0 +1,73 @@ +package rootcoord + +import ( + "context" + + "github.com/milvus-io/milvus/internal/metastore/model" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" + "github.com/milvus-io/milvus/internal/proto/schemapb" + "github.com/milvus-io/milvus/internal/util/tsoutil" + "github.com/milvus-io/milvus/internal/util/typeutil" +) + +// describeCollectionTask describe collection request task +type describeCollectionTask struct { + baseTaskV2 + Req *milvuspb.DescribeCollectionRequest + Rsp *milvuspb.DescribeCollectionResponse +} + +func (t *describeCollectionTask) Prepare(ctx context.Context) error { + if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_DescribeCollection); err != nil { + return err + } + return nil +} + +// Execute task execution +func (t *describeCollectionTask) Execute(ctx context.Context) (err error) { + var collInfo *model.Collection + t.Rsp.Status = succStatus() + + if t.Req.GetTimeStamp() == 0 { + t.Req.TimeStamp = typeutil.MaxTimestamp + } + + if t.Req.GetCollectionName() != "" { + collInfo, err = t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), t.Req.GetTimeStamp()) + if err != nil { + t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error()) + return err + } + } else { + collInfo, err = t.core.meta.GetCollectionByID(ctx, t.Req.GetCollectionID(), t.Req.GetTimeStamp()) + if err != nil { + t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error()) + return err + } + } + + t.Rsp.Schema = &schemapb.CollectionSchema{ + Name: collInfo.Name, + Description: collInfo.Description, + AutoID: collInfo.AutoID, + Fields: model.MarshalFieldModels(collInfo.Fields), + } + t.Rsp.CollectionID = collInfo.CollectionID + t.Rsp.VirtualChannelNames = collInfo.VirtualChannelNames + t.Rsp.PhysicalChannelNames = collInfo.PhysicalChannelNames + if collInfo.ShardsNum == 0 { + 
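// A ShardsNum of 0 presumably comes from collection metadata persisted before the shard
// count was recorded; falling back to the number of virtual channels matches how shards
// map one-to-one onto virtual channels at creation time.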
collInfo.ShardsNum = int32(len(collInfo.VirtualChannelNames)) + } + t.Rsp.ShardsNum = collInfo.ShardsNum + t.Rsp.ConsistencyLevel = collInfo.ConsistencyLevel + + t.Rsp.CreatedTimestamp = collInfo.CreateTime + createdPhysicalTime, _ := tsoutil.ParseHybridTs(collInfo.CreateTime) + t.Rsp.CreatedUtcTimestamp = uint64(createdPhysicalTime) + t.Rsp.Aliases = t.core.meta.ListAliasesByID(collInfo.CollectionID) + t.Rsp.StartPositions = collInfo.StartPositions + t.Rsp.CollectionName = t.Rsp.Schema.Name + return nil +} diff --git a/internal/rootcoord/describe_collection_task_test.go b/internal/rootcoord/describe_collection_task_test.go new file mode 100644 index 0000000000..08845424e6 --- /dev/null +++ b/internal/rootcoord/describe_collection_task_test.go @@ -0,0 +1,115 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/milvus-io/milvus/internal/util/funcutil" + + "github.com/milvus-io/milvus/internal/metastore/model" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" + "github.com/stretchr/testify/assert" +) + +func Test_describeCollectionTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &describeCollectionTask{ + Req: &milvuspb.DescribeCollectionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_DropCollection, + }, + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + task := &describeCollectionTask{ + Req: &milvuspb.DescribeCollectionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_DescribeCollection, + }, + }, + } + err := task.Prepare(context.Background()) + assert.NoError(t, err) + }) +} + +func Test_describeCollectionTask_Execute(t *testing.T) { + t.Run("failed to get collection by name", func(t *testing.T) { + core := newTestCore(withInvalidMeta()) + task := &describeCollectionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.DescribeCollectionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_DescribeCollection, + }, + CollectionName: "test coll", + }, + Rsp: &milvuspb.DescribeCollectionResponse{}, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists) + }) + + t.Run("failed to get collection by id", func(t *testing.T) { + core := newTestCore(withInvalidMeta()) + task := &describeCollectionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.DescribeCollectionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_DescribeCollection, + }, + CollectionID: 1, + }, + Rsp: &milvuspb.DescribeCollectionResponse{}, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists) + }) + + t.Run("success", func(t *testing.T) { + meta := newMockMetaTable() + meta.GetCollectionByIDFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) (*model.Collection, error) { + return &model.Collection{ + CollectionID: 1, + Name: "test coll", + }, nil + } + alias1, alias2 := funcutil.GenRandomStr(), funcutil.GenRandomStr() + meta.ListAliasesByIDFunc = func(collID UniqueID) []string { + return []string{alias1, alias2} + } + + core := newTestCore(withMeta(meta)) + task := &describeCollectionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 
1), + }, + Req: &milvuspb.DescribeCollectionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_DescribeCollection, + }, + CollectionID: 1, + }, + Rsp: &milvuspb.DescribeCollectionResponse{}, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success) + assert.ElementsMatch(t, []string{alias1, alias2}, task.Rsp.GetAliases()) + }) +} diff --git a/internal/rootcoord/drop_alias_task.go b/internal/rootcoord/drop_alias_task.go new file mode 100644 index 0000000000..af5f966a78 --- /dev/null +++ b/internal/rootcoord/drop_alias_task.go @@ -0,0 +1,29 @@ +package rootcoord + +import ( + "context" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +type dropAliasTask struct { + baseTaskV2 + Req *milvuspb.DropAliasRequest +} + +func (t *dropAliasTask) Prepare(ctx context.Context) error { + if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_DropAlias); err != nil { + return err + } + return nil +} + +func (t *dropAliasTask) Execute(ctx context.Context) error { + // drop alias is atomic enough. + if err := t.core.ExpireMetaCache(ctx, []string{t.Req.GetAlias()}, InvalidCollectionID, t.GetTs()); err != nil { + return err + } + return t.core.meta.DropAlias(ctx, t.Req.GetAlias(), t.GetTs()) +} diff --git a/internal/rootcoord/drop_alias_task_test.go b/internal/rootcoord/drop_alias_task_test.go new file mode 100644 index 0000000000..ee834f1a46 --- /dev/null +++ b/internal/rootcoord/drop_alias_task_test.go @@ -0,0 +1,82 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/milvus-io/milvus/internal/util/funcutil" + + "github.com/stretchr/testify/assert" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +func Test_dropAliasTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &dropAliasTask{ + Req: &milvuspb.DropAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}}, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + task := &dropAliasTask{ + Req: &milvuspb.DropAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias}}, + } + err := task.Prepare(context.Background()) + assert.NoError(t, err) + }) +} + +func Test_dropAliasTask_Execute(t *testing.T) { + t.Run("failed to expire cache", func(t *testing.T) { + core := newTestCore(withInvalidProxyManager()) + alias := funcutil.GenRandomStr() + task := &dropAliasTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropAliasRequest{ + + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias}, + Alias: alias, + }, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("failed to drop alias", func(t *testing.T) { + core := newTestCore(withValidProxyManager(), withInvalidMeta()) + alias := funcutil.GenRandomStr() + task := &dropAliasTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropAliasRequest{ + + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias}, + Alias: alias, + }, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + meta := newMockMetaTable() + meta.DropAliasFunc = func(ctx context.Context, alias string, ts Timestamp) error { + return nil + } + core := newTestCore(withValidProxyManager(), 
withMeta(meta)) + alias := funcutil.GenRandomStr() + task := &dropAliasTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropAliasRequest{ + + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias}, + Alias: alias, + }, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + }) +} diff --git a/internal/rootcoord/drop_collection_task.go b/internal/rootcoord/drop_collection_task.go new file mode 100644 index 0000000000..4a47407cbb --- /dev/null +++ b/internal/rootcoord/drop_collection_task.go @@ -0,0 +1,94 @@ +package rootcoord + +import ( + "context" + "fmt" + + "github.com/milvus-io/milvus/internal/log" + "go.uber.org/zap" + + pb "github.com/milvus-io/milvus/internal/proto/etcdpb" + + "github.com/milvus-io/milvus/internal/util/typeutil" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +type dropCollectionTask struct { + baseTaskV2 + Req *milvuspb.DropCollectionRequest +} + +func (t *dropCollectionTask) validate() error { + if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_DropCollection); err != nil { + return err + } + if t.core.meta.IsAlias(t.Req.GetCollectionName()) { + return fmt.Errorf("cannot drop the collection via alias = %s", t.Req.CollectionName) + } + return nil +} + +func (t *dropCollectionTask) Prepare(ctx context.Context) error { + return t.validate() +} + +func (t *dropCollectionTask) Execute(ctx context.Context) error { + // use max ts to check if latest collection exists. + // we cannot handle case that + // dropping collection with `ts1` but a collection exists in catalog with newer ts which is bigger than `ts1`. + // fortunately, if ddls are promised to execute in sequence, then everything is OK. The `ts1` will always be latest. + collMeta, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), typeutil.MaxTimestamp) + if err != nil { + // make dropping collection idempotent. + log.Warn("drop non-existent collection", zap.String("collection", t.Req.GetCollectionName())) + return nil + } + + // meta cache of all aliases should also be cleaned. 
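+ // From here the task is assembled as a redo task, the pattern the drop tasks in this package
+ // share: sync steps must succeed before the DDL request is acknowledged, async steps are
+ // retryable background cleanup. A minimal sketch of the idea only, using a hypothetical `step`
+ // type and `redo` struct rather than the actual baseRedoTask/Step types used below:
+ //
+ //  type step func(ctx context.Context) error
+ //
+ //  type redo struct{ syncSteps, asyncSteps []step }
+ //
+ //  func (r *redo) Execute(ctx context.Context) error {
+ //      for _, s := range r.syncSteps { // a failure here fails the whole request
+ //          if err := s(ctx); err != nil {
+ //              return err
+ //          }
+ //      }
+ //      go func() { // async steps run best-effort in the background
+ //          for _, s := range r.asyncSteps {
+ //              _ = s(ctx)
+ //          }
+ //      }()
+ //      return nil
+ //  }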
+ aliases := t.core.meta.ListAliasesByID(collMeta.CollectionID) + + ts := t.GetTs() + + redoTask := newBaseRedoTask() + + redoTask.AddSyncStep(&ExpireCacheStep{ + baseStep: baseStep{core: t.core}, + collectionNames: append(aliases, collMeta.Name), + collectionID: collMeta.CollectionID, + ts: ts, + }) + redoTask.AddSyncStep(&ChangeCollectionStateStep{ + baseStep: baseStep{core: t.core}, + collectionID: collMeta.CollectionID, + state: pb.CollectionState_CollectionDropping, + ts: ts, + }) + + redoTask.AddAsyncStep(&ReleaseCollectionStep{ + baseStep: baseStep{core: t.core}, + collectionID: collMeta.CollectionID, + }) + redoTask.AddAsyncStep(&DropIndexStep{ + baseStep: baseStep{core: t.core}, + collID: collMeta.CollectionID, + }) + redoTask.AddAsyncStep(&DeleteCollectionDataStep{ + baseStep: baseStep{core: t.core}, + coll: collMeta, + ts: ts, + }) + redoTask.AddAsyncStep(&RemoveDmlChannelsStep{ + baseStep: baseStep{core: t.core}, + pchannels: collMeta.PhysicalChannelNames, + }) + redoTask.AddAsyncStep(&DeleteCollectionMetaStep{ + baseStep: baseStep{core: t.core}, + collectionID: collMeta.CollectionID, + ts: ts, + }) + + return redoTask.Execute(ctx) +} diff --git a/internal/rootcoord/drop_collection_task_test.go b/internal/rootcoord/drop_collection_task_test.go new file mode 100644 index 0000000000..fb007b45f5 --- /dev/null +++ b/internal/rootcoord/drop_collection_task_test.go @@ -0,0 +1,216 @@ +package rootcoord + +import ( + "context" + "errors" + "testing" + + "github.com/milvus-io/milvus/internal/util/typeutil" + + "github.com/milvus-io/milvus/internal/proto/etcdpb" + + "github.com/milvus-io/milvus/internal/metastore/model" + + "github.com/milvus-io/milvus/internal/util/funcutil" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" + "github.com/stretchr/testify/assert" +) + +func Test_dropCollectionTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &dropCollectionTask{ + Req: &milvuspb.DropCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DescribeCollection}, + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("drop via alias", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + meta := newMockMetaTable() + meta.IsAliasFunc = func(name string) bool { + return true + } + core := newTestCore(withMeta(meta)) + task := &dropCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}, + CollectionName: collectionName, + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + meta := newMockMetaTable() + meta.IsAliasFunc = func(name string) bool { + return false + } + core := newTestCore(withMeta(meta)) + task := &dropCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}, + CollectionName: collectionName, + }, + } + err := task.Prepare(context.Background()) + assert.NoError(t, err) + }) +} + +func Test_dropCollectionTask_Execute(t *testing.T) { + t.Run("drop non-existent collection", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + core := newTestCore(withInvalidMeta()) + task := &dropCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropCollectionRequest{ + Base: 
&commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}, + CollectionName: collectionName, + }, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + }) + + t.Run("failed to expire cache", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName} + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return coll.Clone(), nil + } + meta.ListAliasesByIDFunc = func(collID UniqueID) []string { + return []string{} + } + core := newTestCore(withInvalidProxyManager(), withMeta(meta)) + task := &dropCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}, + CollectionName: collectionName, + }, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("failed to change collection state", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName} + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return coll.Clone(), nil + } + meta.ChangeCollectionStateFunc = func(ctx context.Context, collectionID UniqueID, state etcdpb.CollectionState, ts Timestamp) error { + return errors.New("error mock ChangeCollectionState") + } + meta.ListAliasesByIDFunc = func(collID UniqueID) []string { + return []string{} + } + core := newTestCore(withValidProxyManager(), withMeta(meta)) + task := &dropCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}, + CollectionName: collectionName, + }, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case, redo", func(t *testing.T) { + defer cleanTestEnv() + + collectionName := funcutil.GenRandomStr() + shardNum := 2 + + ticker := newRocksMqTtSynchronizer() + pchans := ticker.getDmlChannelNames(shardNum) + ticker.addDmlChannels(pchans...) 
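+ // the ticker stands in for the dml-channel/timetick machinery; registering the collection's
+ // physical channels on it lets the channel-related steps (data gc broadcast, RemoveDmlChannelsStep)
+ // run against the test synchronizer.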
+ + coll := &model.Collection{Name: collectionName, ShardsNum: int32(shardNum), PhysicalChannelNames: pchans} + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return coll.Clone(), nil + } + meta.ChangeCollectionStateFunc = func(ctx context.Context, collectionID UniqueID, state etcdpb.CollectionState, ts Timestamp) error { + return nil + } + meta.ListAliasesByIDFunc = func(collID UniqueID) []string { + return []string{} + } + removeCollectionMetaCalled := false + removeCollectionMetaChan := make(chan struct{}, 1) + meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error { + removeCollectionMetaCalled = true + removeCollectionMetaChan <- struct{}{} + return nil + } + + broker := newMockBroker() + releaseCollectionCalled := false + releaseCollectionChan := make(chan struct{}, 1) + broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error { + releaseCollectionCalled = true + releaseCollectionChan <- struct{}{} + return nil + } + dropIndexCalled := false + dropIndexChan := make(chan struct{}, 1) + broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID) error { + dropIndexCalled = true + dropIndexChan <- struct{}{} + return nil + } + + gc := newMockGarbageCollector() + deleteCollectionCalled := false + deleteCollectionChan := make(chan struct{}, 1) + gc.GcCollectionDataFunc = func(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error { + deleteCollectionCalled = true + deleteCollectionChan <- struct{}{} + return nil + } + + core := newTestCore( + withValidProxyManager(), + withMeta(meta), + withBroker(broker), + withGarbageCollector(gc), + withTtSynchronizer(ticker)) + + task := &dropCollectionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropCollectionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}, + CollectionName: collectionName, + }, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + + // check if redo worked. 
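+ // each mocked async step signals its channel when invoked, so receiving below (instead of
+ // sleeping) is what proves the background cleanup actually ran.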
+ + <-releaseCollectionChan + assert.True(t, releaseCollectionCalled) + + <-dropIndexChan + assert.True(t, dropIndexCalled) + + <-deleteCollectionChan + assert.True(t, deleteCollectionCalled) + + <-removeCollectionMetaChan + assert.True(t, removeCollectionMetaCalled) + }) +} diff --git a/internal/rootcoord/drop_partition_task.go b/internal/rootcoord/drop_partition_task.go new file mode 100644 index 0000000000..106838c377 --- /dev/null +++ b/internal/rootcoord/drop_partition_task.go @@ -0,0 +1,90 @@ +package rootcoord + +import ( + "context" + "fmt" + + "github.com/milvus-io/milvus/internal/log" + "go.uber.org/zap" + + "github.com/milvus-io/milvus/internal/metastore/model" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + pb "github.com/milvus-io/milvus/internal/proto/etcdpb" + + "github.com/milvus-io/milvus/internal/common" + + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +type dropPartitionTask struct { + baseTaskV2 + Req *milvuspb.DropPartitionRequest + collMeta *model.Collection +} + +func (t *dropPartitionTask) Prepare(ctx context.Context) error { + if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_DropPartition); err != nil { + return err + } + if t.Req.GetPartitionName() == Params.CommonCfg.DefaultPartitionName { + return fmt.Errorf("default partition cannot be deleted") + } + collMeta, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), t.GetTs()) + if err != nil { + // Is this idempotent? + return err + } + t.collMeta = collMeta + return nil +} + +func (t *dropPartitionTask) Execute(ctx context.Context) error { + partID := common.InvalidPartitionID + for _, partition := range t.collMeta.Partitions { + if partition.PartitionName == t.Req.GetPartitionName() { + partID = partition.PartitionID + break + } + } + if partID == common.InvalidPartitionID { + log.Warn("drop an non-existent partition", zap.String("collection", t.Req.GetCollectionName()), zap.String("partition", t.Req.GetPartitionName())) + // make dropping partition idempotent. + return nil + } + + redoTask := newBaseRedoTask() + redoTask.AddSyncStep(&ExpireCacheStep{ + baseStep: baseStep{core: t.core}, + collectionNames: []string{t.Req.GetCollectionName()}, + collectionID: t.collMeta.CollectionID, + ts: t.GetTs(), + }) + redoTask.AddSyncStep(&ChangePartitionStateStep{ + baseStep: baseStep{core: t.core}, + collectionID: t.collMeta.CollectionID, + partitionID: partID, + state: pb.PartitionState_PartitionDropping, + ts: t.GetTs(), + }) + + // TODO: release partition when query coord is ready. 
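+ // async cleanup mirrors dropCollectionTask: notify the data path to gc the partition's data,
+ // then remove the partition meta.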
+ redoTask.AddAsyncStep(&DeletePartitionDataStep{ + baseStep: baseStep{core: t.core}, + pchans: t.collMeta.PhysicalChannelNames, + partition: &model.Partition{ + PartitionID: partID, + PartitionName: t.Req.GetPartitionName(), + CollectionID: t.collMeta.CollectionID, + }, + ts: t.GetTs(), + }) + redoTask.AddAsyncStep(&RemovePartitionMetaStep{ + baseStep: baseStep{core: t.core}, + collectionID: t.collMeta.CollectionID, + partitionID: partID, + ts: t.GetTs(), + }) + + return redoTask.Execute(ctx) +} diff --git a/internal/rootcoord/drop_partition_task_test.go b/internal/rootcoord/drop_partition_task_test.go new file mode 100644 index 0000000000..7f12ac3bce --- /dev/null +++ b/internal/rootcoord/drop_partition_task_test.go @@ -0,0 +1,174 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/milvus-io/milvus/internal/util/typeutil" + + "github.com/milvus-io/milvus/internal/proto/etcdpb" + + "github.com/milvus-io/milvus/internal/metastore/model" + "github.com/milvus-io/milvus/internal/util/funcutil" + + "github.com/stretchr/testify/assert" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +func Test_dropPartitionTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &dropPartitionTask{ + Req: &milvuspb.DropPartitionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}, + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("drop default partition", func(t *testing.T) { + task := &dropPartitionTask{ + Req: &milvuspb.DropPartitionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition}, + PartitionName: Params.CommonCfg.DefaultPartitionName, + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("failed to get collection meta", func(t *testing.T) { + core := newTestCore(withInvalidMeta()) + task := &dropPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropPartitionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition}, + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + Params.InitOnce() + + collectionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName} + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return coll.Clone(), nil + } + core := newTestCore(withMeta(meta)) + task := &dropPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropPartitionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition}, + CollectionName: collectionName, + }, + } + err := task.Prepare(context.Background()) + assert.NoError(t, err) + assert.True(t, coll.Equal(*task.collMeta)) + }) +} + +func Test_dropPartitionTask_Execute(t *testing.T) { + t.Run("drop non-existent partition", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + partitionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}} + task := &dropPartitionTask{ + Req: &milvuspb.DropPartitionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition}, + CollectionName: collectionName, + PartitionName: partitionName, + }, + collMeta: coll.Clone(), + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + }) + + t.Run("failed to expire 
cache", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + partitionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{{PartitionName: partitionName}}} + core := newTestCore(withInvalidProxyManager()) + task := &dropPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropPartitionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition}, + CollectionName: collectionName, + PartitionName: partitionName, + }, + collMeta: coll.Clone(), + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("failed to change partition state", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + partitionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{{PartitionName: partitionName}}} + core := newTestCore(withValidProxyManager(), withInvalidMeta()) + task := &dropPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropPartitionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition}, + CollectionName: collectionName, + PartitionName: partitionName, + }, + collMeta: coll.Clone(), + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + collectionName := funcutil.GenRandomStr() + partitionName := funcutil.GenRandomStr() + coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{{PartitionName: partitionName}}} + removePartitionMetaCalled := false + removePartitionMetaChan := make(chan struct{}, 1) + meta := newMockMetaTable() + meta.ChangePartitionStateFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state etcdpb.PartitionState, ts Timestamp) error { + return nil + } + meta.RemovePartitionFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error { + removePartitionMetaCalled = true + removePartitionMetaChan <- struct{}{} + return nil + } + + gc := newMockGarbageCollector() + deletePartitionCalled := false + deletePartitionChan := make(chan struct{}, 1) + gc.GcPartitionDataFunc = func(ctx context.Context, pChannels []string, coll *model.Partition, ts typeutil.Timestamp) error { + deletePartitionChan <- struct{}{} + deletePartitionCalled = true + return nil + } + + core := newTestCore(withValidProxyManager(), withMeta(meta), withGarbageCollector(gc)) + + task := &dropPartitionTask{ + baseTaskV2: baseTaskV2{core: core}, + Req: &milvuspb.DropPartitionRequest{ + Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition}, + CollectionName: collectionName, + PartitionName: partitionName, + }, + collMeta: coll.Clone(), + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + // check if redo worked. 
+ <-removePartitionMetaChan + assert.True(t, removePartitionMetaCalled) + <-deletePartitionChan + assert.True(t, deletePartitionCalled) + }) +} diff --git a/internal/rootcoord/garbage_collector.go b/internal/rootcoord/garbage_collector.go new file mode 100644 index 0000000000..8c1b2cd093 --- /dev/null +++ b/internal/rootcoord/garbage_collector.go @@ -0,0 +1,158 @@ +package rootcoord + +import ( + "context" + "time" + + ms "github.com/milvus-io/milvus/internal/mq/msgstream" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/internalpb" + + "github.com/milvus-io/milvus/internal/util/typeutil" + + "github.com/milvus-io/milvus/internal/log" + "github.com/milvus-io/milvus/internal/metastore/model" + "go.uber.org/zap" +) + +type GarbageCollector interface { + ReDropCollection(collMeta *model.Collection, ts Timestamp) + RemoveCreatingCollection(collMeta *model.Collection) + ReDropPartition(pChannels []string, partition *model.Partition, ts Timestamp) + GcCollectionData(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error + GcPartitionData(ctx context.Context, pChannels []string, partition *model.Partition, ts typeutil.Timestamp) error +} + +type GarbageCollectorCtx struct { + s *Core +} + +func newGarbageCollectorCtx(s *Core) *GarbageCollectorCtx { + return &GarbageCollectorCtx{s: s} +} + +func (c *GarbageCollectorCtx) ReDropCollection(collMeta *model.Collection, ts Timestamp) { + // TODO: remove this after data gc can be notified by rpc. + c.s.chanTimeTick.addDmlChannels(collMeta.PhysicalChannelNames...) + defer c.s.chanTimeTick.removeDmlChannels(collMeta.PhysicalChannelNames...) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + if err := c.s.broker.ReleaseCollection(ctx, collMeta.CollectionID); err != nil { + log.Error("failed to release collection when recovery", zap.Error(err), zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID)) + return + } + + if err := c.s.broker.DropCollectionIndex(ctx, collMeta.CollectionID); err != nil { + log.Error("failed to drop collection index when recovery", zap.Error(err), zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID)) + return + } + + if err := c.GcCollectionData(ctx, collMeta, ts); err != nil { + log.Error("failed to notify datacoord to gc collection when recovery", zap.Error(err), zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID)) + return + } + + if err := c.s.meta.RemoveCollection(ctx, collMeta.CollectionID, ts); err != nil { + log.Error("failed to remove collection when recovery", zap.Error(err), zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID)) + } +} + +func (c *GarbageCollectorCtx) RemoveCreatingCollection(collMeta *model.Collection) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + if err := c.s.broker.UnwatchChannels(ctx, &watchInfo{collectionID: collMeta.CollectionID, vChannels: collMeta.VirtualChannelNames}); err != nil { + log.Error("failed to unwatch channels when recovery", + zap.Error(err), + zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID), + zap.Strings("vchans", collMeta.VirtualChannelNames), zap.Strings("pchans", collMeta.PhysicalChannelNames)) + return + } + + if err := c.s.meta.RemoveCollection(ctx, collMeta.CollectionID, collMeta.CreateTime); err != nil { + 
log.Error("failed to remove collection when recovery", zap.Error(err), zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID)) + } +} + +func (c *GarbageCollectorCtx) ReDropPartition(pChannels []string, partition *model.Partition, ts Timestamp) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + // TODO: release partition when query coord is ready. + + // TODO: remove this after data gc can be notified by rpc. + c.s.chanTimeTick.addDmlChannels(pChannels...) + defer c.s.chanTimeTick.removeDmlChannels(pChannels...) + + if err := c.GcPartitionData(ctx, pChannels, partition, ts); err != nil { + log.Error("failed to notify datanodes to gc partition", zap.Error(err)) + return + } + + if err := c.s.meta.RemovePartition(ctx, partition.CollectionID, partition.PartitionID, ts); err != nil { + log.Error("failed to remove partition when recovery", zap.Error(err)) + } +} + +func (c *GarbageCollectorCtx) GcCollectionData(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error { + msgPack := ms.MsgPack{} + baseMsg := ms.BaseMsg{ + Ctx: ctx, + BeginTimestamp: ts, + EndTimestamp: ts, + HashValues: []uint32{0}, + } + msg := &ms.DropCollectionMsg{ + BaseMsg: baseMsg, + DropCollectionRequest: internalpb.DropCollectionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_DropCollection, + Timestamp: ts, + SourceID: c.s.session.ServerID, + }, + CollectionName: coll.Name, + CollectionID: coll.CollectionID, + }, + } + msgPack.Msgs = append(msgPack.Msgs, msg) + if err := c.s.chanTimeTick.broadcastDmlChannels(coll.PhysicalChannelNames, &msgPack); err != nil { + return err + } + + // TODO: remove this after gc can be notified by rpc. Without this tt, DropCollectionMsg cannot be seen by + // datanodes. + return c.s.chanTimeTick.sendTimeTickToChannel(coll.PhysicalChannelNames, ts) +} + +func (c *GarbageCollectorCtx) GcPartitionData(ctx context.Context, pChannels []string, partition *model.Partition, ts typeutil.Timestamp) error { + msgPack := ms.MsgPack{} + baseMsg := ms.BaseMsg{ + Ctx: ctx, + BeginTimestamp: ts, + EndTimestamp: ts, + HashValues: []uint32{0}, + } + msg := &ms.DropPartitionMsg{ + BaseMsg: baseMsg, + DropPartitionRequest: internalpb.DropPartitionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_DropPartition, + Timestamp: ts, + SourceID: c.s.session.ServerID, + }, + PartitionName: partition.PartitionName, + CollectionID: partition.CollectionID, + PartitionID: partition.PartitionID, + }, + } + msgPack.Msgs = append(msgPack.Msgs, msg) + if err := c.s.chanTimeTick.broadcastDmlChannels(pChannels, &msgPack); err != nil { + return err + } + + // TODO: remove this after gc can be notified by rpc. Without this tt, DropCollectionMsg cannot be seen by + // datanodes. 
+ return c.s.chanTimeTick.sendTimeTickToChannel(pChannels, ts) +} diff --git a/internal/rootcoord/garbage_collector_test.go b/internal/rootcoord/garbage_collector_test.go new file mode 100644 index 0000000000..ad5ce9f5dc --- /dev/null +++ b/internal/rootcoord/garbage_collector_test.go @@ -0,0 +1,208 @@ +package rootcoord + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/milvus-io/milvus/internal/metastore/model" +) + +func TestGarbageCollectorCtx_ReDropCollection(t *testing.T) { + t.Run("failed to release collection", func(t *testing.T) { + broker := newMockBroker() + broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error { + return errors.New("error mock ReleaseCollection") + } + ticker := newTickerWithMockNormalStream() + core := newTestCore(withBroker(broker), withTtSynchronizer(ticker)) + gc := newGarbageCollectorCtx(core) + gc.ReDropCollection(&model.Collection{}, 1000) + }) + + t.Run("failed to DropCollectionIndex", func(t *testing.T) { + broker := newMockBroker() + releaseCollectionCalled := false + broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error { + releaseCollectionCalled = true + return nil + } + broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID) error { + return errors.New("error mock DropCollectionIndex") + } + ticker := newTickerWithMockNormalStream() + core := newTestCore(withBroker(broker), withTtSynchronizer(ticker)) + gc := newGarbageCollectorCtx(core) + gc.ReDropCollection(&model.Collection{}, 1000) + assert.True(t, releaseCollectionCalled) + }) + + t.Run("failed to GcCollectionData", func(t *testing.T) { + broker := newMockBroker() + releaseCollectionCalled := false + broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error { + releaseCollectionCalled = true + return nil + } + dropCollectionIndexCalled := false + broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID) error { + dropCollectionIndexCalled = true + return nil + } + ticker := newTickerWithMockFailStream() // failed to broadcast drop msg. 
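+ // with the failing stream, GcCollectionData cannot broadcast the drop message, so
+ // ReDropCollection stops before RemoveCollection is reached.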
+ core := newTestCore(withBroker(broker), withTtSynchronizer(ticker)) + gc := newGarbageCollectorCtx(core) + shardsNum := 2 + pchans := ticker.getDmlChannelNames(shardsNum) + gc.ReDropCollection(&model.Collection{PhysicalChannelNames: pchans}, 1000) + assert.True(t, releaseCollectionCalled) + assert.True(t, dropCollectionIndexCalled) + }) + + t.Run("failed to remove collection", func(t *testing.T) { + broker := newMockBroker() + releaseCollectionCalled := false + broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error { + releaseCollectionCalled = true + return nil + } + dropCollectionIndexCalled := false + broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID) error { + dropCollectionIndexCalled = true + return nil + } + meta := newMockMetaTable() + meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error { + return errors.New("error mock RemoveCollection") + } + ticker := newTickerWithMockNormalStream() + core := newTestCore(withBroker(broker), + withTtSynchronizer(ticker), + withMeta(meta)) + gc := newGarbageCollectorCtx(core) + gc.ReDropCollection(&model.Collection{}, 1000) + assert.True(t, releaseCollectionCalled) + assert.True(t, dropCollectionIndexCalled) + }) + + t.Run("normal case", func(t *testing.T) { + broker := newMockBroker() + releaseCollectionCalled := false + broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error { + releaseCollectionCalled = true + return nil + } + dropCollectionIndexCalled := false + broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID) error { + dropCollectionIndexCalled = true + return nil + } + meta := newMockMetaTable() + removeCollectionCalled := false + meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error { + removeCollectionCalled = true + return nil + } + ticker := newTickerWithMockNormalStream() + core := newTestCore(withBroker(broker), + withTtSynchronizer(ticker), + withMeta(meta)) + gc := newGarbageCollectorCtx(core) + gc.ReDropCollection(&model.Collection{}, 1000) + assert.True(t, releaseCollectionCalled) + assert.True(t, dropCollectionIndexCalled) + assert.True(t, removeCollectionCalled) + }) +} + +func TestGarbageCollectorCtx_RemoveCreatingCollection(t *testing.T) { + t.Run("failed to UnwatchChannels", func(t *testing.T) { + broker := newMockBroker() + broker.UnwatchChannelsFunc = func(ctx context.Context, info *watchInfo) error { + return errors.New("error mock UnwatchChannels") + } + core := newTestCore(withBroker(broker)) + gc := newGarbageCollectorCtx(core) + gc.RemoveCreatingCollection(&model.Collection{}) + }) + + t.Run("failed to RemoveCollection", func(t *testing.T) { + broker := newMockBroker() + unwatchChannelsCalled := false + broker.UnwatchChannelsFunc = func(ctx context.Context, info *watchInfo) error { + unwatchChannelsCalled = true + return nil + } + meta := newMockMetaTable() + meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error { + return errors.New("error mock RemoveCollection") + } + core := newTestCore(withBroker(broker), withMeta(meta)) + gc := newGarbageCollectorCtx(core) + gc.RemoveCreatingCollection(&model.Collection{}) + assert.True(t, unwatchChannelsCalled) + }) + + t.Run("normal case", func(t *testing.T) { + broker := newMockBroker() + unwatchChannelsCalled := false + broker.UnwatchChannelsFunc = func(ctx context.Context, info *watchInfo) error { + unwatchChannelsCalled = true + 
return nil + } + meta := newMockMetaTable() + removeCollectionCalled := false + meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error { + removeCollectionCalled = true + return nil + } + core := newTestCore(withBroker(broker), withMeta(meta)) + gc := newGarbageCollectorCtx(core) + gc.RemoveCreatingCollection(&model.Collection{}) + assert.True(t, unwatchChannelsCalled) + assert.True(t, removeCollectionCalled) + }) +} + +// func TestGarbageCollectorCtx_ReDropPartition(t *testing.T) { +// t.Run("failed to GcPartitionData", func(t *testing.T) { +// ticker := newTickerWithMockFailStream() // failed to broadcast drop msg. +// shardsNum := 2 +// pchans := ticker.getDmlChannelNames(shardsNum) +// core := newTestCore(withTtSynchronizer(ticker)) +// gc := newGarbageCollectorCtx(core) +// gc.ReDropPartition(pchans, &model.Partition{}, 100000) +// }) +// +// t.Run("failed to RemovePartition", func(t *testing.T) { +// ticker := newTickerWithMockNormalStream() +// shardsNum := 2 +// pchans := ticker.getDmlChannelNames(shardsNum) +// meta := newMockMetaTable() +// meta.RemovePartitionFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error { +// return errors.New("error mock RemovePartition") +// } +// core := newTestCore(withMeta(meta), withTtSynchronizer(ticker)) +// gc := newGarbageCollectorCtx(core) +// gc.ReDropPartition(pchans, &model.Partition{}, 100000) +// }) +// +// t.Run("normal case", func(t *testing.T) { +// ticker := newTickerWithMockNormalStream() +// shardsNum := 2 +// pchans := ticker.getDmlChannelNames(shardsNum) +// meta := newMockMetaTable() +// removePartitionCalled := false +// meta.RemovePartitionFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error { +// removePartitionCalled = true +// return nil +// } +// core := newTestCore(withMeta(meta), withTtSynchronizer(ticker)) +// gc := newGarbageCollectorCtx(core) +// gc.ReDropPartition(pchans, &model.Partition{}, 100000) +// assert.True(t, removePartitionCalled) +// }) +// } +// diff --git a/internal/rootcoord/has_collection_task.go b/internal/rootcoord/has_collection_task.go new file mode 100644 index 0000000000..065af3a7d6 --- /dev/null +++ b/internal/rootcoord/has_collection_task.go @@ -0,0 +1,35 @@ +package rootcoord + +import ( + "context" + + "github.com/milvus-io/milvus/internal/util/typeutil" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +// hasCollectionTask has collection request task +type hasCollectionTask struct { + baseTaskV2 + Req *milvuspb.HasCollectionRequest + Rsp *milvuspb.BoolResponse +} + +func (t *hasCollectionTask) Prepare(ctx context.Context) error { + if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_HasCollection); err != nil { + return err + } + return nil +} + +// Execute task execution +func (t *hasCollectionTask) Execute(ctx context.Context) error { + t.Rsp.Status = succStatus() + if t.Req.GetTimeStamp() == 0 { + t.Req.TimeStamp = typeutil.MaxTimestamp + } + _, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), t.Req.GetTimeStamp()) + t.Rsp.Value = err == nil + return nil +} diff --git a/internal/rootcoord/has_collection_task_test.go b/internal/rootcoord/has_collection_task_test.go new file mode 100644 index 0000000000..c493286786 --- /dev/null +++ b/internal/rootcoord/has_collection_task_test.go @@ -0,0 +1,85 @@ +package rootcoord + +import ( + "context" + "testing" + + 
"github.com/milvus-io/milvus/internal/metastore/model" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" + "github.com/stretchr/testify/assert" +) + +func Test_hasCollectionTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &hasCollectionTask{ + Req: &milvuspb.HasCollectionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_Undefined, + }, + CollectionName: "test coll", + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + task := &hasCollectionTask{ + Req: &milvuspb.HasCollectionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_HasCollection, + }, + }, + } + err := task.Prepare(context.Background()) + assert.NoError(t, err) + }) +} + +func Test_hasCollectionTask_Execute(t *testing.T) { + t.Run("failed", func(t *testing.T) { + core := newTestCore(withInvalidMeta()) + task := &hasCollectionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.HasCollectionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_HasCollection, + }, + }, + Rsp: &milvuspb.BoolResponse{}, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success) + assert.False(t, task.Rsp.GetValue()) + }) + + t.Run("success", func(t *testing.T) { + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return nil, nil + } + core := newTestCore(withMeta(meta)) + task := &hasCollectionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.HasCollectionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_HasCollection, + }, + }, + Rsp: &milvuspb.BoolResponse{}, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success) + assert.True(t, task.Rsp.GetValue()) + }) +} diff --git a/internal/rootcoord/has_partition_task.go b/internal/rootcoord/has_partition_task.go new file mode 100644 index 0000000000..3ccdee83f9 --- /dev/null +++ b/internal/rootcoord/has_partition_task.go @@ -0,0 +1,43 @@ +package rootcoord + +import ( + "context" + + "github.com/milvus-io/milvus/internal/util/typeutil" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" +) + +// hasPartitionTask has partition request task +type hasPartitionTask struct { + baseTaskV2 + Req *milvuspb.HasPartitionRequest + Rsp *milvuspb.BoolResponse +} + +func (t *hasPartitionTask) Prepare(ctx context.Context) error { + if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_HasPartition); err != nil { + return err + } + return nil +} + +// Execute task execution +func (t *hasPartitionTask) Execute(ctx context.Context) error { + t.Rsp.Status = succStatus() + t.Rsp.Value = false + // TODO: why HasPartitionRequest doesn't contain Timestamp but other requests do. 
+ coll, err := t.core.meta.GetCollectionByName(ctx, t.Req.CollectionName, typeutil.MaxTimestamp) + if err != nil { + t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error()) + return err + } + for _, part := range coll.Partitions { + if part.PartitionName == t.Req.PartitionName { + t.Rsp.Value = true + break + } + } + return nil +} diff --git a/internal/rootcoord/has_partition_task_test.go b/internal/rootcoord/has_partition_task_test.go new file mode 100644 index 0000000000..3e31b3bcd7 --- /dev/null +++ b/internal/rootcoord/has_partition_task_test.go @@ -0,0 +1,127 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/milvus-io/milvus/internal/metastore/model" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" + "github.com/stretchr/testify/assert" +) + +func Test_hasPartitionTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &hasPartitionTask{ + Req: &milvuspb.HasPartitionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_Undefined, + }, + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + task := &hasPartitionTask{ + Req: &milvuspb.HasPartitionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_HasPartition, + }, + }, + } + err := task.Prepare(context.Background()) + assert.NoError(t, err) + }) +} + +func Test_hasPartitionTask_Execute(t *testing.T) { + t.Run("fail to get collection", func(t *testing.T) { + core := newTestCore(withInvalidMeta()) + task := &hasPartitionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.HasPartitionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_HasPartition, + }, + CollectionName: "test coll", + }, + Rsp: &milvuspb.BoolResponse{}, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists) + assert.False(t, task.Rsp.GetValue()) + }) + + t.Run("failed", func(t *testing.T) { + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return &model.Collection{ + Partitions: []*model.Partition{ + { + PartitionName: "invalid test partition", + }, + }, + }, nil + } + core := newTestCore(withMeta(meta)) + task := &hasPartitionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.HasPartitionRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_HasCollection, + }, + CollectionName: "test coll", + PartitionName: "test partition", + }, + Rsp: &milvuspb.BoolResponse{}, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success) + assert.False(t, task.Rsp.GetValue()) + }) + + t.Run("success", func(t *testing.T) { + meta := newMockMetaTable() + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return &model.Collection{ + Partitions: []*model.Partition{ + { + PartitionName: "invalid test partition", + }, + { + PartitionName: "test partition", + }, + }, + }, nil + } + core := newTestCore(withMeta(meta)) + task := &hasPartitionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.HasPartitionRequest{ + Base: 
&commonpb.MsgBase{ + MsgType: commonpb.MsgType_HasCollection, + }, + CollectionName: "test coll", + PartitionName: "test partition", + }, + Rsp: &milvuspb.BoolResponse{}, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success) + assert.True(t, task.Rsp.GetValue()) + }) +} diff --git a/internal/rootcoord/import_helper.go b/internal/rootcoord/import_helper.go new file mode 100644 index 0000000000..0ade04e85a --- /dev/null +++ b/internal/rootcoord/import_helper.go @@ -0,0 +1,71 @@ +package rootcoord + +import ( + "context" + + "github.com/milvus-io/milvus/internal/log" + "github.com/milvus-io/milvus/internal/proto/datapb" + "go.uber.org/zap" +) + +type GetCollectionNameFunc func(collID, partitionID UniqueID) (string, string, error) +type IDAllocator func(count uint32) (UniqueID, UniqueID, error) +type ImportFunc func(ctx context.Context, req *datapb.ImportTaskRequest) *datapb.ImportTaskResponse + +type ImportFactory interface { + NewGetCollectionNameFunc() GetCollectionNameFunc + NewIDAllocator() IDAllocator + NewImportFunc() ImportFunc +} + +type ImportFactoryImpl struct { + c *Core +} + +func (f ImportFactoryImpl) NewGetCollectionNameFunc() GetCollectionNameFunc { + return GetCollectionNameWithCore(f.c) +} + +func (f ImportFactoryImpl) NewIDAllocator() IDAllocator { + return IDAllocatorWithCore(f.c) +} + +func (f ImportFactoryImpl) NewImportFunc() ImportFunc { + return ImportFuncWithCore(f.c) +} + +func NewImportFactory(c *Core) ImportFactory { + return &ImportFactoryImpl{c: c} +} + +func GetCollectionNameWithCore(c *Core) GetCollectionNameFunc { + return func(collID, partitionID UniqueID) (string, string, error) { + colName, err := c.meta.GetCollectionNameByID(collID) + if err != nil { + log.Error("Core failed to get collection name by id", zap.Int64("ID", collID), zap.Error(err)) + return "", "", err + } + + partName, err := c.meta.GetPartitionNameByID(collID, partitionID, 0) + if err != nil { + log.Error("Core failed to get partition name by id", zap.Int64("ID", partitionID), zap.Error(err)) + return colName, "", err + } + + return colName, partName, nil + } +} + +func IDAllocatorWithCore(c *Core) IDAllocator { + return func(count uint32) (UniqueID, UniqueID, error) { + return c.idAllocator.Alloc(count) + } +} + +func ImportFuncWithCore(c *Core) ImportFunc { + return func(ctx context.Context, req *datapb.ImportTaskRequest) *datapb.ImportTaskResponse { + // TODO: better to handle error here. 
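+ // the broker error is dropped here, so the caller only sees whatever response the broker
+ // returned, even on failure.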
+ resp, _ := c.broker.Import(ctx, req) + return resp + } +} diff --git a/internal/rootcoord/meta_table.go b/internal/rootcoord/meta_table.go index fcdde27fc8..7925fb724e 100644 --- a/internal/rootcoord/meta_table.go +++ b/internal/rootcoord/meta_table.go @@ -22,6 +22,10 @@ import ( "fmt" "sync" + "github.com/milvus-io/milvus/internal/common" + + pb "github.com/milvus-io/milvus/internal/proto/etcdpb" + "go.uber.org/zap" "github.com/milvus-io/milvus/internal/log" @@ -64,809 +68,590 @@ const ( DefaultStringIndexType = "Trie" ) -// MetaTable store all rootCoord meta info +type IMetaTable interface { + AddCollection(ctx context.Context, coll *model.Collection) error + ChangeCollectionState(ctx context.Context, collectionID UniqueID, state pb.CollectionState, ts Timestamp) error + RemoveCollection(ctx context.Context, collectionID UniqueID, ts Timestamp) error + GetCollectionByName(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) + GetCollectionByID(ctx context.Context, collectionID UniqueID, ts Timestamp) (*model.Collection, error) + ListCollections(ctx context.Context, ts Timestamp) ([]*model.Collection, error) + ListAbnormalCollections(ctx context.Context, ts Timestamp) ([]*model.Collection, error) + ListCollectionPhysicalChannels() map[typeutil.UniqueID][]string + AddPartition(ctx context.Context, partition *model.Partition) error + ChangePartitionState(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state pb.PartitionState, ts Timestamp) error + RemovePartition(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error + CreateAlias(ctx context.Context, alias string, collectionName string, ts Timestamp) error + DropAlias(ctx context.Context, alias string, ts Timestamp) error + AlterAlias(ctx context.Context, alias string, collectionName string, ts Timestamp) error + + // TODO: it'll be a big cost if we handle the time travel logic, since we should always list all aliases in catalog. + IsAlias(name string) bool + ListAliasesByID(collID UniqueID) []string + + // TODO: better to accept ctx. + // TODO: should GetCollectionNameByID & GetCollectionIDByName also accept ts? + GetCollectionNameByID(collID UniqueID) (string, error) // serve for bulk load. + GetPartitionNameByID(collID UniqueID, partitionID UniqueID, ts Timestamp) (string, error) // serve for bulk load. + GetCollectionIDByName(name string) (UniqueID, error) // serve for bulk load. + GetPartitionByName(collID UniqueID, partitionName string, ts Timestamp) (UniqueID, error) // serve for bulk load. + + // TODO: better to accept ctx. + AddCredential(credInfo *internalpb.CredentialInfo) error + GetCredential(username string) (*internalpb.CredentialInfo, error) + DeleteCredential(username string) error + AlterCredential(credInfo *internalpb.CredentialInfo) error + ListCredentialUsernames() (*milvuspb.ListCredUsersResponse, error) + + // TODO: better to accept ctx. 
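+ // role and privilege (RBAC) related metadata operations.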
+ CreateRole(tenant string, entity *milvuspb.RoleEntity) error + DropRole(tenant string, roleName string) error + OperateUserRole(tenant string, userEntity *milvuspb.UserEntity, roleEntity *milvuspb.RoleEntity, operateType milvuspb.OperateUserRoleType) error + SelectRole(tenant string, entity *milvuspb.RoleEntity, includeUserInfo bool) ([]*milvuspb.RoleResult, error) + SelectUser(tenant string, entity *milvuspb.UserEntity, includeRoleInfo bool) ([]*milvuspb.UserResult, error) + OperatePrivilege(tenant string, entity *milvuspb.GrantEntity, operateType milvuspb.OperatePrivilegeType) error + SelectGrant(tenant string, entity *milvuspb.GrantEntity) ([]*milvuspb.GrantEntity, error) + DropGrant(tenant string, role *milvuspb.RoleEntity) error + ListPolicy(tenant string) ([]string, error) + ListUserRole(tenant string) ([]string, error) +} + type MetaTable struct { ctx context.Context catalog metastore.RootCoordCatalog - collID2Meta map[typeutil.UniqueID]model.Collection // collection id -> collection meta - collName2ID map[string]typeutil.UniqueID // collection name to collection id - collAlias2ID map[string]typeutil.UniqueID // collection alias to collection id - //partID2IndexedSegID map[typeutil.UniqueID]map[typeutil.UniqueID]bool // partition id -> segment_id -> bool - //segID2IndexID map[typeutil.UniqueID]typeutil.UniqueID // segment_id -> index_id - //indexID2Meta map[typeutil.UniqueID]*model.Index // collection id/index_id -> meta + collID2Meta map[typeutil.UniqueID]*model.Collection // collection id -> collection meta + collName2ID map[string]typeutil.UniqueID // collection name to collection id + collAlias2ID map[string]typeutil.UniqueID // collection alias to collection id ddLock sync.RWMutex permissionLock sync.RWMutex } -// NewMetaTable creates meta table for rootcoord, which stores all in-memory information -// for collection, partition, segment, index etc. 
func NewMetaTable(ctx context.Context, catalog metastore.RootCoordCatalog) (*MetaTable, error) { mt := &MetaTable{ ctx: contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName), catalog: catalog, - ddLock: sync.RWMutex{}, } - err := mt.reloadFromCatalog() - if err != nil { + if err := mt.reload(); err != nil { return nil, err } return mt, nil } -func (mt *MetaTable) reloadFromCatalog() error { - mt.collID2Meta = make(map[typeutil.UniqueID]model.Collection) - mt.collName2ID = make(map[string]typeutil.UniqueID) - mt.collAlias2ID = make(map[string]typeutil.UniqueID) - - collAliases, err := mt.catalog.ListAliases(mt.ctx, 0) - if err != nil { - return err - } - for _, aliasInfo := range collAliases { - mt.collAlias2ID[aliasInfo.Name] = aliasInfo.CollectionID - } - - collMap, err := mt.catalog.ListCollections(mt.ctx, 0) - if err != nil { - return err - } - - for _, coll := range collMap { - if _, ok := mt.collAlias2ID[coll.Name]; ok { - continue - } - - mt.collID2Meta[coll.CollectionID] = *coll - mt.collName2ID[coll.Name] = coll.CollectionID - } - - log.Debug("reload meta table from KV successfully") - return nil -} - -// AddCollection add collection -func (mt *MetaTable) AddCollection(coll *model.Collection, ts typeutil.Timestamp, ddOpStr string) error { +func (mt *MetaTable) reload() error { mt.ddLock.Lock() defer mt.ddLock.Unlock() - if _, ok := mt.collName2ID[coll.Name]; ok { - return fmt.Errorf("collection %s exist", coll.Name) - } + mt.collID2Meta = make(map[UniqueID]*model.Collection) + mt.collName2ID = make(map[string]UniqueID) + mt.collAlias2ID = make(map[string]UniqueID) - coll.CreateTime = ts - for _, partition := range coll.Partitions { - partition.PartitionCreatedTimestamp = ts - } - - if err := mt.catalog.CreateCollection(mt.ctx, coll, ts); err != nil { + // max ts means listing latest resources, meta table should always cache the latest version of catalog. + collections, err := mt.catalog.ListCollections(mt.ctx, typeutil.MaxTimestamp) + if err != nil { return err } + for name, collection := range collections { + mt.collID2Meta[collection.CollectionID] = collection + mt.collName2ID[name] = collection.CollectionID + } - mt.collID2Meta[coll.CollectionID] = *coll + // max ts means listing latest resources, meta table should always cache the latest version of catalog. 
+ aliases, err := mt.catalog.ListAliases(mt.ctx, typeutil.MaxTimestamp) + if err != nil { + return err + } + for _, alias := range aliases { + mt.collAlias2ID[alias.Name] = alias.CollectionID + } + + return nil +} + +func (mt *MetaTable) AddCollection(ctx context.Context, coll *model.Collection) error { + mt.ddLock.Lock() + defer mt.ddLock.Unlock() + + // Note: + // 1, idempotency check was already done outside; + // 2, no need to check time travel logic, since ts should always be the latest; + + if coll.State != pb.CollectionState_CollectionCreating { + return fmt.Errorf("collection state should be creating, collection name: %s, collection id: %d, state: %s", coll.Name, coll.CollectionID, coll.State) + } + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + if err := mt.catalog.CreateCollection(ctx1, coll, coll.CreateTime); err != nil { + return err + } mt.collName2ID[coll.Name] = coll.CollectionID + mt.collID2Meta[coll.CollectionID] = coll.Clone() + log.Info("add collection to meta table", zap.String("collection", coll.Name), + zap.Int64("id", coll.CollectionID), zap.Uint64("ts", coll.CreateTime)) return nil } -// DeleteCollection delete collection -func (mt *MetaTable) DeleteCollection(collID typeutil.UniqueID, ts typeutil.Timestamp, ddOpStr string) error { +func (mt *MetaTable) ChangeCollectionState(ctx context.Context, collectionID UniqueID, state pb.CollectionState, ts Timestamp) error { mt.ddLock.Lock() defer mt.ddLock.Unlock() - col, ok := mt.collID2Meta[collID] + coll, ok := mt.collID2Meta[collectionID] if !ok { - return fmt.Errorf("can't find collection. id = %d", collID) + return nil } - - var aliases []string - // delete collection aliases - for alias, cid := range mt.collAlias2ID { - if cid == collID { - aliases = append(aliases, alias) - } - } - - collection := &model.Collection{ - CollectionID: collID, - Aliases: aliases, - } - - if err := mt.catalog.DropCollection(mt.ctx, collection, ts); err != nil { + clone := coll.Clone() + clone.State = state + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + if err := mt.catalog.AlterCollection(ctx1, coll, clone, metastore.MODIFY, ts); err != nil { return err } + mt.collID2Meta[collectionID] = clone + log.Info("change collection state", zap.Int64("collection", collectionID), + zap.String("state", state.String()), zap.Uint64("ts", ts)) - //// update segID2IndexID - //for _, partition := range col.Partitions { - // partID := partition.PartitionID - // if segIDMap, ok := mt.partID2IndexedSegID[partID]; ok { - // for segID := range segIDMap { - // delete(mt.segID2IndexID, segID) - // } - // } - // delete(mt.partID2IndexedSegID, partID) - //} - // - //for _, t := range col.FieldIDToIndexID { - // delete(mt.indexID2Meta, t.Value) - //} + return nil +} + +func (mt *MetaTable) RemoveCollection(ctx context.Context, collectionID UniqueID, ts Timestamp) error { + mt.ddLock.Lock() + defer mt.ddLock.Unlock() + + // Note: we cannot handle case that dropping collection with `ts1` but a collection exists in catalog with newer ts + // which is bigger than `ts1`. So we assume that ts should always be the latest. 
+ + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + aliases := mt.listAliasesByID(collectionID) + if err := mt.catalog.DropCollection(ctx1, &model.Collection{CollectionID: collectionID, Aliases: aliases}, ts); err != nil { + return err + } + + var name string + coll, ok := mt.collID2Meta[collectionID] + if ok && coll != nil { + name = coll.Name + delete(mt.collName2ID, name) + } + delete(mt.collID2Meta, collectionID) - // delete collection aliases for _, alias := range aliases { delete(mt.collAlias2ID, alias) } - delete(mt.collID2Meta, collID) - delete(mt.collName2ID, col.Name) - + log.Info("remove collection", zap.String("name", name), zap.Int64("id", collectionID), zap.Strings("aliases", aliases)) return nil } -// HasCollection return collection existence -func (mt *MetaTable) HasCollection(collID typeutil.UniqueID, ts typeutil.Timestamp) bool { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - if ts == 0 { - _, ok := mt.collID2Meta[collID] - return ok - } +// getCollectionByIDInternal gets collection meta by collection id without holding the lock. +func (mt *MetaTable) getCollectionByIDInternal(ctx context.Context, collectionID UniqueID, ts Timestamp) (*model.Collection, error) { + var coll *model.Collection + var err error - return mt.catalog.CollectionExists(mt.ctx, collID, ts) -} - -// GetCollectionIDByName returns the collection ID according to its name. -// Returns an error if no matching ID is found. -func (mt *MetaTable) GetCollectionIDByName(cName string) (typeutil.UniqueID, error) { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - var cID UniqueID - var ok bool - if cID, ok = mt.collName2ID[cName]; !ok { - return 0, fmt.Errorf("collection ID not found for collection name %s", cName) - } - return cID, nil -} - -// GetCollectionNameByID returns the collection name according to its ID. -// Returns an error if no matching name is found.
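The getCollectionByIDInternal helper that begins here replaces the removed ts == 0 special case: a read whose timestamp is not older than the cached CreateTime is served from memory, while a missing or not-yet-available cache entry, or an older timestamp, is resolved from the catalog snapshot. A hedged caller-side sketch (collectionID and historicalTS are illustrative placeholders, not code from this patch):

	// Latest view, normally served from the in-memory cache.
	latest, err := mt.GetCollectionByID(ctx, collectionID, typeutil.MaxTimestamp)
	if err != nil {
		return err
	}
	// Read at an explicit historical timestamp; if it predates the cached CreateTime,
	// the lookup goes to the catalog and fails when the collection did not exist yet.
	if _, err := mt.GetCollectionByID(ctx, collectionID, historicalTS); err != nil {
		log.Warn("collection not visible at ts", zap.Uint64("ts", historicalTS), zap.Error(err))
	}
	log.Info("latest meta", zap.Uint64("createTime", latest.CreateTime))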
-func (mt *MetaTable) GetCollectionNameByID(collectionID typeutil.UniqueID) (string, error) { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - col, ok := mt.collID2Meta[collectionID] - if !ok { - return "", fmt.Errorf("can't find collection id : %d", collectionID) - } - return col.Name, nil -} - -// GetCollectionByID return collection meta by collection id -func (mt *MetaTable) GetCollectionByID(collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - - if ts == 0 { - col, ok := mt.collID2Meta[collectionID] - if !ok { - return nil, fmt.Errorf("can't find collection id : %d", collectionID) - } - return col.Clone(), nil - } - - return mt.catalog.GetCollectionByID(mt.ctx, collectionID, ts) -} - -// GetCollectionByName return collection meta by collection name -func (mt *MetaTable) GetCollectionByName(collectionName string, ts typeutil.Timestamp) (*model.Collection, error) { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - - if ts == 0 { - vid, ok := mt.collName2ID[collectionName] - if !ok { - if vid, ok = mt.collAlias2ID[collectionName]; !ok { - return nil, fmt.Errorf("can't find collection: " + collectionName) - } - } - col, ok := mt.collID2Meta[vid] - if !ok { - return nil, fmt.Errorf("can't find collection %s with id %d", collectionName, vid) - } - - return col.Clone(), nil - } - - return mt.catalog.GetCollectionByName(mt.ctx, collectionName, ts) -} - -// ListCollections list all collection names -func (mt *MetaTable) ListCollections(ts typeutil.Timestamp) (map[string]*model.Collection, error) { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - cols := make(map[string]*model.Collection) - - if ts == 0 { - for collName, collID := range mt.collName2ID { - col := mt.collID2Meta[collID] - cols[collName] = col.Clone() - } - return cols, nil - } - - return mt.catalog.ListCollections(mt.ctx, ts) -} - -// ListAliases list all collection aliases -func (mt *MetaTable) ListAliases(collID typeutil.UniqueID) []string { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - var aliases []string - for alias, cid := range mt.collAlias2ID { - if cid == collID { - aliases = append(aliases, alias) + coll, ok := mt.collID2Meta[collectionID] + if !ok || !coll.Available() || coll.CreateTime > ts { + // travel meta information from catalog. + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + coll, err = mt.catalog.GetCollectionByID(ctx1, collectionID, ts) + if err != nil { + return nil, err } } - return aliases + + if !coll.Available() { + // use coll.Name to match error message of regression. TODO: remove this after error code is ready. + return nil, fmt.Errorf("can't find collection: %s", coll.Name) + } + + clone := coll.Clone() + // pick available partitions. 
+ clone.Partitions = nil + for _, partition := range coll.Partitions { + if partition.Available() { + clone.Partitions = append(clone.Partitions, partition.Clone()) + } + } + return clone, nil } -// ListCollectionVirtualChannels list virtual channels of all collections -func (mt *MetaTable) ListCollectionVirtualChannels() map[typeutil.UniqueID][]string { +func (mt *MetaTable) GetCollectionByName(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { mt.ddLock.RLock() defer mt.ddLock.RUnlock() - chanMap := make(map[typeutil.UniqueID][]string) - for id, collInfo := range mt.collID2Meta { - chanMap[id] = collInfo.VirtualChannelNames + var collectionID UniqueID + + collectionID, ok := mt.collAlias2ID[collectionName] + if ok { + return mt.getCollectionByIDInternal(ctx, collectionID, ts) } - return chanMap + + collectionID, ok = mt.collName2ID[collectionName] + if ok { + return mt.getCollectionByIDInternal(ctx, collectionID, ts) + } + + // travel meta information from catalog. No need to check time travel logic again, since catalog already did. + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + coll, err := mt.catalog.GetCollectionByName(ctx1, collectionName, ts) + if err != nil { + return nil, err + } + if !coll.Available() { + return nil, fmt.Errorf("can't find collection: %s", collectionName) + } + return coll, nil } -// ListCollectionPhysicalChannels list physical channels of all collections +func (mt *MetaTable) GetCollectionByID(ctx context.Context, collectionID UniqueID, ts Timestamp) (*model.Collection, error) { + mt.ddLock.RLock() + defer mt.ddLock.RUnlock() + + return mt.getCollectionByIDInternal(ctx, collectionID, ts) +} + +func (mt *MetaTable) ListCollections(ctx context.Context, ts Timestamp) ([]*model.Collection, error) { + mt.ddLock.RLock() + defer mt.ddLock.RUnlock() + + // list collections should always be loaded from catalog. + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + colls, err := mt.catalog.ListCollections(ctx1, ts) + if err != nil { + return nil, err + } + onlineCollections := make([]*model.Collection, 0, len(colls)) + for _, coll := range colls { + if coll.Available() { + onlineCollections = append(onlineCollections, coll) + } + } + return onlineCollections, nil +} + +func (mt *MetaTable) ListAbnormalCollections(ctx context.Context, ts Timestamp) ([]*model.Collection, error) { + mt.ddLock.RLock() + defer mt.ddLock.RUnlock() + + // list collections should always be loaded from catalog. + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + colls, err := mt.catalog.ListCollections(ctx1, ts) + if err != nil { + return nil, err + } + abnormalCollections := make([]*model.Collection, 0, len(colls)) + for _, coll := range colls { + if !coll.Available() { + abnormalCollections = append(abnormalCollections, coll) + } + } + return abnormalCollections, nil +} + +// ListCollectionPhysicalChannels list physical channels of all collections. 
func (mt *MetaTable) ListCollectionPhysicalChannels() map[typeutil.UniqueID][]string { mt.ddLock.RLock() defer mt.ddLock.RUnlock() - chanMap := make(map[typeutil.UniqueID][]string) + + chanMap := make(map[UniqueID][]string) for id, collInfo := range mt.collID2Meta { - chanMap[id] = collInfo.PhysicalChannelNames + chanMap[id] = common.CloneStringList(collInfo.PhysicalChannelNames) } + return chanMap } -// AddPartition add partition -func (mt *MetaTable) AddPartition(collID typeutil.UniqueID, partitionName string, partitionID typeutil.UniqueID, ts typeutil.Timestamp, ddOpStr string) error { +func (mt *MetaTable) AddPartition(ctx context.Context, partition *model.Partition) error { mt.ddLock.Lock() defer mt.ddLock.Unlock() - coll, ok := mt.collID2Meta[collID] - if !ok { - return fmt.Errorf("can't find collection. id = %d", collID) - } - // number of partition tags (except _default) should be limited to 4096 by default - if int64(len(coll.Partitions)) >= Params.RootCoordCfg.MaxPartitionNum { - return fmt.Errorf("maximum partition's number should be limit to %d", Params.RootCoordCfg.MaxPartitionNum) + coll, ok := mt.collID2Meta[partition.CollectionID] + if !ok || !coll.Available() { + return fmt.Errorf("collection not exists: %d", partition.CollectionID) } - - for _, p := range coll.Partitions { - if p.PartitionID == partitionID { - return fmt.Errorf("partition id = %d already exists", partitionID) - } - if p.PartitionName == partitionName { - return fmt.Errorf("partition name = %s already exists", partitionName) - } - // no necessary to check created timestamp + if partition.State != pb.PartitionState_PartitionCreated { + return fmt.Errorf("partition state is not created, collection: %d, partition: %d, state: %s", partition.CollectionID, partition.PartitionID, partition.State) } - - partition := &model.Partition{ - PartitionID: partitionID, - PartitionName: partitionName, - PartitionCreatedTimestamp: ts, - CollectionID: collID, - } - coll.Partitions = append(coll.Partitions, partition) - - if err := mt.catalog.CreatePartition(mt.ctx, partition, ts); err != nil { + if err := mt.catalog.CreatePartition(ctx, partition, partition.PartitionCreatedTimestamp); err != nil { return err } - - mt.collID2Meta[collID] = coll + mt.collID2Meta[partition.CollectionID].Partitions = append(mt.collID2Meta[partition.CollectionID].Partitions, partition.Clone()) + log.Info("add partition to meta table", + zap.Int64("collection", partition.CollectionID), zap.String("partition", partition.PartitionName), + zap.Int64("partitionid", partition.PartitionID), zap.Uint64("ts", partition.PartitionCreatedTimestamp)) return nil } -// GetPartitionNameByID return partition name by partition id -func (mt *MetaTable) GetPartitionNameByID(collID, partitionID typeutil.UniqueID, ts typeutil.Timestamp) (string, error) { - if ts == 0 { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - col, ok := mt.collID2Meta[collID] - if !ok { - return "", fmt.Errorf("can't find collection id = %d", collID) +func (mt *MetaTable) ChangePartitionState(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state pb.PartitionState, ts Timestamp) error { + mt.ddLock.Lock() + defer mt.ddLock.Unlock() + + coll, ok := mt.collID2Meta[collectionID] + if !ok { + return nil + } + for idx, part := range coll.Partitions { + if part.PartitionID == partitionID { + clone := part.Clone() + clone.State = state + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + if err := mt.catalog.AlterPartition(ctx1, part, clone, metastore.MODIFY, ts); 
err != nil { + return err + } + mt.collID2Meta[collectionID].Partitions[idx] = clone + log.Info("change partition state", zap.Int64("collection", collectionID), + zap.Int64("partition", partitionID), zap.String("state", state.String()), + zap.Uint64("ts", ts)) + return nil } - for _, partition := range col.Partitions { - if partition.PartitionID == partitionID { + } + return fmt.Errorf("partition not exist, collection: %d, partition: %d", collectionID, partitionID) +} + +func (mt *MetaTable) RemovePartition(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error { + mt.ddLock.Lock() + defer mt.ddLock.Unlock() + + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + if err := mt.catalog.DropPartition(ctx1, collectionID, partitionID, ts); err != nil { + return err + } + coll, ok := mt.collID2Meta[collectionID] + if !ok { + return nil + } + var loc = -1 + for idx, part := range coll.Partitions { + if part.PartitionID == partitionID { + loc = idx + break + } + } + if loc != -1 { + coll.Partitions = append(coll.Partitions[:loc], coll.Partitions[loc+1:]...) + } + log.Info("remove partition", zap.Int64("collection", collectionID), zap.Int64("partition", partitionID), zap.Uint64("ts", ts)) + return nil +} + +func (mt *MetaTable) CreateAlias(ctx context.Context, alias string, collectionName string, ts Timestamp) error { + mt.ddLock.Lock() + defer mt.ddLock.Unlock() + + // It's ok that we don't read from catalog when cache missed. + // Since cache always keep the latest version, and the ts should always be the latest. + + if _, ok := mt.collName2ID[alias]; ok { + return fmt.Errorf("cannot create alias, collection already exists with same name: %s", alias) + } + + collectionID, ok := mt.collName2ID[collectionName] + if !ok { + // you cannot alias to a non-existent collection. + return fmt.Errorf("collection not exists: %s", collectionName) + } + + // check if alias exists. + aliasedCollectionID, ok := mt.collAlias2ID[alias] + if ok && aliasedCollectionID == collectionID { + log.Warn("add duplicate alias", zap.String("alias", alias), zap.String("collection", collectionName), zap.Uint64("ts", ts)) + return nil + } else if ok { + // TODO: better to check if aliasedCollectionID exist or is available, though not very possible. + aliasedColl := mt.collID2Meta[aliasedCollectionID] + return fmt.Errorf("alias exists and already aliased to another collection, alias: %s, collection: %s, other collection: %s", alias, collectionName, aliasedColl.Name) + } + // alias didn't exist. + + coll, ok := mt.collID2Meta[collectionID] + if !ok || !coll.Available() { + // you cannot alias to a non-existent collection. 
+ return fmt.Errorf("collection not exists: %s", collectionName) + } + + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + if err := mt.catalog.CreateAlias(ctx1, &model.Alias{ + Name: alias, + CollectionID: collectionID, + CreatedTime: ts, + State: pb.AliasState_AliasCreated, + }, ts); err != nil { + return err + } + mt.collAlias2ID[alias] = collectionID + log.Info("create alias", zap.String("alias", alias), zap.String("collection", collectionName), zap.Uint64("ts", ts)) + return nil +} + +func (mt *MetaTable) DropAlias(ctx context.Context, alias string, ts Timestamp) error { + mt.ddLock.Lock() + defer mt.ddLock.Unlock() + + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + if err := mt.catalog.DropAlias(ctx1, alias, ts); err != nil { + return err + } + delete(mt.collAlias2ID, alias) + log.Info("drop alias", zap.String("alias", alias), zap.Uint64("ts", ts)) + return nil +} + +func (mt *MetaTable) AlterAlias(ctx context.Context, alias string, collectionName string, ts Timestamp) error { + mt.ddLock.Lock() + defer mt.ddLock.Unlock() + + // It's ok that we don't read from catalog when cache missed. + // Since cache always keep the latest version, and the ts should always be the latest. + + if _, ok := mt.collName2ID[alias]; ok { + return fmt.Errorf("cannot alter alias, collection already exists with same name: %s", alias) + } + + collectionID, ok := mt.collName2ID[collectionName] + if !ok { + // you cannot alias to a non-existent collection. + return fmt.Errorf("collection not exists: %s", collectionName) + } + + coll, ok := mt.collID2Meta[collectionID] + if !ok || !coll.Available() { + // you cannot alias to a non-existent collection. + return fmt.Errorf("collection not exists: %s", collectionName) + } + + // check if alias exists. + _, ok = mt.collAlias2ID[alias] + if !ok { + // + return fmt.Errorf("failed to alter alias, alias does not exist: %s", alias) + } + + ctx1 := contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName) + if err := mt.catalog.AlterAlias(ctx1, &model.Alias{ + Name: alias, + CollectionID: collectionID, + CreatedTime: ts, + State: pb.AliasState_AliasCreated, + }, ts); err != nil { + return err + } + + // alias switch to another collection anyway. + mt.collAlias2ID[alias] = collectionID + log.Info("alter alias", zap.String("alias", alias), zap.String("collection", collectionName), zap.Uint64("ts", ts)) + return nil +} + +func (mt *MetaTable) IsAlias(name string) bool { + mt.ddLock.RLock() + defer mt.ddLock.RUnlock() + + _, ok := mt.collAlias2ID[name] + return ok +} + +func (mt *MetaTable) listAliasesByID(collID UniqueID) []string { + ret := make([]string, 0) + for alias, id := range mt.collAlias2ID { + if id == collID { + ret = append(ret, alias) + } + } + return ret +} + +func (mt *MetaTable) ListAliasesByID(collID UniqueID) []string { + mt.ddLock.RLock() + defer mt.ddLock.RUnlock() + + return mt.listAliasesByID(collID) +} + +// GetCollectionNameByID serve for bulk load. TODO: why this didn't accept ts? +func (mt *MetaTable) GetCollectionNameByID(collID UniqueID) (string, error) { + mt.ddLock.RUnlock() + defer mt.ddLock.RUnlock() + + coll, ok := mt.collID2Meta[collID] + if !ok || !coll.Available() { + return "", fmt.Errorf("collection not exist: %d", collID) + } + + return coll.Name, nil +} + +// GetPartitionNameByID serve for bulk load. 
+ +// GetPartitionNameByID serves bulk load. +func (mt *MetaTable) GetPartitionNameByID(collID UniqueID, partitionID UniqueID, ts Timestamp) (string, error) { + mt.ddLock.RLock() + defer mt.ddLock.RUnlock() + + coll, ok := mt.collID2Meta[collID] + if ok && coll.Available() && coll.CreateTime <= ts { + // cache hit. + for _, partition := range coll.Partitions { + if partition.Available() && partition.PartitionID == partitionID && partition.PartitionCreatedTimestamp <= ts { + // cache hit. return partition.PartitionName, nil } } - } - - col, err := mt.catalog.GetCollectionByID(mt.ctx, collID, ts) + // cache miss, get from catalog anyway. + coll, err := mt.catalog.GetCollectionByID(mt.ctx, collID, ts) if err != nil { return "", err } - for _, partition := range col.Partitions { - if partition.PartitionID == partitionID { + if !coll.Available() { + return "", fmt.Errorf("collection not exist: %d", collID) + } + for _, partition := range coll.Partitions { + // no need to check time travel logic again, since catalog already did. + if partition.Available() && partition.PartitionID == partitionID { return partition.PartitionName, nil } } - return "", fmt.Errorf("partition %d does not exist", partitionID) + return "", fmt.Errorf("partition not exist: %d", partitionID) } -func (mt *MetaTable) getPartitionByName(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error) { - if ts == 0 { - col, ok := mt.collID2Meta[collID] - if !ok { - return 0, fmt.Errorf("can't find collection id = %d", collID) - } - for _, partition := range col.Partitions { - if partition.PartitionName == partitionName { +// GetCollectionIDByName serves bulk load. TODO: why doesn't this accept ts? +func (mt *MetaTable) GetCollectionIDByName(name string) (UniqueID, error) { + mt.ddLock.RLock() + defer mt.ddLock.RUnlock() + + id, ok := mt.collName2ID[name] + if !ok { + return InvalidCollectionID, fmt.Errorf("collection not exists: %s", name) + } + return id, nil +} + +// GetPartitionByName serves bulk load. +func (mt *MetaTable) GetPartitionByName(collID UniqueID, partitionName string, ts Timestamp) (UniqueID, error) { + mt.ddLock.RLock() + defer mt.ddLock.RUnlock() + + coll, ok := mt.collID2Meta[collID] + if ok && coll.Available() && coll.CreateTime <= ts { + // cache hit. + for _, partition := range coll.Partitions { + if partition.Available() && partition.PartitionName == partitionName && partition.PartitionCreatedTimestamp <= ts { + // cache hit. return partition.PartitionID, nil } } - return 0, fmt.Errorf("partition %s does not exist", partitionName) } - - col, err := mt.catalog.GetCollectionByID(mt.ctx, collID, ts) + // cache miss, get from catalog anyway. + coll, err := mt.catalog.GetCollectionByID(mt.ctx, collID, ts) if err != nil { - return 0, err + return common.InvalidPartitionID, err } - for _, partition := range col.Partitions { - if partition.PartitionName == partitionName { + if !coll.Available() { + return common.InvalidPartitionID, fmt.Errorf("collection not exist: %d", collID) + } + for _, partition := range coll.Partitions { + // no need to check time travel logic again, since catalog already did.
+ if partition.Available() && partition.PartitionName == partitionName { return partition.PartitionID, nil } } - return 0, fmt.Errorf("partition %s does not exist", partitionName) -} -// GetPartitionByName return partition id by partition name -func (mt *MetaTable) GetPartitionByName(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error) { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - return mt.getPartitionByName(collID, partitionName, ts) -} - -// HasPartition check partition existence -func (mt *MetaTable) HasPartition(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) bool { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - _, err := mt.getPartitionByName(collID, partitionName, ts) - return err == nil -} - -// DeletePartition delete partition -func (mt *MetaTable) DeletePartition(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp, ddOpStr string) (typeutil.UniqueID, error) { - mt.ddLock.Lock() - defer mt.ddLock.Unlock() - - if partitionName == Params.CommonCfg.DefaultPartitionName { - return 0, fmt.Errorf("default partition cannot be deleted") - } - - col, ok := mt.collID2Meta[collID] - if !ok { - return 0, fmt.Errorf("can't find collection id = %d", collID) - } - - // check tag exists - exist := false - - parts := make([]*model.Partition, 0, len(col.Partitions)) - - var partID typeutil.UniqueID - for _, partition := range col.Partitions { - if partition.PartitionName == partitionName { - partID = partition.PartitionID - exist = true - } else { - parts = append(parts, partition) - } - } - if !exist { - return 0, fmt.Errorf("partition %s does not exist", partitionName) - } - - col.Partitions = parts - if err := mt.catalog.DropPartition(mt.ctx, col.CollectionID, partID, ts); err != nil { - return 0, err - } - - // update cache - mt.collID2Meta[collID] = col - //if segIDMap, ok := mt.partID2IndexedSegID[partID]; ok { - // for segID := range segIDMap { - // indexID, ok := mt.segID2IndexID[segID] - // if !ok { - // continue - // } - // delete(mt.segID2IndexID, segID) - // - // indexMeta, ok := mt.indexID2Meta[indexID] - // if ok { - // delete(indexMeta.SegmentIndexes, segID) - // } - // } - //} - //delete(mt.partID2IndexedSegID, partID) - - return partID, nil -} - -// GetFieldSchema return field schema -func (mt *MetaTable) GetFieldSchema(collName string, fieldName string) (model.Field, error) { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - - return mt.getFieldSchemaInternal(collName, fieldName) -} - -func (mt *MetaTable) getFieldSchemaInternal(collName string, fieldName string) (model.Field, error) { - collID, ok := mt.collName2ID[collName] - if !ok { - collID, ok = mt.collAlias2ID[collName] - if !ok { - return model.Field{}, fmt.Errorf("collection %s not found", collName) - } - } - col, ok := mt.collID2Meta[collID] - if !ok { - return model.Field{}, fmt.Errorf("collection %s not found", collName) - } - - for _, field := range col.Fields { - if field.Name == fieldName { - return *field, nil - } - } - return model.Field{}, fmt.Errorf("collection %s doesn't have filed %s", collName, fieldName) -} - -// IsSegmentIndexed check if segment has indexed -//func (mt *MetaTable) IsSegmentIndexed(segID typeutil.UniqueID, fieldSchema *model.Field, indexParams []*commonpb.KeyValuePair) bool { -// mt.ddLock.RLock() -// defer mt.ddLock.RUnlock() -// return mt.isSegmentIndexedInternal(segID, fieldSchema, indexParams) -//} - -//func (mt *MetaTable) isSegmentIndexedInternal(segID typeutil.UniqueID, fieldSchema 
*model.Field, indexParams []*commonpb.KeyValuePair) bool { -// index, err := mt.getIdxMetaBySegID(segID) -// if err != nil { -// return false -// } -// -// segIndex, ok := index.SegmentIndexes[segID] -// if ok && !index.IsDeleted && -// index.FieldID == fieldSchema.FieldID && -// EqualKeyPairArray(indexParams, index.IndexParams) && -// segIndex.EnableIndex { -// return true -// } -// -// return false -//} - -func (mt *MetaTable) getCollectionInfoInternal(collName string) (model.Collection, error) { - collID, ok := mt.collName2ID[collName] - if !ok { - collID, ok = mt.collAlias2ID[collName] - if !ok { - return model.Collection{}, fmt.Errorf("collection not found: %s", collName) - } - } - collMeta, ok := mt.collID2Meta[collID] - if !ok { - return model.Collection{}, fmt.Errorf("collection not found: %s", collName) - } - return collMeta, nil -} - -//func (mt *MetaTable) checkFieldCanBeIndexed(collMeta model.Collection, fieldSchema model.Field, idxInfo *model.Index) error { -// for _, tuple := range collMeta.FieldIDToIndexID { -// if tuple.Key == fieldSchema.FieldID { -// if info, ok := mt.indexID2Meta[tuple.Value]; ok { -// if info.IsDeleted { -// continue -// } -// -// if idxInfo.IndexName != info.IndexName { -// return fmt.Errorf( -// "creating multiple indexes on same field is not supported, "+ -// "collection: %s, field: %s, index name: %s, new index name: %s", -// collMeta.Name, fieldSchema.Name, -// info.IndexName, idxInfo.IndexName) -// } -// } else { -// // TODO: unexpected: what if index id not exist? Meta incomplete. -// log.Warn("index meta was incomplete, index id missing in indexID2Meta", -// zap.String("collection", collMeta.Name), -// zap.String("field", fieldSchema.Name), -// zap.Int64("collection id", collMeta.CollectionID), -// zap.Int64("field id", fieldSchema.FieldID), -// zap.Int64("index id", tuple.Value)) -// } -// } -// } -// return nil -//} -// -//func (mt *MetaTable) checkFieldIndexDuplicate(collMeta model.Collection, fieldSchema model.Field, idxInfo *model.Index) (duplicate bool, idx *model.Index, err error) { -// for _, t := range collMeta.FieldIDToIndexID { -// if info, ok := mt.indexID2Meta[t.Value]; ok && !info.IsDeleted { -// if info.IndexName == idxInfo.IndexName { -// // the index name must be different for different indexes -// if t.Key != fieldSchema.FieldID || !EqualKeyPairArray(info.IndexParams, idxInfo.IndexParams) { -// return false, nil, fmt.Errorf("index already exists, collection: %s, field: %s, index: %s", collMeta.Name, fieldSchema.Name, idxInfo.IndexName) -// } -// -// // same index name, index params, and fieldId -// return true, info, nil -// } -// } -// } -// return false, nil, nil -//} -// -//// GetNotIndexedSegments return segment ids which have no index -//func (mt *MetaTable) GetNotIndexedSegments(collName string, fieldName string, idxInfo *model.Index, segIDs []typeutil.UniqueID) ([]typeutil.UniqueID, model.Field, error) { -// mt.ddLock.Lock() -// defer mt.ddLock.Unlock() -// -// fieldSchema, err := mt.getFieldSchemaInternal(collName, fieldName) -// if err != nil { -// return nil, fieldSchema, err -// } -// -// rstID := make([]typeutil.UniqueID, 0, 16) -// for _, segID := range segIDs { -// if ok := mt.isSegmentIndexedInternal(segID, &fieldSchema, idxInfo.IndexParams); !ok { -// rstID = append(rstID, segID) -// } -// } -// return rstID, fieldSchema, nil -//} - -// AddIndex add index -//func (mt *MetaTable) AddIndex(colName string, fieldName string, idxInfo *model.Index, segIDs []typeutil.UniqueID) (bool, error) { -// mt.ddLock.Lock() -// 
defer mt.ddLock.Unlock() -// -// fieldSchema, err := mt.getFieldSchemaInternal(colName, fieldName) -// if err != nil { -// return false, err -// } -// -// collMeta, err := mt.getCollectionInfoInternal(colName) -// if err != nil { -// // error here if collection not found. -// return false, err -// } -// -// //TODO:: check index params for scalar field -// // set default index type for scalar index -// if !typeutil.IsVectorType(fieldSchema.DataType) { -// if fieldSchema.DataType == schemapb.DataType_VarChar { -// idxInfo.IndexParams = []*commonpb.KeyValuePair{{Key: "index_type", Value: DefaultStringIndexType}} -// } else { -// idxInfo.IndexParams = []*commonpb.KeyValuePair{{Key: "index_type", Value: DefaultIndexType}} -// } -// } -// -// if idxInfo.IndexParams == nil { -// return false, fmt.Errorf("index param is nil") -// } -// -// if err := mt.checkFieldCanBeIndexed(collMeta, fieldSchema, idxInfo); err != nil { -// return false, err -// } -// -// isDuplicated, dupIdxInfo, err := mt.checkFieldIndexDuplicate(collMeta, fieldSchema, idxInfo) -// if err != nil { -// return isDuplicated, err -// } -// -// if isDuplicated { -// log.Info("index already exists, update timestamp for IndexID", -// zap.Any("indexTs", idxInfo.CreateTime), -// zap.Int64("indexID", dupIdxInfo.IndexID)) -// newIdxMeta := *dupIdxInfo -// newIdxMeta.CreateTime = idxInfo.CreateTime -// if err := mt.catalog.AlterIndex(mt.ctx, dupIdxInfo, &newIdxMeta, metastore.ADD); err != nil { -// return isDuplicated, err -// } -// mt.indexID2Meta[dupIdxInfo.IndexID] = &newIdxMeta -// } else { -// segmentIndexes := make(map[int64]model.SegmentIndex, len(segIDs)) -// for _, segID := range segIDs { -// segmentIndex := model.SegmentIndex{ -// Segment: model.Segment{ -// SegmentID: segID, -// }, -// EnableIndex: false, -// } -// segmentIndexes[segID] = segmentIndex -// } -// -// idxInfo.SegmentIndexes = segmentIndexes -// idxInfo.FieldID = fieldSchema.FieldID -// idxInfo.CollectionID = collMeta.CollectionID -// -// tuple := common.Int64Tuple{ -// Key: fieldSchema.FieldID, -// Value: idxInfo.IndexID, -// } -// collMeta.FieldIDToIndexID = append(collMeta.FieldIDToIndexID, tuple) -// if err := mt.catalog.CreateIndex(mt.ctx, &collMeta, idxInfo); err != nil { -// return isDuplicated, err -// } -// -// mt.collID2Meta[collMeta.CollectionID] = collMeta -// mt.indexID2Meta[idxInfo.IndexID] = idxInfo -// } -// -// return isDuplicated, nil -//} -// -//// GetIndexByName return index info by index name -//func (mt *MetaTable) GetIndexByName(collName, indexName string) (model.Collection, []model.Index, error) { -// mt.ddLock.RLock() -// defer mt.ddLock.RUnlock() -// -// collID, ok := mt.collName2ID[collName] -// if !ok { -// collID, ok = mt.collAlias2ID[collName] -// if !ok { -// return model.Collection{}, nil, fmt.Errorf("collection %s not found", collName) -// } -// } -// col, ok := mt.collID2Meta[collID] -// if !ok { -// return model.Collection{}, nil, fmt.Errorf("collection %s not found", collName) -// } -// -// rstIndex := make([]model.Index, 0, len(col.FieldIDToIndexID)) -// for _, t := range col.FieldIDToIndexID { -// indexID := t.Value -// idxInfo, ok := mt.indexID2Meta[indexID] -// if !ok { -// return model.Collection{}, nil, fmt.Errorf("index id = %d not found", indexID) -// } -// if idxInfo.IsDeleted { -// continue -// } -// if indexName == "" || idxInfo.IndexName == indexName { -// rstIndex = append(rstIndex, *idxInfo) -// } -// } -// return col, rstIndex, nil -//} -// -//// GetIndexByID return index info by index id -//func (mt *MetaTable) 
GetIndexByID(indexID typeutil.UniqueID) (*model.Index, error) { -// mt.ddLock.RLock() -// defer mt.ddLock.RUnlock() -// -// indexInfo, ok := mt.indexID2Meta[indexID] -// if !ok || indexInfo.IsDeleted { -// return nil, fmt.Errorf("cannot find index, id = %d", indexID) -// } -// return indexInfo, nil -//} - -func (mt *MetaTable) dupCollectionMeta() map[typeutil.UniqueID]model.Collection { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - return mt.collID2Meta -} - -//func (mt *MetaTable) dupMeta() ( -// map[typeutil.UniqueID]model.Collection, -// map[typeutil.UniqueID]typeutil.UniqueID, -// map[typeutil.UniqueID]model.Index, -//) { -// mt.ddLock.RLock() -// defer mt.ddLock.RUnlock() -// -// collID2Meta := make(map[typeutil.UniqueID]model.Collection, len(mt.collID2Meta)) -// //segID2IndexID := make(map[typeutil.UniqueID]typeutil.UniqueID, len(mt.segID2IndexID)) -// //indexID2Meta := make(map[typeutil.UniqueID]model.Index, len(mt.indexID2Meta)) -// for k, v := range mt.collID2Meta { -// collID2Meta[k] = v -// } -// //for k, v := range mt.segID2IndexID { -// // segID2IndexID[k] = v -// //} -// //for k, v := range mt.indexID2Meta { -// // indexID2Meta[k] = *v -// //} -// return collID2Meta, nil, nil -//} - -// AddAlias add collection alias -func (mt *MetaTable) AddAlias(collectionAlias string, collectionName string, ts typeutil.Timestamp) error { - mt.ddLock.Lock() - defer mt.ddLock.Unlock() - if _, ok := mt.collAlias2ID[collectionAlias]; ok { - return fmt.Errorf("duplicate collection alias, alias = %s", collectionAlias) - } - - if _, ok := mt.collName2ID[collectionAlias]; ok { - return fmt.Errorf("collection alias collides with existing collection name. collection = %s, alias = %s", collectionAlias, collectionAlias) - } - - id, ok := mt.collName2ID[collectionName] - if !ok { - return fmt.Errorf("aliased collection name does not exist, name = %s", collectionName) - } - - alias := &model.Alias{ - CollectionID: id, - Name: collectionAlias, - CreatedTime: ts, - } - if err := mt.catalog.CreateAlias(mt.ctx, alias, ts); err != nil { - return err - } - - mt.collAlias2ID[collectionAlias] = id - return nil -} - -// DropAlias drop collection alias -func (mt *MetaTable) DropAlias(collectionAlias string, ts typeutil.Timestamp) error { - mt.ddLock.Lock() - defer mt.ddLock.Unlock() - // TODO: drop alias should be idempotent. - _, ok := mt.collAlias2ID[collectionAlias] - if !ok { - return fmt.Errorf("alias does not exist, alias = %s", collectionAlias) - } - - if err := mt.catalog.DropAlias(mt.ctx, collectionAlias, ts); err != nil { - return err - } - delete(mt.collAlias2ID, collectionAlias) - return nil -} - -// AlterAlias alter collection alias -func (mt *MetaTable) AlterAlias(collectionAlias string, collectionName string, ts typeutil.Timestamp) error { - mt.ddLock.Lock() - defer mt.ddLock.Unlock() - if _, ok := mt.collAlias2ID[collectionAlias]; !ok { - return fmt.Errorf("alias does not exist, alias = %s", collectionAlias) - } - - id, ok := mt.collName2ID[collectionName] - if !ok { - return fmt.Errorf("aliased collection name does not exist, name = %s", collectionName) - } - - alias := &model.Alias{ - CollectionID: id, - Name: collectionAlias, - CreatedTime: ts, - } - if err := mt.catalog.AlterAlias(mt.ctx, alias, ts); err != nil { - return err - } - mt.collAlias2ID[collectionAlias] = id - return nil -} - -// IsAlias returns true if specific `collectionAlias` is an alias of collection. 
-func (mt *MetaTable) IsAlias(collectionAlias string) bool { - mt.ddLock.RLock() - defer mt.ddLock.RUnlock() - _, ok := mt.collAlias2ID[collectionAlias] - return ok + return common.InvalidPartitionID, fmt.Errorf("partition ") } // AddCredential add credential diff --git a/internal/rootcoord/meta_table_test.go b/internal/rootcoord/meta_table_test.go index e2a20740c8..144ee6106c 100644 --- a/internal/rootcoord/meta_table_test.go +++ b/internal/rootcoord/meta_table_test.go @@ -17,34 +17,25 @@ package rootcoord import ( - "context" "encoding/json" "errors" "fmt" "math/rand" "strings" - "sync" "testing" "time" + "github.com/milvus-io/milvus/internal/util/funcutil" + "github.com/milvus-io/milvus/internal/common" "github.com/milvus-io/milvus/internal/kv" - etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" - memkv "github.com/milvus-io/milvus/internal/kv/mem" - "github.com/milvus-io/milvus/internal/metastore" "github.com/milvus-io/milvus/internal/metastore/kv/rootcoord" - "github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/proto/commonpb" "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/proto/milvuspb" - "github.com/milvus-io/milvus/internal/proto/schemapb" "github.com/milvus-io/milvus/internal/util" - "github.com/milvus-io/milvus/internal/util/etcd" - "github.com/milvus-io/milvus/internal/util/funcutil" "github.com/milvus-io/milvus/internal/util/typeutil" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" "go.uber.org/zap" ) @@ -122,508 +113,23 @@ func (m *mockTestTxnKV) RemoveWithPrefix(key string) error { func generateMetaTable(t *testing.T) (*MetaTable, *mockTestKV, *mockTestTxnKV, func()) { rand.Seed(time.Now().UnixNano()) - randVal := rand.Int() Params.Init() - rootPath := fmt.Sprintf("/test/meta/%d", randVal) - etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - require.Nil(t, err) - skv, err := rootcoord.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7) - assert.Nil(t, err) - assert.NotNil(t, skv) - - txnkv := etcdkv.NewEtcdKV(etcdCli, rootPath) - _, err = NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: txnkv, Snapshot: skv}) - assert.Nil(t, err) mockSnapshotKV := &mockTestKV{ - SnapShotKV: skv, - loadWithPrefix: func(key string, ts typeutil.Timestamp) ([]string, []string, error) { - return skv.LoadWithPrefix(key, ts) - }, - } - mockTxnKV := &mockTestTxnKV{ - TxnKV: txnkv, - loadWithPrefix: func(key string) ([]string, []string, error) { return txnkv.LoadWithPrefix(key) }, - save: func(key, value string) error { return txnkv.Save(key, value) }, - multiSave: func(kvs map[string]string) error { return txnkv.MultiSave(kvs) }, - multiSaveAndRemoveWithPrefix: func(kvs map[string]string, removal []string) error { - return txnkv.MultiSaveAndRemoveWithPrefix(kvs, removal) - }, - remove: func(key string) error { return txnkv.Remove(key) }, - } - - mockMt, err := NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: mockTxnKV, Snapshot: mockSnapshotKV}) - assert.Nil(t, err) - return mockMt, mockSnapshotKV, mockTxnKV, func() { - etcdCli.Close() - } -} - -func TestMetaTable(t *testing.T) { - const ( - collName = "testColl" - collNameInvalid = "testColl_invalid" - aliasName1 = "alias1" - aliasName2 = "alias2" - collID = typeutil.UniqueID(1) - collIDInvalid = typeutil.UniqueID(2) - partIDDefault = typeutil.UniqueID(10) - partID = typeutil.UniqueID(20) - partName = "testPart" - partIDInvalid = typeutil.UniqueID(21) - segID = 
typeutil.UniqueID(100) - segID2 = typeutil.UniqueID(101) - fieldID = typeutil.UniqueID(110) - fieldID2 = typeutil.UniqueID(111) - indexID = typeutil.UniqueID(10000) - indexID2 = typeutil.UniqueID(10001) - buildID = typeutil.UniqueID(201) - indexName = "testColl_index_110" - ) - - rand.Seed(time.Now().UnixNano()) - randVal := rand.Int() - Params.Init() - rootPath := fmt.Sprintf("/test/meta/%d", randVal) - - var vtso typeutil.Timestamp - ftso := func() typeutil.Timestamp { - vtso++ - return vtso - } - - etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - require.Nil(t, err) - defer etcdCli.Close() - - skv, err := rootcoord.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7) - assert.Nil(t, err) - assert.NotNil(t, skv) - txnKV := etcdkv.NewEtcdKV(etcdCli, rootPath) - mt, err := NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: txnKV, Snapshot: skv}) - assert.Nil(t, err) - - collInfo := &model.Collection{ - CollectionID: collID, - Name: collName, - AutoID: false, - Fields: []*model.Field{ - { - FieldID: fieldID, - Name: "field110", - IsPrimaryKey: false, - Description: "", - DataType: schemapb.DataType_FloatVector, - TypeParams: []*commonpb.KeyValuePair{ - { - Key: "field110-k1", - Value: "field110-v1", - }, - { - Key: "field110-k2", - Value: "field110-v2", - }, - }, - IndexParams: []*commonpb.KeyValuePair{ - { - Key: "field110-i1", - Value: "field110-v1", - }, - { - Key: "field110-i2", - Value: "field110-v2", - }, - }, - }, - }, - CreateTime: 0, - Partitions: []*model.Partition{ - { - PartitionID: partIDDefault, - PartitionName: Params.CommonCfg.DefaultPartitionName, - PartitionCreatedTimestamp: 0, - }, - }, - VirtualChannelNames: []string{ - fmt.Sprintf("dmChannel_%dv%d", collID, 0), - fmt.Sprintf("dmChannel_%dv%d", collID, 1), - }, - PhysicalChannelNames: []string{ - funcutil.ToPhysicalChannel(fmt.Sprintf("dmChannel_%dv%d", collID, 0)), - funcutil.ToPhysicalChannel(fmt.Sprintf("dmChannel_%dv%d", collID, 1)), - }, - } - - var wg sync.WaitGroup - wg.Add(1) - t.Run("add collection", func(t *testing.T) { - defer wg.Done() - ts := ftso() - - err = mt.AddCollection(collInfo, ts, "") - assert.Nil(t, err) - assert.Equal(t, uint64(1), ts) - - collMeta, err := mt.GetCollectionByName(collName, ts) - assert.Nil(t, err) - assert.Equal(t, collMeta.CreateTime, ts) - assert.Equal(t, collMeta.Partitions[0].PartitionCreatedTimestamp, ts) - - assert.Equal(t, partIDDefault, collMeta.Partitions[0].PartitionID) - assert.Equal(t, 1, len(collMeta.Partitions)) - assert.True(t, mt.HasCollection(collInfo.CollectionID, 0)) - - field, err := mt.GetFieldSchema(collName, "field110") - assert.Nil(t, err) - assert.Equal(t, collInfo.Fields[0].FieldID, field.FieldID) - }) - - wg.Add(1) - t.Run("add alias", func(t *testing.T) { - defer wg.Done() - ts := ftso() - exists := mt.IsAlias(aliasName1) - assert.False(t, exists) - err = mt.AddAlias(aliasName1, collName, ts) - assert.Nil(t, err) - aliases := mt.ListAliases(collID) - assert.Equal(t, aliases, []string{aliasName1}) - exists = mt.IsAlias(aliasName1) - assert.True(t, exists) - }) - wg.Add(1) - t.Run("alter alias", func(t *testing.T) { - defer wg.Done() - ts := ftso() - err = mt.AlterAlias(aliasName1, collName, ts) - assert.Nil(t, err) - err = mt.AlterAlias(aliasName1, collNameInvalid, ts) - assert.NotNil(t, err) - }) - - wg.Add(1) - t.Run("delete alias", func(t *testing.T) { - defer wg.Done() - ts := ftso() - err = mt.DropAlias(aliasName1, ts) - assert.Nil(t, err) - }) - - wg.Add(1) - t.Run("not load alias when load collection meta", func(t *testing.T) { - defer 
wg.Done() - ts := ftso() - err = mt.AddAlias(aliasName1, collName, ts) - assert.Nil(t, err) - err = mt.reloadFromCatalog() - assert.Nil(t, err) - _, ok := mt.collName2ID[aliasName1] - assert.False(t, ok) - }) - - wg.Add(1) - t.Run("add partition", func(t *testing.T) { - defer wg.Done() - ts := ftso() - err = mt.AddPartition(collID, partName, partID, ts, "") - assert.Nil(t, err) - //assert.Equal(t, ts, uint64(2)) - - collMeta, ok := mt.collID2Meta[collID] - assert.True(t, ok) - assert.Equal(t, 2, len(collMeta.Partitions)) - assert.Equal(t, collMeta.Partitions[1].PartitionName, partName) - assert.Equal(t, ts, collMeta.Partitions[1].PartitionCreatedTimestamp) - }) - - wg.Add(1) - t.Run("drop partition", func(t *testing.T) { - defer wg.Done() - ts := ftso() - id, err := mt.DeletePartition(collID, partName, ts, "") - assert.Nil(t, err) - assert.Equal(t, partID, id) - }) - - wg.Add(1) - t.Run("drop collection", func(t *testing.T) { - defer wg.Done() - ts := ftso() - err = mt.DeleteCollection(collIDInvalid, ts, "") - assert.NotNil(t, err) - ts2 := ftso() - err = mt.AddAlias(aliasName2, collName, ts2) - assert.Nil(t, err) - err = mt.DeleteCollection(collID, ts, "") - assert.Nil(t, err) - ts3 := ftso() - err = mt.DropAlias(aliasName2, ts3) - assert.NotNil(t, err) - }) - - wg.Add(1) - t.Run("delete credential", func(t *testing.T) { - defer wg.Done() - - err = mt.DeleteCredential("") - assert.Nil(t, err) - - err = mt.DeleteCredential("abcxyz") - assert.Nil(t, err) - }) - - /////////////////////////// these tests should run at last, it only used to hit the error lines //////////////////////// - txnkv := etcdkv.NewEtcdKV(etcdCli, rootPath) - mockKV := &mockTestKV{ loadWithPrefix: func(key string, ts typeutil.Timestamp) ([]string, []string, error) { return nil, nil, nil }, } mockTxnKV := &mockTestTxnKV{ - TxnKV: txnkv, - loadWithPrefix: func(key string) ([]string, []string, error) { return txnkv.LoadWithPrefix(key) }, - save: func(key, value string) error { return txnkv.Save(key, value) }, - multiSave: func(kvs map[string]string) error { return txnkv.MultiSave(kvs) }, - multiSaveAndRemoveWithPrefix: func(kvs map[string]string, removal []string) error { - return txnkv.MultiSaveAndRemoveWithPrefix(kvs, removal) - }, - remove: func(key string) error { return txnkv.Remove(key) }, + loadWithPrefix: func(key string) ([]string, []string, error) { return nil, nil, nil }, + save: func(key, value string) error { return nil }, + multiSave: func(kvs map[string]string) error { return nil }, + multiSaveAndRemoveWithPrefix: func(kvs map[string]string, removal []string) error { return nil }, + remove: func(key string) error { return nil }, } - mt, err = NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: mockTxnKV, Snapshot: mockKV}) - assert.Nil(t, err) - - wg.Add(1) - t.Run("add collection failed", func(t *testing.T) { - defer wg.Done() - mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) { - return nil, nil, nil - } - mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { - return fmt.Errorf("multi save error") - } - collInfo.Partitions = []*model.Partition{} - assert.Error(t, mt.AddCollection(collInfo, 0, "")) - }) - - wg.Add(1) - t.Run("delete collection failed", func(t *testing.T) { - defer wg.Done() - mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { - return nil - } - mockKV.multiSaveAndRemoveWithPrefix = func(save map[string]string, keys []string, ts typeutil.Timestamp) error { - return fmt.Errorf("multi save and 
remove with prefix error") - } - ts := ftso() - assert.Error(t, mt.DeleteCollection(collInfo.CollectionID, ts, "")) - }) - - wg.Add(1) - t.Run("get collection failed", func(t *testing.T) { - defer wg.Done() - mockKV.save = func(key string, value string, ts typeutil.Timestamp) error { - return nil - } - - ts := ftso() - collInfo.Partitions = []*model.Partition{} - err = mt.AddCollection(collInfo, ts, "") - assert.Nil(t, err) - - mt.collID2Meta = make(map[int64]model.Collection) - _, err = mt.GetCollectionByName(collInfo.Name, 0) - assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("can't find collection %s with id %d", collInfo.Name, collInfo.CollectionID)) - - }) - - wg.Add(1) - t.Run("add partition failed", func(t *testing.T) { - defer wg.Done() - mockKV.save = func(key string, value string, ts typeutil.Timestamp) error { - return nil - } - mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) { - return nil, nil, nil - } - err := mt.reloadFromCatalog() - assert.Nil(t, err) - - ts := ftso() - collInfo.Partitions = []*model.Partition{} - err = mt.AddCollection(collInfo, ts, "") - assert.Nil(t, err) - - ts = ftso() - err = mt.AddPartition(2, "no-part", 22, ts, "") - assert.NotNil(t, err) - assert.EqualError(t, err, "can't find collection. id = 2") - - coll := mt.collID2Meta[collInfo.CollectionID] - coll.Partitions = make([]*model.Partition, Params.RootCoordCfg.MaxPartitionNum) - mt.collID2Meta[coll.CollectionID] = coll - err = mt.AddPartition(coll.CollectionID, "no-part", 22, ts, "") - assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("maximum partition's number should be limit to %d", Params.RootCoordCfg.MaxPartitionNum)) - - coll.Partitions = []*model.Partition{{PartitionID: partID, PartitionName: partName, PartitionCreatedTimestamp: ftso()}} - mt.collID2Meta[coll.CollectionID] = coll - - mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { - return fmt.Errorf("multi save error") - } - tmpSaveFunc := mockKV.save - mockKV.save = func(key, value string, ts typeutil.Timestamp) error { - return errors.New("mock") - } - assert.Error(t, mt.AddPartition(coll.CollectionID, "no-part", 22, ts, "")) - mockKV.save = tmpSaveFunc - //err = mt.AddPartition(coll.CollectionID, "no-part", 22, ts, nil) - //assert.NotNil(t, err) - //assert.EqualError(t, err, "multi save error") - - mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { - return nil - } - - collInfo.Partitions = []*model.Partition{} - ts = ftso() - err = mt.AddPartition(coll.CollectionID, partName, 22, ts, "") - assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("partition name = %s already exists", partName)) - err = mt.AddPartition(coll.CollectionID, "no-part", partID, ts, "") - assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("partition id = %d already exists", partID)) - }) - - wg.Add(1) - t.Run("has partition failed", func(t *testing.T) { - defer wg.Done() - mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) { - return nil, nil, nil - } - mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { - return nil - } - err := mt.reloadFromCatalog() - assert.Nil(t, err) - - collInfo.Partitions = []*model.Partition{} - ts := ftso() - err = mt.AddCollection(collInfo, ts, "") - assert.Nil(t, err) - - assert.False(t, mt.HasPartition(collInfo.CollectionID, "no-partName", 0)) - - mt.collID2Meta = make(map[int64]model.Collection) - assert.False(t, 
mt.HasPartition(collInfo.CollectionID, partName, 0)) - }) - - wg.Add(1) - t.Run("delete partition failed", func(t *testing.T) { - defer wg.Done() - mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) { - return nil, nil, nil - } - mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { - return nil - } - err := mt.reloadFromCatalog() - assert.Nil(t, err) - - collInfo.Partitions = []*model.Partition{{PartitionID: partID, PartitionName: partName, PartitionCreatedTimestamp: ftso()}} - ts := ftso() - err = mt.AddCollection(collInfo, ts, "") - assert.Nil(t, err) - - ts = ftso() - _, err = mt.DeletePartition(collInfo.CollectionID, Params.CommonCfg.DefaultPartitionName, ts, "") - assert.NotNil(t, err) - assert.EqualError(t, err, "default partition cannot be deleted") - - _, err = mt.DeletePartition(collInfo.CollectionID, "abc", ts, "") - assert.NotNil(t, err) - assert.EqualError(t, err, "partition abc does not exist") - - mockKV.save = func(key, value string, ts typeutil.Timestamp) error { return errors.New("mocked error") } - _, err = mt.DeletePartition(collInfo.CollectionID, partName, ts, "") - assert.Error(t, err) - - mt.collID2Meta = make(map[int64]model.Collection) - _, err = mt.DeletePartition(collInfo.CollectionID, "abc", ts, "") - assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("can't find collection id = %d", collInfo.CollectionID)) - }) - - wg.Add(1) - t.Run("get field schema failed", func(t *testing.T) { - defer wg.Done() - mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) { - return nil, nil, nil - } - mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { - return nil - } - mockKV.save = func(key string, value string, ts typeutil.Timestamp) error { - return nil - } - err := mt.reloadFromCatalog() - assert.Nil(t, err) - - collInfo.Partitions = []*model.Partition{} - ts := ftso() - err = mt.AddCollection(collInfo, ts, "") - assert.Nil(t, err) - - mt.collID2Meta = make(map[int64]model.Collection) - _, err = mt.getFieldSchemaInternal(collInfo.Name, collInfo.Fields[0].Name) - assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("collection %s not found", collInfo.Name)) - - mt.collName2ID = make(map[string]int64) - _, err = mt.getFieldSchemaInternal(collInfo.Name, collInfo.Fields[0].Name) - assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("collection %s not found", collInfo.Name)) - }) - - wg.Add(1) - t.Run("add credential failed", func(t *testing.T) { - defer wg.Done() - mockTxnKV.loadWithPrefix = func(key string) ([]string, []string, error) { - return []string{}, []string{}, nil - } - mockTxnKV.load = func(key string) (string, error) { - return "", errors.New("test error") - } - mockTxnKV.save = func(key, value string) error { - return fmt.Errorf("save error") - } - err = mt.AddCredential(&internalpb.CredentialInfo{Username: "x", EncryptedPassword: "a\xc5z"}) - assert.Error(t, err) - }) - - wg.Add(1) - t.Run("alter credential failed", func(t *testing.T) { - defer wg.Done() - mockTxnKV.save = func(key, value string) error { - return fmt.Errorf("save error") - } - err = mt.AlterCredential(&internalpb.CredentialInfo{Username: "", EncryptedPassword: "az"}) - assert.Error(t, err) - }) - - wg.Add(1) - t.Run("delete credential failed", func(t *testing.T) { - defer wg.Done() - mockTxnKV.remove = func(key string) error { - return fmt.Errorf("delete error") - } - err := mt.DeleteCredential("") - assert.Error(t, err) - }) - - wg.Wait() + 
mockMt := &MetaTable{catalog: &rootcoord.Catalog{Txn: mockTxnKV, Snapshot: mockSnapshotKV}} + return mockMt, mockSnapshotKV, mockTxnKV, func() {} } func TestRbacCreateRole(t *testing.T) { @@ -1170,350 +676,3 @@ func TestRbacListUserRole(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 4, len(userRoles)) } - -func TestMetaWithTimestamp(t *testing.T) { - const ( - collID1 = typeutil.UniqueID(1) - collID2 = typeutil.UniqueID(2) - collName1 = "t1" - collName2 = "t2" - partID1 = 11 - partID2 = 12 - partName1 = "p1" - partName2 = "p2" - ) - rand.Seed(time.Now().UnixNano()) - randVal := rand.Int() - Params.Init() - rootPath := fmt.Sprintf("/test/meta/%d", randVal) - - var tsoStart typeutil.Timestamp = 100 - vtso := tsoStart - ftso := func() typeutil.Timestamp { - vtso++ - return vtso - } - etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.Nil(t, err) - defer etcdCli.Close() - - skv, err := rootcoord.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7) - assert.Nil(t, err) - assert.NotNil(t, skv) - txnKV := etcdkv.NewEtcdKV(etcdCli, rootPath) - mt, err := NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: txnKV, Snapshot: skv}) - assert.Nil(t, err) - - collInfo := &model.Collection{ - CollectionID: collID1, - Name: collName1, - } - - collInfo.Partitions = []*model.Partition{{PartitionID: partID1, PartitionName: partName1, PartitionCreatedTimestamp: ftso()}} - t1 := ftso() - err = mt.AddCollection(collInfo, t1, "") - assert.Nil(t, err) - - collInfo.CollectionID = collID2 - collInfo.Partitions = []*model.Partition{{PartitionID: partID2, PartitionName: partName2, PartitionCreatedTimestamp: ftso()}} - collInfo.Name = collName2 - t2 := ftso() - err = mt.AddCollection(collInfo, t2, "") - assert.Nil(t, err) - - assert.True(t, mt.HasCollection(collID1, 0)) - assert.True(t, mt.HasCollection(collID2, 0)) - - assert.True(t, mt.HasCollection(collID1, t2)) - assert.True(t, mt.HasCollection(collID2, t2)) - - assert.True(t, mt.HasCollection(collID1, t1)) - assert.False(t, mt.HasCollection(collID2, t1)) - - assert.False(t, mt.HasCollection(collID1, tsoStart)) - assert.False(t, mt.HasCollection(collID2, tsoStart)) - - c1, err := mt.GetCollectionByID(collID1, 0) - assert.Nil(t, err) - c2, err := mt.GetCollectionByID(collID2, 0) - assert.Nil(t, err) - assert.Equal(t, collID1, c1.CollectionID) - assert.Equal(t, collID2, c2.CollectionID) - - c1, err = mt.GetCollectionByID(collID1, t2) - assert.Nil(t, err) - c2, err = mt.GetCollectionByID(collID2, t2) - assert.Nil(t, err) - assert.Equal(t, collID1, c1.CollectionID) - assert.Equal(t, collID2, c2.CollectionID) - - c1, err = mt.GetCollectionByID(collID1, t1) - assert.Nil(t, err) - c2, err = mt.GetCollectionByID(collID2, t1) - assert.NotNil(t, err) - assert.Equal(t, int64(1), c1.CollectionID) - - c1, err = mt.GetCollectionByID(collID1, tsoStart) - assert.NotNil(t, err) - c2, err = mt.GetCollectionByID(collID2, tsoStart) - assert.NotNil(t, err) - - c1, err = mt.GetCollectionByName(collName1, 0) - assert.Nil(t, err) - c2, err = mt.GetCollectionByName(collName2, 0) - assert.Nil(t, err) - assert.Equal(t, int64(1), c1.CollectionID) - assert.Equal(t, int64(2), c2.CollectionID) - - c1, err = mt.GetCollectionByName(collName1, t2) - assert.Nil(t, err) - c2, err = mt.GetCollectionByName(collName2, t2) - assert.Nil(t, err) - assert.Equal(t, int64(1), c1.CollectionID) - assert.Equal(t, int64(2), c2.CollectionID) - - c1, err = mt.GetCollectionByName(collName1, t1) - assert.Nil(t, err) - c2, err = mt.GetCollectionByName(collName2, t1) - assert.NotNil(t, err) - 
assert.Equal(t, int64(1), c1.CollectionID) - - c1, err = mt.GetCollectionByName(collName1, tsoStart) - assert.NotNil(t, err) - c2, err = mt.GetCollectionByName(collName2, tsoStart) - assert.NotNil(t, err) - - getKeys := func(m map[string]*model.Collection) []string { - keys := make([]string, 0, len(m)) - for key := range m { - keys = append(keys, key) - } - return keys - } - - s1, err := mt.ListCollections(0) - assert.Nil(t, err) - assert.Equal(t, 2, len(s1)) - assert.ElementsMatch(t, getKeys(s1), []string{collName1, collName2}) - - s1, err = mt.ListCollections(t2) - assert.Nil(t, err) - assert.Equal(t, 2, len(s1)) - assert.ElementsMatch(t, getKeys(s1), []string{collName1, collName2}) - - s1, err = mt.ListCollections(t1) - assert.Nil(t, err) - assert.Equal(t, 1, len(s1)) - assert.ElementsMatch(t, getKeys(s1), []string{collName1}) - - s1, err = mt.ListCollections(tsoStart) - assert.Nil(t, err) - assert.Equal(t, 0, len(s1)) - - p1, err := mt.GetPartitionByName(collID1, partName1, 0) - assert.Nil(t, err) - p2, err := mt.GetPartitionByName(collID2, partName2, 0) - assert.Nil(t, err) - assert.Equal(t, int64(partID1), p1) - assert.Equal(t, int64(partID2), p2) - - p1, err = mt.GetPartitionByName(collID1, partName1, t2) - assert.Nil(t, err) - p2, err = mt.GetPartitionByName(collID2, partName2, t2) - assert.Nil(t, err) - assert.Equal(t, int64(11), p1) - assert.Equal(t, int64(12), p2) - - p1, err = mt.GetPartitionByName(1, partName1, t1) - assert.Nil(t, err) - _, err = mt.GetPartitionByName(2, partName2, t1) - assert.NotNil(t, err) - assert.Equal(t, int64(11), p1) - - _, err = mt.GetPartitionByName(1, partName1, tsoStart) - assert.NotNil(t, err) - _, err = mt.GetPartitionByName(2, partName2, tsoStart) - assert.NotNil(t, err) - - var cID UniqueID - cID, err = mt.GetCollectionIDByName(collName1) - assert.NoError(t, err) - assert.Equal(t, collID1, cID) - - _, err = mt.GetCollectionIDByName("badname") - assert.Error(t, err) - - name, err := mt.GetCollectionNameByID(collID2) - assert.Nil(t, err) - assert.Equal(t, collName2, name) - - _, err = mt.GetCollectionNameByID(int64(999)) - assert.Error(t, err) -} - -func TestFixIssue10540(t *testing.T) { - rand.Seed(time.Now().UnixNano()) - randVal := rand.Int() - Params.Init() - rootPath := fmt.Sprintf("/test/meta/%d", randVal) - - etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.Nil(t, err) - defer etcdCli.Close() - - skv, err := rootcoord.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7) - assert.Nil(t, err) - assert.NotNil(t, skv) - txnKV := memkv.NewMemoryKV() - - _, err = NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: txnKV, Snapshot: skv}) - assert.Nil(t, err) -} - -func TestMetaTable_unlockGetCollectionInfo(t *testing.T) { - t.Run("normal case", func(t *testing.T) { - mt := &MetaTable{ - collName2ID: map[string]typeutil.UniqueID{"test": 100}, - collID2Meta: map[typeutil.UniqueID]model.Collection{ - 100: {CollectionID: 100, Name: "test"}, - }, - } - info, err := mt.getCollectionInfoInternal("test") - assert.NoError(t, err) - assert.Equal(t, UniqueID(100), info.CollectionID) - assert.Equal(t, "test", info.Name) - }) - - t.Run("collection name not found", func(t *testing.T) { - mt := &MetaTable{collName2ID: nil, collAlias2ID: nil} - _, err := mt.getCollectionInfoInternal("test") - assert.Error(t, err) - }) - - t.Run("name found, meta not found", func(t *testing.T) { - mt := &MetaTable{ - collName2ID: map[string]typeutil.UniqueID{"test": 100}, - collAlias2ID: nil, - collID2Meta: nil, - } - _, err := mt.getCollectionInfoInternal("test") 
- assert.Error(t, err) - }) - - t.Run("alias found, meta not found", func(t *testing.T) { - mt := &MetaTable{ - collName2ID: nil, - collAlias2ID: map[string]typeutil.UniqueID{"test": 100}, - collID2Meta: nil, - } - _, err := mt.getCollectionInfoInternal("test") - assert.Error(t, err) - }) -} - -type MockedCatalog struct { - mock.Mock - metastore.RootCoordCatalog - alterIndexParamsVerification func(ctx context.Context, oldIndex *model.Index, newIndex *model.Index, alterType metastore.AlterType) - createIndexParamsVerification func(ctx context.Context, col *model.Collection, index *model.Index) - dropIndexParamsVerification func(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID) -} - -func (mc *MockedCatalog) ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) { - args := mc.Called() - return args.Get(0).(map[string]*model.Collection), nil -} - -func (mc *MockedCatalog) ListIndexes(ctx context.Context) ([]*model.Index, error) { - args := mc.Called() - return args.Get(0).([]*model.Index), nil -} - -func (mc *MockedCatalog) ListAliases(ctx context.Context, ts typeutil.Timestamp) ([]*model.Alias, error) { - args := mc.Called() - return args.Get(0).([]*model.Alias), nil -} - -func (mc *MockedCatalog) AlterIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index, alterType metastore.AlterType) error { - if mc.alterIndexParamsVerification != nil { - mc.alterIndexParamsVerification(ctx, oldIndex, newIndex, alterType) - } - args := mc.Called() - err := args.Get(0) - if err == nil { - return nil - } - return err.(error) -} - -func TestMetaTable_ReloadFromKV(t *testing.T) { - mc := &MockedCatalog{} - - collectionName := "cn" - collInfo := &model.Collection{ - CollectionID: 1, - Name: collectionName, - AutoID: false, - Fields: []*model.Field{ - { - FieldID: 1, - Name: "field110", - IsPrimaryKey: false, - Description: "", - DataType: schemapb.DataType_FloatVector, - TypeParams: []*commonpb.KeyValuePair{ - { - Key: "field110-k1", - Value: "field110-v1", - }, - }, - IndexParams: []*commonpb.KeyValuePair{ - { - Key: "field110-i1", - Value: "field110-v1", - }, - }, - }, - }, - Partitions: []*model.Partition{ - { - PartitionID: 1, - PartitionName: Params.CommonCfg.DefaultPartitionName, - PartitionCreatedTimestamp: 0, - }, - }, - Aliases: []string{"a", "b"}, - } - collections := map[string]*model.Collection{collectionName: collInfo} - mc.On("ListCollections").Return(collections, nil) - - alias1 := *collInfo - alias1.Name = collInfo.Aliases[0] - - alias2 := *collInfo - alias2.Name = collInfo.Aliases[1] - mc.On("ListAliases").Return([]*model.Alias{ - { - CollectionID: collInfo.CollectionID, - Name: collInfo.Aliases[0], - }, - { - CollectionID: collInfo.CollectionID, - Name: collInfo.Aliases[1], - }, - }, nil) - - mt := &MetaTable{} - mt.catalog = mc - mt.reloadFromCatalog() - - assert.True(t, len(mt.collID2Meta) == 1) - assert.Equal(t, mt.collID2Meta[1], *collInfo) - - assert.True(t, len(mt.collName2ID) == 1) - - assert.True(t, len(mt.collAlias2ID) == 2) - ret, ok := mt.collAlias2ID[collInfo.Aliases[0]] - assert.True(t, ok) - assert.Equal(t, int64(1), ret) -} diff --git a/internal/rootcoord/mock_test.go b/internal/rootcoord/mock_test.go new file mode 100644 index 0000000000..0f3b2a624c --- /dev/null +++ b/internal/rootcoord/mock_test.go @@ -0,0 +1,840 @@ +package rootcoord + +import ( + "context" + "errors" + "math/rand" + "os" + + "github.com/milvus-io/milvus/internal/mq/msgstream" + + 
"github.com/milvus-io/milvus/internal/proto/indexpb" + + "github.com/milvus-io/milvus/internal/allocator" + "github.com/milvus-io/milvus/internal/kv" + "github.com/milvus-io/milvus/internal/tso" + + "github.com/milvus-io/milvus/internal/log" + "github.com/milvus-io/milvus/internal/metastore/model" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/datapb" + pb "github.com/milvus-io/milvus/internal/proto/etcdpb" + "github.com/milvus-io/milvus/internal/proto/internalpb" + "github.com/milvus-io/milvus/internal/proto/proxypb" + "github.com/milvus-io/milvus/internal/proto/querypb" + "github.com/milvus-io/milvus/internal/types" + "github.com/milvus-io/milvus/internal/util/dependency" + "github.com/milvus-io/milvus/internal/util/metricsinfo" + "github.com/milvus-io/milvus/internal/util/retry" + "github.com/milvus-io/milvus/internal/util/sessionutil" + "github.com/milvus-io/milvus/internal/util/typeutil" + "go.uber.org/zap" +) + +const ( + TestProxyID = 100 + TestRootCoordID = 200 +) + +type mockMetaTable struct { + IMetaTable + ListCollectionsFunc func(ctx context.Context, ts Timestamp) ([]*model.Collection, error) + AddCollectionFunc func(ctx context.Context, coll *model.Collection) error + GetCollectionByNameFunc func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) + GetCollectionByIDFunc func(ctx context.Context, collectionID UniqueID, ts Timestamp) (*model.Collection, error) + ChangeCollectionStateFunc func(ctx context.Context, collectionID UniqueID, state pb.CollectionState, ts Timestamp) error + RemoveCollectionFunc func(ctx context.Context, collectionID UniqueID, ts Timestamp) error + AddPartitionFunc func(ctx context.Context, partition *model.Partition) error + ChangePartitionStateFunc func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state pb.PartitionState, ts Timestamp) error + RemovePartitionFunc func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error + CreateAliasFunc func(ctx context.Context, alias string, collectionName string, ts Timestamp) error + AlterAliasFunc func(ctx context.Context, alias string, collectionName string, ts Timestamp) error + DropAliasFunc func(ctx context.Context, alias string, ts Timestamp) error + IsAliasFunc func(name string) bool + ListAliasesByIDFunc func(collID UniqueID) []string +} + +func (m mockMetaTable) ListCollections(ctx context.Context, ts Timestamp) ([]*model.Collection, error) { + return m.ListCollectionsFunc(ctx, ts) +} + +func (m mockMetaTable) AddCollection(ctx context.Context, coll *model.Collection) error { + return m.AddCollectionFunc(ctx, coll) +} + +func (m mockMetaTable) GetCollectionByName(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return m.GetCollectionByNameFunc(ctx, collectionName, ts) +} + +func (m mockMetaTable) GetCollectionByID(ctx context.Context, collectionID UniqueID, ts Timestamp) (*model.Collection, error) { + return m.GetCollectionByIDFunc(ctx, collectionID, ts) +} + +func (m mockMetaTable) ChangeCollectionState(ctx context.Context, collectionID UniqueID, state pb.CollectionState, ts Timestamp) error { + return m.ChangeCollectionStateFunc(ctx, collectionID, state, ts) +} + +func (m mockMetaTable) RemoveCollection(ctx context.Context, collectionID UniqueID, ts Timestamp) error { + return m.RemoveCollectionFunc(ctx, collectionID, ts) +} + +func (m mockMetaTable) AddPartition(ctx context.Context, partition *model.Partition) error { + 
return m.AddPartitionFunc(ctx, partition) +} + +func (m mockMetaTable) ChangePartitionState(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state pb.PartitionState, ts Timestamp) error { + return m.ChangePartitionStateFunc(ctx, collectionID, partitionID, state, ts) +} + +func (m mockMetaTable) RemovePartition(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error { + return m.RemovePartitionFunc(ctx, collectionID, partitionID, ts) +} + +func (m mockMetaTable) CreateAlias(ctx context.Context, alias string, collectionName string, ts Timestamp) error { + return m.CreateAliasFunc(ctx, alias, collectionName, ts) +} + +func (m mockMetaTable) AlterAlias(ctx context.Context, alias string, collectionName string, ts Timestamp) error { + return m.AlterAliasFunc(ctx, alias, collectionName, ts) +} + +func (m mockMetaTable) DropAlias(ctx context.Context, alias string, ts Timestamp) error { + return m.DropAliasFunc(ctx, alias, ts) +} + +func (m mockMetaTable) IsAlias(name string) bool { + return m.IsAliasFunc(name) +} + +func (m mockMetaTable) ListAliasesByID(collID UniqueID) []string { + return m.ListAliasesByIDFunc(collID) +} + +func newMockMetaTable() *mockMetaTable { + return &mockMetaTable{} +} + +type mockIndexCoord struct { + types.IndexCoord + GetComponentStatesFunc func(ctx context.Context) (*internalpb.ComponentStates, error) + GetSegmentIndexStateFunc func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) + DropIndexFunc func(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) +} + +func newMockIndexCoord() *mockIndexCoord { + return &mockIndexCoord{} +} + +func (m mockIndexCoord) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) { + return m.GetComponentStatesFunc(ctx) +} + +func (m mockIndexCoord) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) { + return m.GetSegmentIndexStateFunc(ctx, req) +} + +func (m mockIndexCoord) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) { + return m.DropIndexFunc(ctx, req) +} + +type mockDataCoord struct { + types.DataCoord + GetComponentStatesFunc func(ctx context.Context) (*internalpb.ComponentStates, error) + WatchChannelsFunc func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) + AcquireSegmentLockFunc func(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) + ReleaseSegmentLockFunc func(ctx context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) + FlushFunc func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) + ImportFunc func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) +} + +func newMockDataCoord() *mockDataCoord { + return &mockDataCoord{} +} + +func (m *mockDataCoord) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) { + return m.GetComponentStatesFunc(ctx) +} + +func (m *mockDataCoord) WatchChannels(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) { + return m.WatchChannelsFunc(ctx, req) +} + +func (m *mockDataCoord) AcquireSegmentLock(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) { + return m.AcquireSegmentLockFunc(ctx, req) +} + +func (m *mockDataCoord) ReleaseSegmentLock(ctx 
context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) { + return m.ReleaseSegmentLockFunc(ctx, req) +} + +func (m *mockDataCoord) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) { + return m.FlushFunc(ctx, req) +} + +func (m *mockDataCoord) Import(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) { + return m.ImportFunc(ctx, req) +} + +type mockQueryCoord struct { + types.QueryCoord + GetSegmentInfoFunc func(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) + GetComponentStatesFunc func(ctx context.Context) (*internalpb.ComponentStates, error) + ReleaseCollectionFunc func(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) +} + +func (m mockQueryCoord) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) { + return m.GetSegmentInfoFunc(ctx, req) +} + +func (m mockQueryCoord) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) { + return m.GetComponentStatesFunc(ctx) +} + +func (m mockQueryCoord) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) { + return m.ReleaseCollectionFunc(ctx, req) +} + +func newMockQueryCoord() *mockQueryCoord { + return &mockQueryCoord{} +} + +func newMockIDAllocator() *allocator.MockGIDAllocator { + r := allocator.NewMockGIDAllocator() + r.AllocF = func(count uint32) (allocator.UniqueID, allocator.UniqueID, error) { + return 0, 0, nil + } + r.AllocOneF = func() (allocator.UniqueID, error) { + return 0, nil + } + return r +} + +func newMockTsoAllocator() *tso.MockAllocator { + r := tso.NewMockAllocator() + r.GenerateTSOF = func(count uint32) (uint64, error) { + return 0, nil + } + return r +} + +func newTxnKV() *kv.TxnKVMock { + r := kv.NewMockTxnKV() + r.SaveF = func(key, value string) error { + return nil + } + r.RemoveF = func(key string) error { + return nil + } + return r +} + +type mockProxy struct { + types.Proxy + InvalidateCollectionMetaCacheFunc func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) + InvalidateCredentialCacheFunc func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) + RefreshPolicyInfoCacheFunc func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) +} + +func (m mockProxy) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { + return m.InvalidateCollectionMetaCacheFunc(ctx, request) +} + +func (m mockProxy) InvalidateCredentialCache(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) { + return m.InvalidateCredentialCacheFunc(ctx, request) +} + +func (m mockProxy) RefreshPolicyInfoCache(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) { + return m.RefreshPolicyInfoCacheFunc(ctx, request) +} + +func newMockProxy() *mockProxy { + r := &mockProxy{} + r.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { + return succStatus(), nil + } + return r +} + +func newTestCore(opts ...Opt) *Core { + c := &Core{ + session: &sessionutil.Session{ServerID: TestRootCoordID}, + } + for _, opt := range opts { + opt(c) + } + return c +} + +func 
withValidProxyManager() Opt { + return func(c *Core) { + c.proxyClientManager = &proxyClientManager{ + proxyClient: make(map[UniqueID]types.Proxy), + } + p := newMockProxy() + p.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { + return succStatus(), nil + } + } +} + +func withInvalidProxyManager() Opt { + return func(c *Core) { + c.proxyClientManager = &proxyClientManager{ + proxyClient: make(map[UniqueID]types.Proxy), + } + p := newMockProxy() + p.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { + return succStatus(), errors.New("error mock InvalidateCollectionMetaCache") + } + c.proxyClientManager.proxyClient[TestProxyID] = p + } +} + +func withMeta(meta IMetaTable) Opt { + return func(c *Core) { + c.meta = meta + } +} + +func withInvalidMeta() Opt { + meta := newMockMetaTable() + meta.ListCollectionsFunc = func(ctx context.Context, ts Timestamp) ([]*model.Collection, error) { + return nil, errors.New("error mock ListCollections") + } + meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) { + return nil, errors.New("error mock GetCollectionByName") + } + meta.GetCollectionByIDFunc = func(ctx context.Context, collectionID typeutil.UniqueID, ts Timestamp) (*model.Collection, error) { + return nil, errors.New("error mock GetCollectionByID") + } + meta.AddPartitionFunc = func(ctx context.Context, partition *model.Partition) error { + return errors.New("error mock AddPartition") + } + meta.ChangePartitionStateFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state pb.PartitionState, ts Timestamp) error { + return errors.New("error mock ChangePartitionState") + } + meta.CreateAliasFunc = func(ctx context.Context, alias string, collectionName string, ts Timestamp) error { + return errors.New("error mock CreateAlias") + } + meta.AlterAliasFunc = func(ctx context.Context, alias string, collectionName string, ts Timestamp) error { + return errors.New("error mock AlterAlias") + } + meta.DropAliasFunc = func(ctx context.Context, alias string, ts Timestamp) error { + return errors.New("error mock DropAlias") + } + return withMeta(meta) +} + +func withIDAllocator(idAllocator allocator.GIDAllocator) Opt { + return func(c *Core) { + c.idAllocator = idAllocator + } +} + +func withValidIDAllocator() Opt { + idAllocator := newMockIDAllocator() + idAllocator.AllocOneF = func() (allocator.UniqueID, error) { + return rand.Int63(), nil + } + return withIDAllocator(idAllocator) +} + +func withInvalidIDAllocator() Opt { + idAllocator := newMockIDAllocator() + idAllocator.AllocOneF = func() (allocator.UniqueID, error) { + return -1, errors.New("error mock AllocOne") + } + idAllocator.AllocF = func(count uint32) (allocator.UniqueID, allocator.UniqueID, error) { + return -1, -1, errors.New("error mock Alloc") + } + return withIDAllocator(idAllocator) +} + +func withQueryCoord(qc types.QueryCoord) Opt { + return func(c *Core) { + c.queryCoord = qc + } +} + +func withUnhealthyQueryCoord() Opt { + qc := newMockQueryCoord() + qc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Abnormal}, + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "error mock GetComponentStates"), + }, 
retry.Unrecoverable(errors.New("error mock GetComponentStates")) + } + return withQueryCoord(qc) +} + +func withInvalidQueryCoord() Opt { + qc := newMockQueryCoord() + qc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy}, + Status: succStatus(), + }, nil + } + qc.ReleaseCollectionFunc = func(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) { + return nil, errors.New("error mock ReleaseCollection") + } + qc.GetSegmentInfoFunc = func(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) { + return nil, errors.New("error mock GetSegmentInfo") + } + return withQueryCoord(qc) +} + +func withFailedQueryCoord() Opt { + qc := newMockQueryCoord() + qc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy}, + Status: succStatus(), + }, nil + } + qc.ReleaseCollectionFunc = func(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) { + return failStatus(commonpb.ErrorCode_UnexpectedError, "mock release collection error"), nil + } + qc.GetSegmentInfoFunc = func(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) { + return &querypb.GetSegmentInfoResponse{ + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock get segment info error"), + }, nil + } + return withQueryCoord(qc) +} + +func withValidQueryCoord() Opt { + qc := newMockQueryCoord() + qc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy}, + Status: succStatus(), + }, nil + } + qc.ReleaseCollectionFunc = func(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) { + return succStatus(), nil + } + qc.GetSegmentInfoFunc = func(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) { + return &querypb.GetSegmentInfoResponse{ + Status: succStatus(), + }, nil + } + return withQueryCoord(qc) +} + +func withIndexCoord(ic types.IndexCoord) Opt { + return func(c *Core) { + c.indexCoord = ic + } +} + +func withUnhealthyIndexCoord() Opt { + ic := newMockIndexCoord() + ic.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Abnormal}, + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "error mock GetComponentStates"), + }, retry.Unrecoverable(errors.New("error mock GetComponentStates")) + } + return withIndexCoord(ic) +} + +func withInvalidIndexCoord() Opt { + ic := newMockIndexCoord() + ic.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy}, + Status: succStatus(), + }, nil + } + ic.GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) { + return &indexpb.GetSegmentIndexStateResponse{}, errors.New("error mock GetSegmentIndexState") + } + ic.DropIndexFunc = func(ctx context.Context, req 
*indexpb.DropIndexRequest) (*commonpb.Status, error) { + return succStatus(), errors.New("error mock DropIndex") + } + return withIndexCoord(ic) +} + +func withFailedIndexCoord() Opt { + ic := newMockIndexCoord() + ic.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy}, + Status: succStatus(), + }, nil + } + ic.GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) { + return &indexpb.GetSegmentIndexStateResponse{Status: failStatus(commonpb.ErrorCode_UnexpectedError, "reason mock GetSegmentIndexState")}, nil + } + ic.DropIndexFunc = func(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) { + return failStatus(commonpb.ErrorCode_UnexpectedError, "reason mock DropIndex"), nil + } + return withIndexCoord(ic) +} + +func withValidIndexCoord() Opt { + ic := newMockIndexCoord() + ic.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy}, + Status: succStatus(), + }, nil + } + ic.GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) { + return &indexpb.GetSegmentIndexStateResponse{Status: succStatus()}, nil + } + ic.DropIndexFunc = func(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) { + return succStatus(), nil + } + return withIndexCoord(ic) +} + +// cleanTestEnv clean test environment, for example, files generated by rocksmq. +func cleanTestEnv() { + path := "/tmp/milvus" + if err := os.RemoveAll(path); err != nil { + log.Warn("failed to clean test directories", zap.Error(err), zap.String("path", path)) + } + log.Debug("clean test environment", zap.String("path", path)) +} + +func withTtSynchronizer(ticker *timetickSync) Opt { + return func(c *Core) { + c.chanTimeTick = ticker + } +} + +func newRocksMqTtSynchronizer() *timetickSync { + Params.InitOnce() + Params.RootCoordCfg.DmlChannelNum = 4 + ctx := context.Background() + factory := dependency.NewDefaultFactory(true) + chans := map[UniqueID][]string{} + ticker := newTimeTickSync(ctx, TestRootCoordID, factory, chans) + return ticker +} + +// cleanTestEnv should be called if tested with this option. 
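// The with* helpers in this file follow the functional-options pattern: each one returns an Opt
// closure that fills in a single collaborator (or state) on Core, and newTestCore applies them in
// order. A usage sketch (annotation only, not part of this patch; the combination shown is
// arbitrary):
//
//	c := newTestCore(
//		withHealthyCode(),
//		withValidScheduler(),
//		withValidProxyManager(),
//	)
//
// Collaborators that no option supplies stay at their zero value, so a test has to provide every
// dependency the exercised code path touches.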
+func withRocksMqTtSynchronizer() Opt { + ticker := newRocksMqTtSynchronizer() + return withTtSynchronizer(ticker) +} + +func withDataCoord(dc types.DataCoord) Opt { + return func(c *Core) { + c.dataCoord = dc + } +} + +func withUnhealthyDataCoord() Opt { + dc := newMockDataCoord() + dc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Abnormal}, + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "error mock GetComponentStates"), + }, retry.Unrecoverable(errors.New("error mock GetComponentStates")) + } + return withDataCoord(dc) +} + +func withInvalidDataCoord() Opt { + dc := newMockDataCoord() + dc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy}, + Status: succStatus(), + }, nil + } + dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) { + return nil, errors.New("error mock WatchChannels") + } + dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) { + return nil, errors.New("error mock WatchChannels") + } + dc.AcquireSegmentLockFunc = func(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) { + return nil, errors.New("error mock AddSegRefLock") + } + dc.ReleaseSegmentLockFunc = func(ctx context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) { + return nil, errors.New("error mock ReleaseSegRefLock") + } + dc.FlushFunc = func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) { + return nil, errors.New("error mock Flush") + } + dc.ImportFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) { + return nil, errors.New("error mock Import") + } + return withDataCoord(dc) +} + +func withFailedDataCoord() Opt { + dc := newMockDataCoord() + dc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy}, + Status: succStatus(), + }, nil + } + dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) { + return &datapb.WatchChannelsResponse{ + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock watch channels error"), + }, nil + } + dc.AcquireSegmentLockFunc = func(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) { + return failStatus(commonpb.ErrorCode_UnexpectedError, "mock add seg ref lock error"), nil + } + dc.ReleaseSegmentLockFunc = func(ctx context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) { + return failStatus(commonpb.ErrorCode_UnexpectedError, "mock release seg ref lock error"), nil + } + dc.FlushFunc = func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) { + return &datapb.FlushResponse{ + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock flush error"), + }, nil + } + dc.ImportFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) { + return &datapb.ImportTaskResponse{ + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock import error"), + }, nil 
+ } + return withDataCoord(dc) +} + +func withValidDataCoord() Opt { + dc := newMockDataCoord() + dc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) { + return &internalpb.ComponentStates{ + State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy}, + Status: succStatus(), + }, nil + } + dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) { + return &datapb.WatchChannelsResponse{ + Status: succStatus(), + }, nil + } + dc.AcquireSegmentLockFunc = func(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) { + return succStatus(), nil + } + dc.ReleaseSegmentLockFunc = func(ctx context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) { + return succStatus(), nil + } + dc.FlushFunc = func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) { + return &datapb.FlushResponse{ + Status: succStatus(), + }, nil + } + dc.ImportFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) { + return &datapb.ImportTaskResponse{ + Status: succStatus(), + }, nil + } + return withDataCoord(dc) +} + +func withStateCode(code internalpb.StateCode) Opt { + return func(c *Core) { + c.UpdateStateCode(code) + } +} + +func withHealthyCode() Opt { + return withStateCode(internalpb.StateCode_Healthy) +} + +func withAbnormalCode() Opt { + return withStateCode(internalpb.StateCode_Abnormal) +} + +type mockScheduler struct { + IScheduler + AddTaskFunc func(t taskV2) error +} + +func newMockScheduler() *mockScheduler { + return &mockScheduler{} +} + +func (m mockScheduler) AddTask(t taskV2) error { + if m.AddTaskFunc != nil { + return m.AddTaskFunc(t) + } + return nil +} + +func withScheduler(sched IScheduler) Opt { + return func(c *Core) { + c.scheduler = sched + } +} + +func withValidScheduler() Opt { + sched := newMockScheduler() + sched.AddTaskFunc = func(t taskV2) error { + t.NotifyDone(nil) + return nil + } + return withScheduler(sched) +} + +func withInvalidScheduler() Opt { + sched := newMockScheduler() + sched.AddTaskFunc = func(t taskV2) error { + return errors.New("error mock AddTask") + } + return withScheduler(sched) +} + +func withTaskFailScheduler() Opt { + sched := newMockScheduler() + sched.AddTaskFunc = func(t taskV2) error { + err := errors.New("error mock task fail") + t.NotifyDone(err) + return nil + } + return withScheduler(sched) +} + +func withTsoAllocator(alloc tso.Allocator) Opt { + return func(c *Core) { + c.tsoAllocator = alloc + } +} + +func withInvalidTsoAllocator() Opt { + alloc := newMockTsoAllocator() + alloc.GenerateTSOF = func(count uint32) (uint64, error) { + return 0, errors.New("error mock GenerateTSO") + } + return withTsoAllocator(alloc) +} + +func withMetricsCacheManager() Opt { + return func(c *Core) { + m := metricsinfo.NewMetricsCacheManager() + c.metricsCacheManager = m + } +} + +type mockBroker struct { + Broker + + ReleaseCollectionFunc func(ctx context.Context, collectionID UniqueID) error + GetQuerySegmentInfoFunc func(ctx context.Context, collectionID int64, segIDs []int64) (retResp *querypb.GetSegmentInfoResponse, retErr error) + + WatchChannelsFunc func(ctx context.Context, info *watchInfo) error + UnwatchChannelsFunc func(ctx context.Context, info *watchInfo) error + AddSegRefLockFunc func(ctx context.Context, taskID int64, segIDs []int64) error + ReleaseSegRefLockFunc func(ctx context.Context, taskID int64, segIDs []int64) 
error + FlushFunc func(ctx context.Context, cID int64, segIDs []int64) error + ImportFunc func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) + + DropCollectionIndexFunc func(ctx context.Context, collID UniqueID) error +} + +func newMockBroker() *mockBroker { + return &mockBroker{} +} + +func (b mockBroker) WatchChannels(ctx context.Context, info *watchInfo) error { + return b.WatchChannelsFunc(ctx, info) +} + +func (b mockBroker) UnwatchChannels(ctx context.Context, info *watchInfo) error { + return b.UnwatchChannelsFunc(ctx, info) +} + +func (b mockBroker) ReleaseCollection(ctx context.Context, collectionID UniqueID) error { + return b.ReleaseCollectionFunc(ctx, collectionID) +} + +func (b mockBroker) DropCollectionIndex(ctx context.Context, collID UniqueID) error { + return b.DropCollectionIndexFunc(ctx, collID) +} + +func withBroker(b Broker) Opt { + return func(c *Core) { + c.broker = b + } +} + +type mockGarbageCollector struct { + GarbageCollector + GcCollectionDataFunc func(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error + GcPartitionDataFunc func(ctx context.Context, pChannels []string, partition *model.Partition, ts typeutil.Timestamp) error +} + +func (m mockGarbageCollector) GcCollectionData(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error { + return m.GcCollectionDataFunc(ctx, coll, ts) +} + +func (m mockGarbageCollector) GcPartitionData(ctx context.Context, pChannels []string, partition *model.Partition, ts typeutil.Timestamp) error { + return m.GcPartitionDataFunc(ctx, pChannels, partition, ts) +} + +func newMockGarbageCollector() *mockGarbageCollector { + return &mockGarbageCollector{} +} + +func withGarbageCollector(gc GarbageCollector) Opt { + return func(c *Core) { + c.garbageCollector = gc + } +} + +func newMockFailStream() *msgstream.MockMsgStream { + stream := msgstream.NewMockMsgStream() + stream.BroadcastFunc = func(pack *msgstream.MsgPack) error { + return errors.New("error mock Broadcast") + } + stream.BroadcastMarkFunc = func(pack *msgstream.MsgPack) (map[string][]msgstream.MessageID, error) { + return nil, errors.New("error mock BroadcastMark") + } + stream.AsProducerFunc = func(channels []string) { + } + return stream +} + +func newMockFailStreamFactory() *msgstream.MockMqFactory { + f := msgstream.NewMockMqFactory() + f.NewMsgStreamFunc = func(ctx context.Context) (msgstream.MsgStream, error) { + return newMockFailStream(), nil + } + return f +} + +func newTickerWithMockFailStream() *timetickSync { + factory := newMockFailStreamFactory() + return newTickerWithFactory(factory) +} + +func newMockNormalStream() *msgstream.MockMsgStream { + stream := msgstream.NewMockMsgStream() + stream.BroadcastMarkFunc = func(pack *msgstream.MsgPack) (map[string][]msgstream.MessageID, error) { + return map[string][]msgstream.MessageID{}, nil + } + stream.AsProducerFunc = func(channels []string) { + } + return stream +} + +func newMockNormalStreamFactory() *msgstream.MockMqFactory { + f := msgstream.NewMockMqFactory() + f.NewMsgStreamFunc = func(ctx context.Context) (msgstream.MsgStream, error) { + return newMockNormalStream(), nil + } + return f +} + +func newTickerWithMockNormalStream() *timetickSync { + factory := newMockNormalStreamFactory() + return newTickerWithFactory(factory) +} + +func newTickerWithFactory(factory msgstream.Factory) *timetickSync { + Params.InitOnce() + Params.RootCoordCfg.DmlChannelNum = 4 + ctx := context.Background() + chans := map[UniqueID][]string{} + ticker 
:= newTimeTickSync(ctx, TestRootCoordID, factory, chans) + return ticker +} diff --git a/internal/rootcoord/proxy_client_manager.go b/internal/rootcoord/proxy_client_manager.go index e363ae9916..2eefb25f8d 100644 --- a/internal/rootcoord/proxy_client_manager.go +++ b/internal/rootcoord/proxy_client_manager.go @@ -18,6 +18,7 @@ package rootcoord import ( "context" + "errors" "fmt" "sync" @@ -31,8 +32,10 @@ import ( "github.com/milvus-io/milvus/internal/util/sessionutil" ) +type proxyCreator func(sess *sessionutil.Session) (types.Proxy, error) + type proxyClientManager struct { - core *Core + creator proxyCreator lock sync.RWMutex proxyClient map[int64]types.Proxy helper proxyClientManagerHelper @@ -46,9 +49,9 @@ var defaultClientManagerHelper = proxyClientManagerHelper{ afterConnect: func() {}, } -func newProxyClientManager(c *Core) *proxyClientManager { +func newProxyClientManager(creator proxyCreator) *proxyClientManager { return &proxyClientManager{ - core: c, + creator: creator, proxyClient: make(map[int64]types.Proxy), helper: defaultClientManagerHelper, } @@ -72,7 +75,7 @@ func (p *proxyClientManager) AddProxyClient(session *sessionutil.Session) { } func (p *proxyClientManager) connect(session *sessionutil.Session) { - pc, err := p.core.NewProxyClient(session) + pc, err := p.creator(session) if err != nil { log.Warn("failed to create proxy client", zap.String("address", session.Address), zap.Int64("serverID", session.ServerID), zap.Error(err)) return @@ -130,32 +133,7 @@ func (p *proxyClientManager) InvalidateCollectionMetaCache(ctx context.Context, return group.Wait() } -func (p *proxyClientManager) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) error { - p.lock.Lock() - defer p.lock.Unlock() - - if len(p.proxyClient) == 0 { - log.Warn("proxy client is empty, ReleaseDQLMessageStream will not send to any client") - return nil - } - - group := &errgroup.Group{} - for k, v := range p.proxyClient { - k, v := k, v - group.Go(func() error { - sta, err := v.ReleaseDQLMessageStream(ctx, in) - if err != nil { - return fmt.Errorf("ReleaseDQLMessageStream failed, proxyID = %d, err = %s", k, err) - } - if sta.ErrorCode != commonpb.ErrorCode_Success { - return fmt.Errorf("ReleaseDQLMessageStream failed, proxyID = %d, err = %s", k, sta.Reason) - } - return nil - }) - } - return group.Wait() -} - +// InvalidateCredentialCache TODO: too many codes similar to InvalidateCollectionMetaCache. func (p *proxyClientManager) InvalidateCredentialCache(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) error { p.lock.Lock() defer p.lock.Unlock() @@ -182,6 +160,7 @@ func (p *proxyClientManager) InvalidateCredentialCache(ctx context.Context, requ return group.Wait() } +// UpdateCredentialCache TODO: too many codes similar to InvalidateCollectionMetaCache. func (p *proxyClientManager) UpdateCredentialCache(ctx context.Context, request *proxypb.UpdateCredCacheRequest) error { p.lock.Lock() defer p.lock.Unlock() @@ -208,6 +187,7 @@ func (p *proxyClientManager) UpdateCredentialCache(ctx context.Context, request return group.Wait() } +// RefreshPolicyInfoCache TODO: too many codes similar to InvalidateCollectionMetaCache. 
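// One possible shape for the shared helper these TODOs point at (a sketch only, not part of this
// patch; the broadcast name is hypothetical):
//
//	func (p *proxyClientManager) broadcast(ctx context.Context,
//		call func(context.Context, types.Proxy) (*commonpb.Status, error)) error {
//		p.lock.Lock()
//		defer p.lock.Unlock()
//		group := &errgroup.Group{}
//		for k, v := range p.proxyClient {
//			k, v := k, v
//			group.Go(func() error {
//				status, err := call(ctx, v)
//				if err != nil {
//					return fmt.Errorf("proxy rpc failed, proxyID = %d, err = %s", k, err)
//				}
//				if status.GetErrorCode() != commonpb.ErrorCode_Success {
//					return errors.New(status.GetReason())
//				}
//				return nil
//			})
//		}
//		return group.Wait()
//	}
//
// Each cache-invalidation method would then only adapt its request into the call closure instead
// of repeating the fan-out loop.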
func (p *proxyClientManager) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.RefreshPolicyInfoCacheRequest) error { p.lock.Lock() defer p.lock.Unlock() @@ -221,10 +201,13 @@ func (p *proxyClientManager) RefreshPolicyInfoCache(ctx context.Context, req *pr for k, v := range p.proxyClient { k, v := k, v group.Go(func() error { - _, err := v.RefreshPolicyInfoCache(ctx, req) + status, err := v.RefreshPolicyInfoCache(ctx, req) if err != nil { return fmt.Errorf("RefreshPolicyInfoCache failed, proxyID = %d, err = %s", k, err) } + if status.GetErrorCode() != commonpb.ErrorCode_Success { + return errors.New(status.GetReason()) + } return nil }) } diff --git a/internal/rootcoord/proxy_client_manager_test.go b/internal/rootcoord/proxy_client_manager_test.go index 3b6c6dc9c5..8a173e95ac 100644 --- a/internal/rootcoord/proxy_client_manager_test.go +++ b/internal/rootcoord/proxy_client_manager_test.go @@ -19,8 +19,12 @@ package rootcoord import ( "context" "errors" + "fmt" + "sync" "testing" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/stretchr/testify/assert" "github.com/milvus-io/milvus/internal/proto/proxypb" @@ -29,6 +33,74 @@ import ( "github.com/milvus-io/milvus/internal/util/sessionutil" ) +type proxyMock struct { + types.Proxy + collArray []string + collIDs []UniqueID + mutex sync.Mutex + + returnError bool + returnGrpcError bool +} + +func (p *proxyMock) Stop() error { + return nil +} + +func (p *proxyMock) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { + p.mutex.Lock() + defer p.mutex.Unlock() + if p.returnError { + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + }, nil + } + if p.returnGrpcError { + return nil, fmt.Errorf("grpc error") + } + p.collArray = append(p.collArray, request.CollectionName) + p.collIDs = append(p.collIDs, request.CollectionID) + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + }, nil +} + +func (p *proxyMock) GetCollArray() []string { + p.mutex.Lock() + defer p.mutex.Unlock() + ret := make([]string, 0, len(p.collArray)) + ret = append(ret, p.collArray...) 
+ return ret +} + +func (p *proxyMock) GetCollIDs() []UniqueID { + p.mutex.Lock() + defer p.mutex.Unlock() + ret := p.collIDs + return ret +} + +func (p *proxyMock) InvalidateCredentialCache(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) { + if p.returnError { + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_UnexpectedError, + }, nil + } + if p.returnGrpcError { + return nil, fmt.Errorf("grpc error") + } + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + Reason: "", + }, nil +} + +func (p *proxyMock) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) { + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + }, nil +} + func TestProxyClientManager_GetProxyClients(t *testing.T) { Params.Init() @@ -45,7 +117,7 @@ func TestProxyClientManager_GetProxyClients(t *testing.T) { }, ) - pcm := newProxyClientManager(core) + pcm := newProxyClientManager(core.proxyCreator) session := &sessionutil.Session{ ServerID: 100, @@ -72,7 +144,7 @@ func TestProxyClientManager_AddProxyClient(t *testing.T) { }, ) - pcm := newProxyClientManager(core) + pcm := newProxyClientManager(core.proxyCreator) session := &sessionutil.Session{ ServerID: 100, @@ -83,197 +155,145 @@ func TestProxyClientManager_AddProxyClient(t *testing.T) { } func TestProxyClientManager_InvalidateCollectionMetaCache(t *testing.T) { - Params.Init() - ctx := context.Background() - - core, err := NewCore(ctx, nil) - assert.Nil(t, err) - cli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.Nil(t, err) - defer cli.Close() - core.etcdCli = cli - - pcm := newProxyClientManager(core) - - ch := make(chan struct{}) - pcm.helper = proxyClientManagerHelper{ - afterConnect: func() { ch <- struct{}{} }, - } - - err = pcm.InvalidateCollectionMetaCache(ctx, nil) - assert.NoError(t, err) - - core.SetNewProxyClient( - func(se *sessionutil.Session) (types.Proxy, error) { - return &proxyMock{}, nil - }, - ) - - session := &sessionutil.Session{ - ServerID: 100, - Address: "localhost", - } - - pcm.AddProxyClient(session) - <-ch - - err = pcm.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{ - CollectionName: "collection0", + t.Run("empty proxy list", func(t *testing.T) { + ctx := context.Background() + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{}} + err := pcm.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{}) + assert.NoError(t, err) }) - assert.NoError(t, err) - // test releaseDQLMessageStream failed - for _, v := range pcm.proxyClient { - v.(*proxyMock).returnError = true - } - err = pcm.InvalidateCollectionMetaCache(ctx, nil) - assert.Error(t, err) + t.Run("mock rpc error", func(t *testing.T) { + ctx := context.Background() + p1 := newMockProxy() + p1.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { + return succStatus(), errors.New("error mock InvalidateCollectionMetaCache") + } + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{ + TestProxyID: p1, + }} + err := pcm.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{}) + assert.Error(t, err) + }) - for _, v := range pcm.proxyClient { - v.(*proxyMock).returnGrpcError = true - } - err = pcm.InvalidateCollectionMetaCache(ctx, nil) - assert.Error(t, err) -} + t.Run("mock error code", func(t *testing.T) { + ctx := context.Background() + p1 := newMockProxy() + 
p1.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { + return failStatus(commonpb.ErrorCode_UnexpectedError, "error mock error code"), nil + } + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{ + TestProxyID: p1, + }} + err := pcm.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{}) + assert.Error(t, err) + }) -func TestProxyClientManager_ReleaseDQLMessageStream(t *testing.T) { - Params.Init() - ctx := context.Background() - - core, err := NewCore(ctx, nil) - assert.Nil(t, err) - cli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.Nil(t, err) - defer cli.Close() - core.etcdCli = cli - - pcm := newProxyClientManager(core) - - ch := make(chan struct{}) - pcm.helper = proxyClientManagerHelper{ - afterConnect: func() { ch <- struct{}{} }, - } - - err = pcm.ReleaseDQLMessageStream(ctx, nil) - assert.NoError(t, err) - - core.SetNewProxyClient( - func(se *sessionutil.Session) (types.Proxy, error) { - return &proxyMock{}, nil - }, - ) - - session := &sessionutil.Session{ - ServerID: 100, - Address: "localhost", - } - - pcm.AddProxyClient(session) - <-ch - - err = pcm.ReleaseDQLMessageStream(ctx, nil) - assert.NoError(t, err) - - // test releaseDQLMessageStream failed - for _, v := range pcm.proxyClient { - v.(*proxyMock).returnError = true - } - err = pcm.ReleaseDQLMessageStream(ctx, nil) - assert.Error(t, err) - - for _, v := range pcm.proxyClient { - v.(*proxyMock).returnGrpcError = true - } - err = pcm.ReleaseDQLMessageStream(ctx, nil) - assert.Error(t, err) + t.Run("normal case", func(t *testing.T) { + ctx := context.Background() + p1 := newMockProxy() + p1.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { + return succStatus(), nil + } + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{ + TestProxyID: p1, + }} + err := pcm.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{}) + assert.NoError(t, err) + }) } func TestProxyClientManager_InvalidateCredentialCache(t *testing.T) { - Params.Init() - ctx := context.Background() + t.Run("empty proxy list", func(t *testing.T) { + ctx := context.Background() + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{}} + err := pcm.InvalidateCredentialCache(ctx, &proxypb.InvalidateCredCacheRequest{}) + assert.NoError(t, err) + }) - core, err := NewCore(ctx, nil) - assert.Nil(t, err) - cli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.Nil(t, err) - defer cli.Close() - core.etcdCli = cli + t.Run("mock rpc error", func(t *testing.T) { + ctx := context.Background() + p1 := newMockProxy() + p1.InvalidateCredentialCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) { + return succStatus(), errors.New("error mock InvalidateCredentialCache") + } + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{ + TestProxyID: p1, + }} + err := pcm.InvalidateCredentialCache(ctx, &proxypb.InvalidateCredCacheRequest{}) + assert.Error(t, err) + }) - pcm := newProxyClientManager(core) + t.Run("mock error code", func(t *testing.T) { + ctx := context.Background() + p1 := newMockProxy() + p1.InvalidateCredentialCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) { + return failStatus(commonpb.ErrorCode_UnexpectedError, "error mock error code"), nil + } + pcm := 
&proxyClientManager{proxyClient: map[int64]types.Proxy{ + TestProxyID: p1, + }} + err := pcm.InvalidateCredentialCache(ctx, &proxypb.InvalidateCredCacheRequest{}) + assert.Error(t, err) + }) - ch := make(chan struct{}) - pcm.helper = proxyClientManagerHelper{ - afterConnect: func() { ch <- struct{}{} }, - } - - err = pcm.InvalidateCredentialCache(ctx, nil) - assert.NoError(t, err) - - core.SetNewProxyClient( - func(se *sessionutil.Session) (types.Proxy, error) { - return &proxyMock{}, nil - }, - ) - - session := &sessionutil.Session{ - ServerID: 100, - Address: "localhost", - } - - pcm.AddProxyClient(session) - <-ch - - err = pcm.InvalidateCredentialCache(ctx, nil) - assert.NoError(t, err) - - // test releaseDQLMessageStream failed - for _, v := range pcm.proxyClient { - v.(*proxyMock).returnError = true - } - err = pcm.InvalidateCredentialCache(ctx, nil) - assert.Error(t, err) - - for _, v := range pcm.proxyClient { - v.(*proxyMock).returnGrpcError = true - } - err = pcm.InvalidateCredentialCache(ctx, nil) - assert.Error(t, err) + t.Run("normal case", func(t *testing.T) { + ctx := context.Background() + p1 := newMockProxy() + p1.InvalidateCredentialCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) { + return succStatus(), nil + } + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{ + TestProxyID: p1, + }} + err := pcm.InvalidateCredentialCache(ctx, &proxypb.InvalidateCredCacheRequest{}) + assert.NoError(t, err) + }) } func TestProxyClientManager_RefreshPolicyInfoCache(t *testing.T) { - Params.Init() - ctx := context.Background() + t.Run("empty proxy list", func(t *testing.T) { + ctx := context.Background() + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{}} + err := pcm.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{}) + assert.NoError(t, err) + }) - core, err := NewCore(ctx, nil) - assert.Nil(t, err) - cli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.Nil(t, err) - defer cli.Close() - core.etcdCli = cli + t.Run("mock rpc error", func(t *testing.T) { + ctx := context.Background() + p1 := newMockProxy() + p1.RefreshPolicyInfoCacheFunc = func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) { + return succStatus(), errors.New("error mock RefreshPolicyInfoCache") + } + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{ + TestProxyID: p1, + }} + err := pcm.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{}) + assert.Error(t, err) + }) - pcm := newProxyClientManager(core) - - ch := make(chan struct{}) - pcm.helper = proxyClientManagerHelper{ - afterConnect: func() { ch <- struct{}{} }, - } - err = pcm.RefreshPolicyInfoCache(ctx, nil) - assert.NoError(t, err) - - core.SetNewProxyClient( - func(se *sessionutil.Session) (types.Proxy, error) { - return &proxyMock{}, nil - }, - ) - - session := &sessionutil.Session{ - ServerID: 100, - Address: "localhost", - } - pcm.AddProxyClient(session) - <-ch - - err = pcm.RefreshPolicyInfoCache(ctx, nil) - assert.NoError(t, err) + t.Run("mock error code", func(t *testing.T) { + ctx := context.Background() + p1 := newMockProxy() + p1.RefreshPolicyInfoCacheFunc = func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) { + return failStatus(commonpb.ErrorCode_UnexpectedError, "error mock error code"), nil + } + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{ + TestProxyID: p1, + }} + err := 
pcm.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{}) + assert.Error(t, err) + }) + t.Run("normal case", func(t *testing.T) { + ctx := context.Background() + p1 := newMockProxy() + p1.RefreshPolicyInfoCacheFunc = func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) { + return succStatus(), nil + } + pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{ + TestProxyID: p1, + }} + err := pcm.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{}) + assert.NoError(t, err) + }) } diff --git a/internal/rootcoord/redo.go b/internal/rootcoord/redo.go new file mode 100644 index 0000000000..a96f2d1db7 --- /dev/null +++ b/internal/rootcoord/redo.go @@ -0,0 +1,57 @@ +package rootcoord + +import ( + "context" + "time" + + "github.com/milvus-io/milvus/internal/log" + "go.uber.org/zap" +) + +type baseRedoTask struct { + syncTodoStep []Step // steps to execute synchronously + asyncTodoStep []Step // steps to execute asynchronously +} + +func newBaseRedoTask() *baseRedoTask { + return &baseRedoTask{ + syncTodoStep: make([]Step, 0), + asyncTodoStep: make([]Step, 0), + } +} + +func (b *baseRedoTask) AddSyncStep(step Step) { + b.syncTodoStep = append(b.syncTodoStep, step) +} + +func (b *baseRedoTask) AddAsyncStep(step Step) { + b.asyncTodoStep = append(b.asyncTodoStep, step) +} + +func (b *baseRedoTask) redoAsyncSteps() { + // You cannot just use the ctx of task, since it will be canceled after response is returned. + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + for i := 0; i < len(b.asyncTodoStep); i++ { + todo := b.asyncTodoStep[i] + if err := todo.Execute(ctx); err != nil { + // You depend on the collection meta to do other gc. + // TODO: add ddl logger after other service can be idempotent enough, then you can do separate steps + // independently. 
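// Stop at the first failing async step: the steps that follow may depend on state this one was
// supposed to produce or clean up, so continuing could leave even more garbage behind.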
+ log.Error("failed to execute step, garbage may be generated", zap.Error(err)) + return + } + } +} + +func (b *baseRedoTask) Execute(ctx context.Context) error { + for i := 0; i < len(b.syncTodoStep); i++ { + todo := b.syncTodoStep[i] + if err := todo.Execute(ctx); err != nil { + log.Error("failed to execute step", zap.Error(err)) + return err + } + } + go b.redoAsyncSteps() + return nil +} diff --git a/internal/rootcoord/redo_test.go b/internal/rootcoord/redo_test.go new file mode 100644 index 0000000000..c78bbaadac --- /dev/null +++ b/internal/rootcoord/redo_test.go @@ -0,0 +1,119 @@ +package rootcoord + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type mockFailStep struct { + calledChan chan struct{} + called bool +} + +func newMockFailStep() *mockFailStep { + return &mockFailStep{calledChan: make(chan struct{}, 1), called: false} +} + +func (m *mockFailStep) Execute(ctx context.Context) error { + m.called = true + m.calledChan <- struct{}{} + return errors.New("error mock Execute") +} + +type mockNormalStep struct { + calledChan chan struct{} + called bool +} + +func newMockNormalStep() *mockNormalStep { + return &mockNormalStep{calledChan: make(chan struct{}, 1), called: false} +} + +func (m *mockNormalStep) Execute(ctx context.Context) error { + m.called = true + m.calledChan <- struct{}{} + return nil +} + +func Test_baseRedoTask_redoAsyncSteps(t *testing.T) { + t.Run("partial error", func(t *testing.T) { + redo := newBaseRedoTask() + steps := []Step{newMockNormalStep(), newMockFailStep(), newMockNormalStep()} + for _, step := range steps { + redo.AddAsyncStep(step) + } + redo.redoAsyncSteps() + assert.True(t, steps[0].(*mockNormalStep).called) + assert.False(t, steps[2].(*mockNormalStep).called) + }) + + t.Run("normal case", func(t *testing.T) { + redo := newBaseRedoTask() + n := 10 + steps := make([]Step, 0, n) + for i := 0; i < n; i++ { + steps = append(steps, newMockNormalStep()) + } + for _, step := range steps { + redo.AddAsyncStep(step) + } + redo.redoAsyncSteps() + for _, step := range steps { + assert.True(t, step.(*mockNormalStep).called) + } + }) +} + +func Test_baseRedoTask_Execute(t *testing.T) { + t.Run("sync not finished, no async task", func(t *testing.T) { + redo := newBaseRedoTask() + syncSteps := []Step{newMockFailStep()} + asyncNum := 10 + asyncSteps := make([]Step, 0, asyncNum) + for i := 0; i < asyncNum; i++ { + asyncSteps = append(asyncSteps, newMockNormalStep()) + } + for _, step := range asyncSteps { + redo.AddAsyncStep(step) + } + for _, step := range syncSteps { + redo.AddSyncStep(step) + } + err := redo.Execute(context.Background()) + assert.Error(t, err) + for _, step := range asyncSteps { + assert.False(t, step.(*mockNormalStep).called) + } + }) + + // TODO: sync finished, but some async fail. 
+ + t.Run("normal case", func(t *testing.T) { + redo := newBaseRedoTask() + syncNum := 10 + syncSteps := make([]Step, 0, syncNum) + asyncNum := 10 + asyncSteps := make([]Step, 0, asyncNum) + for i := 0; i < syncNum; i++ { + syncSteps = append(syncSteps, newMockNormalStep()) + } + for i := 0; i < asyncNum; i++ { + asyncSteps = append(asyncSteps, newMockNormalStep()) + } + for _, step := range asyncSteps { + redo.AddAsyncStep(step) + } + for _, step := range syncSteps { + redo.AddSyncStep(step) + } + err := redo.Execute(context.Background()) + assert.NoError(t, err) + for _, step := range asyncSteps { + <-step.(*mockNormalStep).calledChan + assert.True(t, step.(*mockNormalStep).called) + } + }) +} diff --git a/internal/rootcoord/root_coord.go b/internal/rootcoord/root_coord.go index 2b2b9bb08d..f92f99dfaa 100644 --- a/internal/rootcoord/root_coord.go +++ b/internal/rootcoord/root_coord.go @@ -28,26 +28,27 @@ import ( "syscall" "time" - "github.com/milvus-io/milvus/internal/allocator" - "github.com/milvus-io/milvus/internal/common" - "github.com/milvus-io/milvus/internal/kv" etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" - "github.com/milvus-io/milvus/internal/log" + + "github.com/milvus-io/milvus/internal/metastore/db/rootcoord" + + pb "github.com/milvus-io/milvus/internal/proto/etcdpb" + + "github.com/milvus-io/milvus/internal/kv" "github.com/milvus-io/milvus/internal/metastore" "github.com/milvus-io/milvus/internal/metastore/db/dao" "github.com/milvus-io/milvus/internal/metastore/db/dbcore" - rootcoord2 "github.com/milvus-io/milvus/internal/metastore/db/rootcoord" - "github.com/milvus-io/milvus/internal/metastore/kv/rootcoord" + kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv/rootcoord" + + "github.com/milvus-io/milvus/internal/allocator" + "github.com/milvus-io/milvus/internal/common" + "github.com/milvus-io/milvus/internal/log" "github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/metrics" - ms "github.com/milvus-io/milvus/internal/mq/msgstream" "github.com/milvus-io/milvus/internal/proto/commonpb" - "github.com/milvus-io/milvus/internal/proto/datapb" - "github.com/milvus-io/milvus/internal/proto/indexpb" "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/proto/milvuspb" "github.com/milvus-io/milvus/internal/proto/proxypb" - "github.com/milvus-io/milvus/internal/proto/querypb" "github.com/milvus-io/milvus/internal/proto/rootcoordpb" "github.com/milvus-io/milvus/internal/tso" "github.com/milvus-io/milvus/internal/types" @@ -70,124 +71,60 @@ import ( // UniqueID is an alias of typeutil.UniqueID. 
type UniqueID = typeutil.UniqueID +// Timestamp is an alias of typeutil.Timestamp +type Timestamp = typeutil.Timestamp + const InvalidCollectionID = UniqueID(0) // ------------------ struct ----------------------- -// DdOperation used to save ddMsg into etcd -type DdOperation struct { - Body []byte `json:"body"` - Type string `json:"type"` -} - -func metricProxy(v int64) string { - return fmt.Sprintf("client_%d", v) -} - var Params paramtable.ComponentParam +type Opt func(*Core) + +type metaKVCreator func(root string) (kv.MetaKv, error) + +func defaultMetaKVCreator(etcdCli *clientv3.Client) metaKVCreator { + return func(root string) (kv.MetaKv, error) { + return etcdkv.NewEtcdKV(etcdCli, root), nil + } +} + // Core root coordinator core type Core struct { - MetaTable *MetaTable - //id allocator - IDAllocator func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) - IDAllocatorUpdate func() error + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + etcdCli *clientv3.Client + meta IMetaTable + scheduler IScheduler + broker Broker + garbageCollector GarbageCollector - //tso allocator - TSOAllocator func(count uint32) (typeutil.Timestamp, error) - TSOAllocatorUpdate func() error - TSOGetLastSavedTime func() time.Time + metaKVCreator metaKVCreator - //inner members - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup - etcdCli *clientv3.Client - kvBase kv.TxnKV //*etcdkv.EtcdKV - impTaskKv kv.MetaKv - - //DDL lock - ddlLock sync.Mutex - - kvBaseCreate func(root string) (kv.TxnKV, error) - - metaKVCreate func(root string) (kv.MetaKv, error) - - //setMsgStreams, send time tick into dd channel and time tick channel - SendTimeTick func(t typeutil.Timestamp, reason string) error - - //setMsgStreams, send create collection into dd channel - //returns corresponding message id for each channel - SendDdCreateCollectionReq func(ctx context.Context, req *internalpb.CreateCollectionRequest, channelNames []string) (map[string][]byte, error) - - //setMsgStreams, send drop collection into dd channel, and notify the proxy to delete this collection - SendDdDropCollectionReq func(ctx context.Context, req *internalpb.DropCollectionRequest, channelNames []string) error - - //setMsgStreams, send create partition into dd channel - SendDdCreatePartitionReq func(ctx context.Context, req *internalpb.CreatePartitionRequest, channelNames []string) error - - //setMsgStreams, send drop partition into dd channel - SendDdDropPartitionReq func(ctx context.Context, req *internalpb.DropPartitionRequest, channelNames []string) error - - //get segment info from data service - CallGetFlushedSegmentsService func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error) - CallGetRecoveryInfoService func(ctx context.Context, collID, partID UniqueID) ([]*datapb.SegmentBinlogs, error) - - //call index builder's client to build index, return build id or get index state. 
- CallDropCollectionIndexService func(ctx context.Context, collID UniqueID) error - CallGetSegmentIndexStateService func(ctx context.Context, collID UniqueID, indexName string, segIDs []UniqueID) ([]*indexpb.SegmentIndexState, error) - - NewProxyClient func(sess *sessionutil.Session) (types.Proxy, error) - - //query service interface, notify query service to release collection - CallReleaseCollectionService func(ctx context.Context, ts typeutil.Timestamp, dbID, collectionID typeutil.UniqueID) error - CallReleasePartitionService func(ctx context.Context, ts typeutil.Timestamp, dbID, collectionID typeutil.UniqueID, partitionIDs []typeutil.UniqueID) error - - // Communicates with queryCoord service for segments info. - CallGetSegmentInfoService func(ctx context.Context, collectionID int64, segIDs []int64) (*querypb.GetSegmentInfoResponse, error) - - CallWatchChannels func(ctx context.Context, collectionID int64, channelNames []string, startPositions []*commonpb.KeyDataPair) error - - //assign import task to data service - CallImportService func(ctx context.Context, req *datapb.ImportTaskRequest) *datapb.ImportTaskResponse - - // Seals segments in collection cID, so they can get flushed later. - CallFlushOnCollection func(ctx context.Context, cID int64, segIDs []int64) error - - // CallAddSegRefLock triggers AcquireSegmentLock method on DataCoord. - CallAddSegRefLock func(ctx context.Context, taskID int64, segIDs []int64) (retErr error) - - // CallReleaseSegRefLock triggers ReleaseSegmentLock method on DataCoord. - CallReleaseSegRefLock func(ctx context.Context, taskID int64, segIDs []int64) (retErr error) - - //Proxy manager - proxyManager *proxyManager - - // proxy clients + proxyCreator proxyCreator + proxyManager *proxyManager proxyClientManager *proxyClientManager - // metrics cache manager metricsCacheManager *metricsinfo.MetricsCacheManager - // channel timetick chanTimeTick *timetickSync - //time tick loop - lastTimeTick typeutil.Timestamp + idAllocator allocator.GIDAllocator + tsoAllocator tso.Allocator + + dataCoord types.DataCoord + queryCoord types.QueryCoord + indexCoord types.IndexCoord - //states code stateCode atomic.Value - - //call once initOnce sync.Once startOnce sync.Once - //isInit atomic.Value - - session *sessionutil.Session + session *sessionutil.Session factory dependency.Factory - //import manager importManager *importManager } @@ -200,7 +137,6 @@ func NewCore(c context.Context, factory dependency.Factory) (*Core, error) { core := &Core{ ctx: ctx, cancel: cancel, - ddlLock: sync.Mutex{}, factory: factory, } core.UpdateStateCode(internalpb.StateCode_Abnormal) @@ -218,92 +154,23 @@ func (c *Core) checkHealthy() (internalpb.StateCode, bool) { return code, ok } -func failStatus(code commonpb.ErrorCode, reason string) *commonpb.Status { - return &commonpb.Status{ - ErrorCode: code, - Reason: reason, +func (c *Core) sendTimeTick(t Timestamp, reason string) error { + pc := c.chanTimeTick.listDmlChannels() + pt := make([]uint64, len(pc)) + for i := 0; i < len(pt); i++ { + pt[i] = t } -} - -func succStatus() *commonpb.Status { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", + ttMsg := internalpb.ChannelTimeTickMsg{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_TimeTick, + Timestamp: t, + SourceID: c.session.ServerID, + }, + ChannelNames: pc, + Timestamps: pt, + DefaultTimestamp: t, } -} - -func (c *Core) checkInit() error { - if c.MetaTable == nil { - return fmt.Errorf("metaTable is nil") - } - if c.IDAllocator == nil { - return 
fmt.Errorf("idAllocator is nil") - } - if c.IDAllocatorUpdate == nil { - return fmt.Errorf("idAllocatorUpdate is nil") - } - if c.TSOAllocator == nil { - return fmt.Errorf("tsoAllocator is nil") - } - if c.TSOAllocatorUpdate == nil { - return fmt.Errorf("tsoAllocatorUpdate is nil") - } - if c.etcdCli == nil { - return fmt.Errorf("etcdCli is nil") - } - if c.kvBase == nil { - return fmt.Errorf("kvBase is nil") - } - if c.impTaskKv == nil { - return fmt.Errorf("impTaskKv is nil") - } - if c.SendDdCreateCollectionReq == nil { - return fmt.Errorf("sendDdCreateCollectionReq is nil") - } - if c.SendDdDropCollectionReq == nil { - return fmt.Errorf("sendDdDropCollectionReq is nil") - } - if c.SendDdCreatePartitionReq == nil { - return fmt.Errorf("sendDdCreatePartitionReq is nil") - } - if c.SendDdDropPartitionReq == nil { - return fmt.Errorf("sendDdDropPartitionReq is nil") - } - if c.CallGetFlushedSegmentsService == nil { - return fmt.Errorf("callGetFlushedSegmentsService is nil") - } - if c.CallGetRecoveryInfoService == nil { - return fmt.Errorf("CallGetRecoveryInfoService is nil") - } - if c.CallDropCollectionIndexService == nil { - return fmt.Errorf("callDropIndexService is nil") - } - if c.CallGetSegmentIndexStateService == nil { - return fmt.Errorf("callGetSegmentIndexStateService is nil") - } - if c.CallWatchChannels == nil { - return fmt.Errorf("callWatchChannels is nil") - } - if c.NewProxyClient == nil { - return fmt.Errorf("newProxyClient is nil") - } - if c.CallReleaseCollectionService == nil { - return fmt.Errorf("callReleaseCollectionService is nil") - } - if c.CallReleasePartitionService == nil { - return fmt.Errorf("callReleasePartitionService is nil") - } - if c.CallImportService == nil { - return fmt.Errorf("callImportService is nil") - } - if c.CallAddSegRefLock == nil { - return fmt.Errorf("callAddSegRefLock is nil") - } - if c.CallReleaseSegRefLock == nil { - return fmt.Errorf("callReleaseSegRefLock is nil") - } - - return nil + return c.chanTimeTick.updateTimeTick(&ttMsg, reason) } func (c *Core) startTimeTickLoop() { @@ -312,17 +179,14 @@ func (c *Core) startTimeTickLoop() { for { select { case <-c.ctx.Done(): - log.Debug("rootcoord context closed", zap.Error(c.ctx.Err())) return case <-ticker.C: - c.ddlLock.Lock() - if ts, err := c.TSOAllocator(1); err == nil { - err := c.SendTimeTick(ts, "timetick loop") + if ts, err := c.tsoAllocator.GenerateTSO(1); err == nil { + err := c.sendTimeTick(ts, "timetick loop") if err != nil { - log.Warn("Failed to send timetick", zap.Error(err)) + log.Warn("failed to send timetick", zap.Error(err)) } } - c.ddlLock.Unlock() } } } @@ -336,470 +200,56 @@ func (c *Core) tsLoop() { for { select { case <-tsoTicker.C: - if err := c.TSOAllocatorUpdate(); err != nil { + if err := c.tsoAllocator.UpdateTSO(); err != nil { log.Warn("failed to update timestamp: ", zap.Error(err)) continue } - ts := c.TSOGetLastSavedTime() + ts := c.tsoAllocator.GetLastSavedTime() metrics.RootCoordTimestampSaved.Set(float64(ts.Unix())) - if err := c.IDAllocatorUpdate(); err != nil { + if err := c.tsoAllocator.UpdateTSO(); err != nil { log.Warn("failed to update id: ", zap.Error(err)) continue } case <-ctx.Done(): - // Server is closed and it should return nil. 
- log.Debug("tsLoop is closed") return } } } -func (c *Core) getSegments(ctx context.Context, collID typeutil.UniqueID) (map[UniqueID]UniqueID, map[UniqueID]*datapb.SegmentBinlogs, error) { - collMeta, err := c.MetaTable.GetCollectionByID(collID, 0) - if err != nil { - return nil, nil, err - } - segID2PartID := make(map[UniqueID]UniqueID) - segID2Binlog := make(map[UniqueID]*datapb.SegmentBinlogs) - for _, part := range collMeta.Partitions { - if segs, err := c.CallGetRecoveryInfoService(ctx, collID, part.PartitionID); err == nil { - for _, s := range segs { - segID2PartID[s.SegmentID] = part.PartitionID - segID2Binlog[s.SegmentID] = s - } - } else { - log.Error("failed to get flushed segments info from dataCoord", - zap.Int64("collection ID", collID), - zap.Int64("partition ID", part.PartitionID), - zap.Error(err)) - return nil, nil, err - } - } - - return segID2PartID, segID2Binlog, nil -} - -func (c *Core) setMsgStreams() error { - if Params.CommonCfg.RootCoordSubName == "" { - return fmt.Errorf("RootCoordSubName is empty") - } - - c.SendTimeTick = func(t typeutil.Timestamp, reason string) error { - pc := c.chanTimeTick.listDmlChannels() - pt := make([]uint64, len(pc)) - for i := 0; i < len(pt); i++ { - pt[i] = t - } - ttMsg := internalpb.ChannelTimeTickMsg{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_TimeTick, - MsgID: 0, //TODO - Timestamp: t, - SourceID: c.session.ServerID, - }, - ChannelNames: pc, - Timestamps: pt, - DefaultTimestamp: t, - } - return c.chanTimeTick.updateTimeTick(&ttMsg, reason) - } - - c.SendDdCreateCollectionReq = func(ctx context.Context, req *internalpb.CreateCollectionRequest, channelNames []string) (map[string][]byte, error) { - msgPack := ms.MsgPack{} - baseMsg := ms.BaseMsg{ - Ctx: ctx, - BeginTimestamp: req.Base.Timestamp, - EndTimestamp: req.Base.Timestamp, - HashValues: []uint32{0}, - } - msg := &ms.CreateCollectionMsg{ - BaseMsg: baseMsg, - CreateCollectionRequest: *req, - } - msgPack.Msgs = append(msgPack.Msgs, msg) - return c.chanTimeTick.broadcastMarkDmlChannels(channelNames, &msgPack) - } - - c.SendDdDropCollectionReq = func(ctx context.Context, req *internalpb.DropCollectionRequest, channelNames []string) error { - msgPack := ms.MsgPack{} - baseMsg := ms.BaseMsg{ - Ctx: ctx, - BeginTimestamp: req.Base.Timestamp, - EndTimestamp: req.Base.Timestamp, - HashValues: []uint32{0}, - } - msg := &ms.DropCollectionMsg{ - BaseMsg: baseMsg, - DropCollectionRequest: *req, - } - msgPack.Msgs = append(msgPack.Msgs, msg) - return c.chanTimeTick.broadcastDmlChannels(channelNames, &msgPack) - } - - c.SendDdCreatePartitionReq = func(ctx context.Context, req *internalpb.CreatePartitionRequest, channelNames []string) error { - msgPack := ms.MsgPack{} - baseMsg := ms.BaseMsg{ - Ctx: ctx, - BeginTimestamp: req.Base.Timestamp, - EndTimestamp: req.Base.Timestamp, - HashValues: []uint32{0}, - } - msg := &ms.CreatePartitionMsg{ - BaseMsg: baseMsg, - CreatePartitionRequest: *req, - } - msgPack.Msgs = append(msgPack.Msgs, msg) - return c.chanTimeTick.broadcastDmlChannels(channelNames, &msgPack) - } - - c.SendDdDropPartitionReq = func(ctx context.Context, req *internalpb.DropPartitionRequest, channelNames []string) error { - msgPack := ms.MsgPack{} - baseMsg := ms.BaseMsg{ - Ctx: ctx, - BeginTimestamp: req.Base.Timestamp, - EndTimestamp: req.Base.Timestamp, - HashValues: []uint32{0}, - } - msg := &ms.DropPartitionMsg{ - BaseMsg: baseMsg, - DropPartitionRequest: *req, - } - msgPack.Msgs = append(msgPack.Msgs, msg) - return 
c.chanTimeTick.broadcastDmlChannels(channelNames, &msgPack) - } - - return nil -} - -// SetNewProxyClient set client to create proxy func (c *Core) SetNewProxyClient(f func(sess *sessionutil.Session) (types.Proxy, error)) { - if c.NewProxyClient == nil { - c.NewProxyClient = f - } else { - log.Debug("NewProxyClient has already set") - } + c.proxyCreator = f } -// SetDataCoord set dataCoord. func (c *Core) SetDataCoord(ctx context.Context, s types.DataCoord) error { - initCh := make(chan struct{}) - go func() { - for { - if err := s.Init(); err == nil { - if err := s.Start(); err == nil { - close(initCh) - log.Debug("RootCoord connected to DataCoord") - return - } - } - log.Debug("Retrying RootCoord connection to DataCoord") - } - }() - - c.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) (retSegIDs []typeutil.UniqueID, retErr error) { - defer func() { - if err := recover(); err != nil { - retErr = fmt.Errorf("get flushed segments from data coord panic, msg = %v", err) - } - }() - <-initCh - req := &datapb.GetFlushedSegmentsRequest{ - Base: &commonpb.MsgBase{ - MsgType: 0, //TODO,msg type - MsgID: 0, - Timestamp: 0, - SourceID: c.session.ServerID, - }, - CollectionID: collID, - PartitionID: partID, - } - rsp, err := s.GetFlushedSegments(ctx, req) - if err != nil { - return nil, err - } - if rsp.Status.ErrorCode != commonpb.ErrorCode_Success { - return nil, fmt.Errorf("get flushed segments from data coord failed, reason = %s", rsp.Status.Reason) - } - return rsp.Segments, nil + if err := s.Init(); err != nil { + return err } - - c.CallGetRecoveryInfoService = func(ctx context.Context, collID, partID typeutil.UniqueID) ([]*datapb.SegmentBinlogs, error) { - getSegmentInfoReq := &datapb.GetRecoveryInfoRequest{ - Base: &commonpb.MsgBase{ - MsgType: 0, //TODO, msg type - MsgID: 0, - Timestamp: 0, - SourceID: c.session.ServerID, - }, - CollectionID: collID, - PartitionID: partID, - } - resp, err := s.GetRecoveryInfo(ctx, getSegmentInfoReq) - if err != nil { - return nil, err - } - if resp.Status.ErrorCode != commonpb.ErrorCode_Success { - return nil, errors.New(resp.Status.Reason) - } - return resp.Binlogs, nil + if err := s.Start(); err != nil { + return err } - - c.CallWatchChannels = func(ctx context.Context, collectionID int64, channelNames []string, startPositions []*commonpb.KeyDataPair) (retErr error) { - defer func() { - if err := recover(); err != nil { - retErr = fmt.Errorf("watch channels panic, msg = %v", err) - } - }() - <-initCh - req := &datapb.WatchChannelsRequest{ - CollectionID: collectionID, - ChannelNames: channelNames, - StartPositions: startPositions, - } - rsp, err := s.WatchChannels(ctx, req) - if err != nil { - return err - } - if rsp.Status.ErrorCode != commonpb.ErrorCode_Success { - return fmt.Errorf("data coord watch channels failed, reason = %s", rsp.Status.Reason) - } - return nil - } - - c.CallImportService = func(ctx context.Context, req *datapb.ImportTaskRequest) *datapb.ImportTaskResponse { - resp := &datapb.ImportTaskResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, - } - defer func() { - if err := recover(); err != nil { - resp.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError - resp.Status.Reason = "assign import task to data coord panic" - } - }() - resp, _ = s.Import(ctx, req) - return resp - } - - c.CallFlushOnCollection = func(ctx context.Context, cID int64, segIDs []int64) error { - resp, err := s.Flush(ctx, &datapb.FlushRequest{ - Base: &commonpb.MsgBase{ - MsgType: 
commonpb.MsgType_Flush, - SourceID: c.session.ServerID, - }, - DbID: 0, - SegmentIDs: segIDs, - CollectionID: cID, - }) - if err != nil { - return errors.New("failed to call flush to data coordinator: " + err.Error()) - } - if resp.Status.ErrorCode != commonpb.ErrorCode_Success { - return errors.New(resp.Status.Reason) - } - log.Info("flush on collection succeed", zap.Int64("collection ID", cID)) - return nil - } - - c.CallAddSegRefLock = func(ctx context.Context, taskID int64, segIDs []int64) (retErr error) { - defer func() { - if err := recover(); err != nil { - retErr = fmt.Errorf("add seg ref lock panic, msg = %v", err) - } - }() - <-initCh - log.Info("acquiring seg lock", - zap.Int64s("segment IDs", segIDs), - zap.Int64("node ID", c.session.ServerID)) - resp, _ := s.AcquireSegmentLock(ctx, &datapb.AcquireSegmentLockRequest{ - SegmentIDs: segIDs, - NodeID: c.session.ServerID, - TaskID: taskID, - }) - if resp.GetErrorCode() != commonpb.ErrorCode_Success { - return fmt.Errorf("failed to acquire segment lock %s", resp.GetReason()) - } - log.Info("acquire seg lock succeed", - zap.Int64s("segment IDs", segIDs), - zap.Int64("node ID", c.session.ServerID)) - return nil - } - - c.CallReleaseSegRefLock = func(ctx context.Context, taskID int64, segIDs []int64) (retErr error) { - defer func() { - if err := recover(); err != nil { - retErr = fmt.Errorf("release seg ref lock panic, msg = %v", err) - } - }() - <-initCh - log.Info("releasing seg lock", - zap.Int64s("segment IDs", segIDs), - zap.Int64("node ID", c.session.ServerID)) - resp, _ := s.ReleaseSegmentLock(ctx, &datapb.ReleaseSegmentLockRequest{ - SegmentIDs: segIDs, - NodeID: c.session.ServerID, - TaskID: taskID, - }) - if resp.GetErrorCode() != commonpb.ErrorCode_Success { - return fmt.Errorf("failed to release segment lock %s", resp.GetReason()) - } - log.Info("release seg lock succeed", - zap.Int64s("segment IDs", segIDs), - zap.Int64("node ID", c.session.ServerID)) - return nil - } - + c.dataCoord = s return nil } -// SetIndexCoord sets IndexCoord. 
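// The coordinator dependencies are now plain typed fields set once at startup
// instead of per-RPC closures; a hedged sketch of the wiring (variable names
// are hypothetical):
//
//	if err := core.SetDataCoord(ctx, dataCoordClient); err != nil {
//		return err
//	}
//	if err := core.SetQueryCoord(queryCoordClient); err != nil {
//		return err
//	}
//
// Downstream RPCs then go through c.broker, which presumably reads
// c.dataCoord / c.queryCoord / c.indexCoord rather than the old Call* fields.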
func (c *Core) SetIndexCoord(s types.IndexCoord) error { - initCh := make(chan struct{}) - go func() { - for { - if err := s.Init(); err == nil { - if err := s.Start(); err == nil { - close(initCh) - log.Debug("RootCoord connected to IndexCoord") - return - } - } - log.Debug("Retrying RootCoord connection to IndexCoord") - } - }() - - c.CallGetSegmentIndexStateService = func(ctx context.Context, collID UniqueID, indexName string, segIDs []UniqueID) (states []*indexpb.SegmentIndexState, retErr error) { - defer func() { - if err := recover(); err != nil { - retErr = fmt.Errorf("get segment state panic, msg = %v", err) - } - }() - <-initCh - - resp, err := s.GetSegmentIndexState(ctx, &indexpb.GetSegmentIndexStateRequest{ - CollectionID: collID, - IndexName: indexName, - SegmentIDs: segIDs, - }) - if err != nil { - return nil, err - } - if resp.Status.ErrorCode != commonpb.ErrorCode_Success { - return nil, errors.New(resp.Status.Reason) - } - - return resp.GetStates(), nil + if err := s.Init(); err != nil { + return err } - - c.CallDropCollectionIndexService = func(ctx context.Context, collID UniqueID) (retErr error) { - defer func() { - if err := recover(); err != nil { - retErr = fmt.Errorf("drop collection index panic, msg = %v", err) - } - }() - <-initCh - - rsp, err := s.DropIndex(ctx, &indexpb.DropIndexRequest{ - CollectionID: collID, - IndexName: "", - }) - if err != nil { - return err - } - if rsp.ErrorCode != commonpb.ErrorCode_Success { - return fmt.Errorf(rsp.Reason) - } - return nil + if err := s.Start(); err != nil { + return err } + c.indexCoord = s return nil } -// SetQueryCoord sets up queryCoord and queryCoord related function calls. func (c *Core) SetQueryCoord(s types.QueryCoord) error { - initCh := make(chan struct{}) - go func() { - for { - if err := s.Init(); err == nil { - if err := s.Start(); err == nil { - close(initCh) - log.Debug("RootCoord connected to QueryCoord") - return - } - } - log.Debug("Retrying RootCoord connection to QueryCoord") - } - }() - c.CallReleaseCollectionService = func(ctx context.Context, ts typeutil.Timestamp, dbID typeutil.UniqueID, collectionID typeutil.UniqueID) (retErr error) { - defer func() { - if err := recover(); err != nil { - retErr = fmt.Errorf("release collection from query service panic, msg = %v", err) - } - }() - <-initCh - req := &querypb.ReleaseCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ReleaseCollection, - MsgID: 0, //TODO, msg ID - Timestamp: ts, - SourceID: c.session.ServerID, - }, - DbID: dbID, - CollectionID: collectionID, - } - rsp, err := s.ReleaseCollection(ctx, req) - if err != nil { - return err - } - if rsp.ErrorCode != commonpb.ErrorCode_Success { - return fmt.Errorf("releaseCollection from query service failed, error = %s", rsp.Reason) - } - return nil + if err := s.Init(); err != nil { + return err } - c.CallReleasePartitionService = func(ctx context.Context, ts typeutil.Timestamp, dbID, collectionID typeutil.UniqueID, partitionIDs []typeutil.UniqueID) (retErr error) { - defer func() { - if err := recover(); err != nil { - retErr = fmt.Errorf("release partition from query service panic, msg = %v", err) - } - }() - <-initCh - req := &querypb.ReleasePartitionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ReleasePartitions, - MsgID: 0, //TODO, msg ID - Timestamp: ts, - SourceID: c.session.ServerID, - }, - DbID: dbID, - CollectionID: collectionID, - PartitionIDs: partitionIDs, - } - rsp, err := s.ReleasePartitions(ctx, req) - if err != nil { - return err - } - if 
rsp.ErrorCode != commonpb.ErrorCode_Success { - return fmt.Errorf("releasePartitions from query service failed, error = %s", rsp.Reason) - } - return nil - } - c.CallGetSegmentInfoService = func(ctx context.Context, collectionID int64, segIDs []int64) (retResp *querypb.GetSegmentInfoResponse, retErr error) { - defer func() { - if err := recover(); err != nil { - retErr = fmt.Errorf("call segment info service panic, msg = %v", err) - } - }() - <-initCh - resp, err := s.GetSegmentInfo(ctx, &querypb.GetSegmentInfoRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_GetSegmentState, - SourceID: c.session.ServerID, - }, - CollectionID: collectionID, - SegmentIDs: segIDs, - }) - return resp, err + if err := s.Start(); err != nil { + return err } + c.queryCoord = s return nil } @@ -854,7 +304,6 @@ func (c *Core) Register() error { }) c.UpdateStateCode(internalpb.StateCode_Healthy) - log.Debug("RootCoord start successfully ", zap.String("State Code", internalpb.StateCode_Healthy.String())) return nil } @@ -873,162 +322,166 @@ func (c *Core) initSession() error { return nil } +func (c *Core) initKVCreator() { + if c.metaKVCreator == nil { + c.metaKVCreator = defaultMetaKVCreator(c.etcdCli) + } +} + +func (c *Core) initMetaTable() error { + fn := func() error { + var catalog metastore.RootCoordCatalog + var err error + + switch Params.MetaStoreCfg.MetaStoreType { + case util.MetaStoreTypeEtcd: + var metaKV kv.MetaKv + var ss *kvmetestore.SuffixSnapshot + var err error + + if metaKV, err = c.metaKVCreator(Params.EtcdCfg.MetaRootPath); err != nil { + return err + } + + if ss, err = kvmetestore.NewSuffixSnapshot(metaKV, snapshotsSep, Params.EtcdCfg.MetaRootPath, snapshotPrefix); err != nil { + return err + } + + catalog = &kvmetestore.Catalog{Txn: metaKV, Snapshot: ss} + case util.MetaStoreTypeMysql: + // connect to database + err := dbcore.Connect(&Params.DBCfg) + if err != nil { + return err + } + + catalog = rootcoord.NewTableCatalog(dbcore.NewTxImpl(), dao.NewMetaDomain()) + default: + return retry.Unrecoverable(fmt.Errorf("not supported meta store: %s", Params.MetaStoreCfg.MetaStoreType)) + } + + if c.meta, err = NewMetaTable(c.ctx, catalog); err != nil { + return err + } + + return nil + } + + return retry.Do(c.ctx, fn, retry.Attempts(10)) +} + +func (c *Core) initIDAllocator() error { + tsoKV := tsoutil.NewTSOKVBase(c.etcdCli, Params.EtcdCfg.KvRootPath, globalIDAllocatorSubPath) + idAllocator := allocator.NewGlobalIDAllocator(globalIDAllocatorKey, tsoKV) + if err := idAllocator.Initialize(); err != nil { + return err + } + c.idAllocator = idAllocator + return nil +} + +func (c *Core) initTSOAllocator() error { + tsoKV := tsoutil.NewTSOKVBase(c.etcdCli, Params.EtcdCfg.KvRootPath, globalTSOAllocatorSubPath) + tsoAllocator := tso.NewGlobalTSOAllocator(globalTSOAllocatorKey, tsoKV) + if err := tsoAllocator.Initialize(); err != nil { + return err + } + c.tsoAllocator = tsoAllocator + + return nil +} + +func (c *Core) initImportManager() error { + impTaskKv, err := c.metaKVCreator(Params.EtcdCfg.KvRootPath) + if err != nil { + return err + } + + f := NewImportFactory(c) + c.importManager = newImportManager( + c.ctx, + impTaskKv, + f.NewIDAllocator(), + f.NewImportFunc(), + f.NewGetCollectionNameFunc(), + ) + c.importManager.init(c.ctx) + + return nil +} + +func (c *Core) initInternal() error { + if err := c.initSession(); err != nil { + return err + } + + c.initKVCreator() + + if err := c.initMetaTable(); err != nil { + return err + } + + if err := c.initIDAllocator(); err != nil { + return 
err + } + + if err := c.initTSOAllocator(); err != nil { + return err + } + + c.scheduler = newScheduler(c.ctx, c.idAllocator, c.tsoAllocator) + + c.factory.Init(&Params) + + chanMap := c.meta.ListCollectionPhysicalChannels() + c.chanTimeTick = newTimeTickSync(c.ctx, c.session.ServerID, c.factory, chanMap) + c.chanTimeTick.addSession(c.session) + c.proxyClientManager = newProxyClientManager(c.proxyCreator) + + c.broker = newServerBroker(c) + c.garbageCollector = newGarbageCollectorCtx(c) + + c.proxyManager = newProxyManager( + c.ctx, + c.etcdCli, + c.chanTimeTick.initSessions, + c.proxyClientManager.GetProxyClients, + ) + c.proxyManager.AddSessionFunc(c.chanTimeTick.addSession, c.proxyClientManager.AddProxyClient) + c.proxyManager.DelSessionFunc(c.chanTimeTick.delSession, c.proxyClientManager.DelProxyClient) + + c.metricsCacheManager = metricsinfo.NewMetricsCacheManager() + + if err := c.initImportManager(); err != nil { + return err + } + + if err := c.initCredentials(); err != nil { + return err + } + + if err := c.initRbac(); err != nil { + return err + } + + return nil +} + // Init initialize routine func (c *Core) Init() error { var initError error - if c.kvBaseCreate == nil { - c.kvBaseCreate = func(root string) (kv.TxnKV, error) { - return etcdkv.NewEtcdKV(c.etcdCli, root), nil - } - } - if c.metaKVCreate == nil { - c.metaKVCreate = func(root string) (kv.MetaKv, error) { - return etcdkv.NewEtcdKV(c.etcdCli, root), nil - } - } c.initOnce.Do(func() { - if err := c.initSession(); err != nil { - initError = err - log.Error("RootCoord init session failed", zap.Error(err)) - return - } - connectEtcdFn := func() error { - if c.kvBase, initError = c.kvBaseCreate(Params.EtcdCfg.KvRootPath); initError != nil { - log.Error("RootCoord failed to new EtcdKV for kvBase", zap.Any("reason", initError)) - return initError - } - if c.impTaskKv, initError = c.metaKVCreate(Params.EtcdCfg.KvRootPath); initError != nil { - log.Error("RootCoord failed to new EtcdKV for MetaKV", zap.Any("reason", initError)) - return initError - } - - var catalog metastore.RootCoordCatalog - switch Params.MetaStoreCfg.MetaStoreType { - case util.MetaStoreTypeEtcd: - var metaKV kv.TxnKV - metaKV, initError = c.kvBaseCreate(Params.EtcdCfg.MetaRootPath) - if initError != nil { - log.Error("RootCoord failed to new EtcdKV", zap.Any("reason", initError)) - return initError - } - - var ss *rootcoord.SuffixSnapshot - if ss, initError = rootcoord.NewSuffixSnapshot(metaKV, "_ts", Params.EtcdCfg.MetaRootPath, "snapshots"); initError != nil { - log.Error("RootCoord failed to new suffixSnapshot", zap.Error(initError)) - return initError - } - - catalog = &rootcoord.Catalog{Txn: metaKV, Snapshot: ss} - case util.MetaStoreTypeMysql: - // connect to database - err := dbcore.Connect(&Params.DBCfg) - if err != nil { - return err - } - - catalog = rootcoord2.NewTableCatalog(dbcore.NewTxImpl(), dao.NewMetaDomain()) - default: - return fmt.Errorf("not supported meta store: %s", Params.MetaStoreCfg.MetaStoreType) - } - - if c.MetaTable, initError = NewMetaTable(c.ctx, catalog); initError != nil { - log.Error("RootCoord failed to new MetaTable", zap.Any("reason", initError)) - return initError - } - - return nil - } - log.Debug("RootCoord, Connecting to Etcd", zap.String("kv root", Params.EtcdCfg.KvRootPath), zap.String("meta root", Params.EtcdCfg.MetaRootPath)) - err := retry.Do(c.ctx, connectEtcdFn, retry.Attempts(100)) - if err != nil { - return - } - - log.Debug("RootCoord, Setting TSO and ID Allocator") - kv := 
tsoutil.NewTSOKVBase(c.etcdCli, Params.EtcdCfg.KvRootPath, "gid") - idAllocator := allocator.NewGlobalIDAllocator("idTimestamp", kv) - if initError = idAllocator.Initialize(); initError != nil { - return - } - c.IDAllocator = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) { - return idAllocator.Alloc(count) - } - c.IDAllocatorUpdate = func() error { - return idAllocator.UpdateID() - } - - kv = tsoutil.NewTSOKVBase(c.etcdCli, Params.EtcdCfg.KvRootPath, "tso") - tsoAllocator := tso.NewGlobalTSOAllocator("timestamp", kv) - if initError = tsoAllocator.Initialize(); initError != nil { - return - } - c.TSOAllocator = func(count uint32) (typeutil.Timestamp, error) { - return tsoAllocator.Alloc(count) - } - c.TSOAllocatorUpdate = func() error { - return tsoAllocator.UpdateTSO() - } - c.TSOGetLastSavedTime = func() time.Time { - return tsoAllocator.GetLastSavedTime() - } - - c.factory.Init(&Params) - - chanMap := c.MetaTable.ListCollectionPhysicalChannels() - c.chanTimeTick = newTimeTickSync(c.ctx, c.session.ServerID, c.factory, chanMap) - c.chanTimeTick.addSession(c.session) - c.proxyClientManager = newProxyClientManager(c) - - log.Debug("RootCoord, set proxy manager") - c.proxyManager = newProxyManager( - c.ctx, - c.etcdCli, - c.chanTimeTick.initSessions, - c.proxyClientManager.GetProxyClients, - ) - c.proxyManager.AddSessionFunc(c.chanTimeTick.addSession, c.proxyClientManager.AddProxyClient) - c.proxyManager.DelSessionFunc(c.chanTimeTick.delSession, c.proxyClientManager.DelProxyClient) - - c.metricsCacheManager = metricsinfo.NewMetricsCacheManager() - - initError = c.setMsgStreams() - if initError != nil { - return - } - - c.importManager = newImportManager( - c.ctx, - c.impTaskKv, - c.IDAllocator, - c.CallImportService, - c.getCollectionName, - ) - c.importManager.init(c.ctx) - - // init data - initError = c.initData() - if initError != nil { - return - } - - if initError = c.initRbac(); initError != nil { - return - } - log.Debug("RootCoord init user root done") + initError = c.initInternal() }) - if initError != nil { - log.Debug("RootCoord init error", zap.Error(initError)) - } - log.Debug("RootCoord init done") return initError } -func (c *Core) initData() error { - credInfo, _ := c.MetaTable.GetCredential(util.UserRoot) +func (c *Core) initCredentials() error { + credInfo, _ := c.meta.GetCredential(util.UserRoot) if credInfo == nil { log.Debug("RootCoord init user root") encryptedRootPassword, _ := crypto.PasswordEncrypt(util.DefaultRootPassword) - err := c.MetaTable.AddCredential(&internalpb.CredentialInfo{Username: util.UserRoot, EncryptedPassword: encryptedRootPassword}) + err := c.meta.AddCredential(&internalpb.CredentialInfo{Username: util.UserRoot, EncryptedPassword: encryptedRootPassword}) return err } return nil @@ -1037,7 +490,7 @@ func (c *Core) initData() error { func (c *Core) initRbac() (initError error) { // create default roles, including admin, public for _, role := range util.DefaultRoles { - if initError = c.MetaTable.CreateRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: role}); initError != nil { + if initError = c.meta.CreateRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: role}); initError != nil { if common.IsIgnorableError(initError) { initError = nil continue @@ -1056,7 +509,7 @@ func (c *Core) initRbac() (initError error) { } for _, globalPrivilege := range globalPrivileges { - if initError = c.MetaTable.OperatePrivilege(util.DefaultTenant, &milvuspb.GrantEntity{ + if initError = c.meta.OperatePrivilege(util.DefaultTenant, 
&milvuspb.GrantEntity{ Role: &milvuspb.RoleEntity{Name: util.RolePublic}, Object: &milvuspb.ObjectEntity{Name: commonpb.ObjectType_Global.String()}, ObjectName: util.AnyWord, @@ -1073,7 +526,7 @@ func (c *Core) initRbac() (initError error) { } } for _, collectionPrivilege := range collectionPrivileges { - if initError = c.MetaTable.OperatePrivilege(util.DefaultTenant, &milvuspb.GrantEntity{ + if initError = c.meta.OperatePrivilege(util.DefaultTenant, &milvuspb.GrantEntity{ Role: &milvuspb.RoleEntity{Name: util.RolePublic}, Object: &milvuspb.ObjectEntity{Name: commonpb.ObjectType_Collection.String()}, ObjectName: util.AnyWord, @@ -1092,51 +545,81 @@ func (c *Core) initRbac() (initError error) { return nil } -func (c *Core) getCollectionName(collID, partitionID typeutil.UniqueID) (string, string, error) { - colName, err := c.MetaTable.GetCollectionNameByID(collID) +func (c *Core) restore(ctx context.Context) error { + colls, err := c.meta.ListAbnormalCollections(ctx, typeutil.MaxTimestamp) if err != nil { - log.Error("RootCoord failed to get collection name by id", zap.Int64("ID", collID), zap.Error(err)) - return "", "", err + return err } - partName, err := c.MetaTable.GetPartitionNameByID(collID, partitionID, 0) - if err != nil { - log.Error("RootCoord failed to get partition name by id", zap.Int64("ID", partitionID), zap.Error(err)) - return colName, "", err + for _, coll := range colls { + ts, err := c.tsoAllocator.GenerateTSO(1) + if err != nil { + return err + } + + switch coll.State { + case pb.CollectionState_CollectionDropping: + go c.garbageCollector.ReDropCollection(coll.Clone(), ts) + case pb.CollectionState_CollectionCreating: + go c.garbageCollector.RemoveCreatingCollection(coll.Clone()) + default: + } } - return colName, partName, nil + colls, err = c.meta.ListCollections(ctx, typeutil.MaxTimestamp) + if err != nil { + return err + } + for _, coll := range colls { + for _, part := range coll.Partitions { + ts, err := c.tsoAllocator.GenerateTSO(1) + if err != nil { + return err + } + + switch part.State { + case pb.PartitionState_PartitionDropping: + go c.garbageCollector.ReDropPartition(coll.PhysicalChannelNames, part.Clone(), ts) + default: + } + } + } + return nil +} + +func (c *Core) startInternal() error { + if err := c.proxyManager.WatchProxy(); err != nil { + log.Fatal("rootcoord failed to watch proxy", zap.Error(err)) + // you can not just stuck here, + panic(err) + } + + if err := c.restore(c.ctx); err != nil { + panic(err) + } + + c.wg.Add(5) + go c.tsLoop() + go c.startTimeTickLoop() + go c.chanTimeTick.startWatch(&c.wg) + go c.importManager.expireOldTasksLoop(&c.wg, c.broker.ReleaseSegRefLock) + go c.importManager.sendOutTasksLoop(&c.wg) + + c.scheduler.Start() + + Params.RootCoordCfg.CreatedTime = time.Now() + Params.RootCoordCfg.UpdatedTime = time.Now() + + return nil } // Start starts RootCoord. 
func (c *Core) Start() error { - if err := c.checkInit(); err != nil { - log.Debug("RootCoord Start checkInit failed", zap.Error(err)) - return err - } - - log.Debug("starting service", - zap.String("service role", typeutil.RootCoordRole), - zap.Int64("node id", c.session.ServerID)) - + var err error c.startOnce.Do(func() { - if err := c.proxyManager.WatchProxy(); err != nil { - log.Fatal("RootCoord Start WatchProxy failed", zap.Error(err)) - // you can not just stuck here, - panic(err) - } - - c.wg.Add(5) - go c.startTimeTickLoop() - go c.tsLoop() - go c.chanTimeTick.startWatch(&c.wg) - go c.importManager.expireOldTasksLoop(&c.wg, c.CallReleaseSegRefLock) - go c.importManager.sendOutTasksLoop(&c.wg) - Params.RootCoordCfg.CreatedTime = time.Now() - Params.RootCoordCfg.UpdatedTime = time.Now() + err = c.startInternal() }) - - return nil + return err } // Stop stops rootCoord. @@ -1153,7 +636,6 @@ func (c *Core) Stop() error { // GetComponentStates get states of components func (c *Core) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) { code := c.stateCode.Load().(internalpb.StateCode) - log.Debug("GetComponentStates", zap.String("State Code", internalpb.StateCode_name[int32(code)])) nodeID := common.NotRegisteredID if c.session != nil && c.session.Registered() { @@ -1207,481 +689,505 @@ func (c *Core) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringRespon // CreateCollection create collection func (c *Core) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil } + metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("CreateCollection") - log.Debug("CreateCollection", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID)) - t := &CreateCollectionReqTask{ - baseReqTask: baseReqTask{ + log.Info("received request to create collection", zap.String("role", typeutil.RootCoordRole), + zap.String("name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &createCollectionTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, Req: in, } - err := executeTask(t) - if err != nil { - log.Error("CreateCollection failed", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to create collection", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID())) + metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.FailLabel).Inc() - return failStatus(commonpb.ErrorCode_UnexpectedError, "CreateCollection failed: "+err.Error()), nil + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil + } + + if err := t.WaitToFinish(); err != nil { + log.Error("failed to create collection", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("name", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + + 
metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.FailLabel).Inc() + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } - log.Debug("CreateCollection success", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID)) metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("CreateCollection").Observe(float64(tr.ElapseSpan().Milliseconds())) metrics.RootCoordNumOfCollections.Inc() + + log.Info("done to create collection", zap.String("role", typeutil.RootCoordRole), + zap.String("name", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) return succStatus(), nil } // DropCollection drop collection func (c *Core) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil } + + metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("DropCollection") - log.Debug("DropCollection", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID)) - t := &DropCollectionReqTask{ - baseReqTask: baseReqTask{ + + log.Info("received request to drop collection", zap.String("role", typeutil.RootCoordRole), + zap.String("name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &dropCollectionTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, Req: in, } - err := executeTask(t) - if err != nil { - log.Error("DropCollection failed", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to drop collection", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID())) + metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.FailLabel).Inc() - return failStatus(commonpb.ErrorCode_UnexpectedError, "DropCollection failed: "+err.Error()), nil + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil + } + + if err := t.WaitToFinish(); err != nil { + log.Error("failed to drop collection", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("name", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.FailLabel).Inc() + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } - log.Debug("DropCollection success", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID)) metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("DropCollection").Observe(float64(tr.ElapseSpan().Milliseconds())) metrics.RootCoordNumOfCollections.Dec() + + log.Info("done to drop collection", 
zap.String("role", typeutil.RootCoordRole), + zap.String("name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID()), + zap.Uint64("ts", t.GetTs())) return succStatus(), nil } // HasCollection check collection existence func (c *Core) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return &milvuspb.BoolResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), Value: false, }, nil } + + metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("HasCollection") - log.Debug("HasCollection", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID)) - t := &HasCollectionReqTask{ - baseReqTask: baseReqTask{ + log.Info("received request to has collection", zap.String("role", typeutil.RootCoordRole), + zap.String("collection name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &hasCollectionTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, - Req: in, - HasCollection: false, + Req: in, + Rsp: &milvuspb.BoolResponse{}, } - err := executeTask(t) - if err != nil { - log.Error("HasCollection failed", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to has collection", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection name", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.FailLabel).Inc() + return &milvuspb.BoolResponse{ + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasCollection failed: "+err.Error()), + Value: false, + }, nil + } + + if err := t.WaitToFinish(); err != nil { + log.Error("failed to enqueue request to has collection", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection name", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.FailLabel).Inc() return &milvuspb.BoolResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasCollection failed: "+err.Error()), Value: false, }, nil } - log.Debug("HasCollection success", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID), zap.Bool("hasCollection", t.HasCollection)) metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("HasCollection").Observe(float64(tr.ElapseSpan().Milliseconds())) - return &milvuspb.BoolResponse{ - Status: succStatus(), - Value: t.HasCollection, - }, nil + + log.Info("done to has collection", zap.String("role", typeutil.RootCoordRole), + zap.String("collection name", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()), + zap.Bool("exist", t.Rsp.GetValue())) + return t.Rsp, nil } // DescribeCollection return collection info func (c *Core) DescribeCollection(ctx 
context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return &milvuspb.DescribeCollectionResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode"+internalpb.StateCode_name[int32(code)]), }, nil } + + metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("DescribeCollection") - log.Ctx(ctx).Debug("DescribeCollection", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("id", in.CollectionID), zap.Int64("msgID", in.Base.MsgID)) - t := &DescribeCollectionReqTask{ - baseReqTask: baseReqTask{ + log.Info("received request to describe collection", zap.String("role", typeutil.RootCoordRole), + zap.String("collection name", in.GetCollectionName()), zap.Int64("id", in.GetCollectionID()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &describeCollectionTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, Req: in, Rsp: &milvuspb.DescribeCollectionResponse{}, } - err := executeTask(t) - if err != nil { - log.Ctx(ctx).Warn("DescribeCollection failed", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("id", in.CollectionID), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to describe collection", + zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection name", in.GetCollectionName()), zap.Int64("id", in.GetCollectionID()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.FailLabel).Inc() + return &milvuspb.DescribeCollectionResponse{ + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "DescribeCollection failed: "+err.Error()), + }, nil + } + + if err := t.WaitToFinish(); err != nil { + log.Error("failed to describe collection", + zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection name", in.GetCollectionName()), zap.Int64("id", in.GetCollectionID()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.FailLabel).Inc() return &milvuspb.DescribeCollectionResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "DescribeCollection failed: "+err.Error()), }, nil } - log.Ctx(ctx).Debug("DescribeCollection success", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("id", in.CollectionID), zap.Int64("msgID", in.Base.MsgID)) metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("DescribeCollection").Observe(float64(tr.ElapseSpan().Milliseconds())) - t.Rsp.Status = succStatus() + + log.Info("done to describe collection", zap.String("role", typeutil.RootCoordRole), + zap.String("collection name", in.GetCollectionName()), zap.Int64("id", in.GetCollectionID()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) return t.Rsp, nil } // ShowCollections list all collection names func (c *Core) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionsRequest) 
(*milvuspb.ShowCollectionsResponse, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return &milvuspb.ShowCollectionsResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), }, nil } + + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("ShowCollections") - log.Debug("ShowCollections", zap.String("role", typeutil.RootCoordRole), - zap.String("dbname", in.DbName), zap.Int64("msgID", in.Base.MsgID)) - t := &ShowCollectionReqTask{ - baseReqTask: baseReqTask{ + log.Info("received request to show collections", zap.String("role", typeutil.RootCoordRole), + zap.String("dbname", in.GetDbName()), zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &showCollectionTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, Req: in, Rsp: &milvuspb.ShowCollectionsResponse{}, } - err := executeTask(t) - if err != nil { - log.Error("ShowCollections failed", zap.String("role", typeutil.RootCoordRole), - zap.String("dbname", in.DbName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to show collections", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("dbname", in.GetDbName()), zap.Int64("msgID", in.GetBase().GetMsgID())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.FailLabel).Inc() + return &milvuspb.ShowCollectionsResponse{ + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowCollections failed: "+err.Error()), + }, nil + } + + if err := t.WaitToFinish(); err != nil { + log.Error("failed to show collections", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("dbname", in.GetDbName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.FailLabel).Inc() return &milvuspb.ShowCollectionsResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowCollections failed: "+err.Error()), }, nil } - log.Debug("ShowCollections success", zap.String("role", typeutil.RootCoordRole), - zap.String("dbname", in.DbName), zap.Int("num of collections", len(t.Rsp.CollectionNames)), - zap.Int64("msgID", in.Base.MsgID)) metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.SuccessLabel).Inc() - t.Rsp.Status = succStatus() metrics.RootCoordDDLReqLatency.WithLabelValues("ShowCollections").Observe(float64(tr.ElapseSpan().Milliseconds())) + + log.Info("done to show collections", zap.String("role", typeutil.RootCoordRole), + zap.String("dbname", in.GetDbName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()), + zap.Int("num of collections", len(t.Rsp.GetCollectionNames()))) // maybe very large, print number instead. 
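	// Every DDL entry point above follows the same enqueue-and-wait pattern;
	// condensed (illustrative only, error handling and metrics omitted):
	//
	//	t := &showCollectionTask{
	//		baseTaskV2: baseTaskV2{ctx: ctx, core: c, done: make(chan error, 1)},
	//		Req:        in,
	//		Rsp:        &milvuspb.ShowCollectionsResponse{},
	//	}
	//	_ = c.scheduler.AddTask(t)   // serialize the DDL through the scheduler
	//	_ = t.WaitToFinish()         // block until the scheduled task finishes
	//	return t.Rsp, nil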
return t.Rsp, nil } // CreatePartition create partition func (c *Core) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil } + + metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("CreatePartition") - log.Debug("CreatePartition", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName), - zap.Int64("msgID", in.Base.MsgID)) - t := &CreatePartitionReqTask{ - baseReqTask: baseReqTask{ + + log.Info("received request to create partition", zap.String("role", typeutil.RootCoordRole), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &createPartitionTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, Req: in, } - err := executeTask(t) - if err != nil { - log.Error("CreatePartition failed", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName), - zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to create partition", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.FailLabel).Inc() - return failStatus(commonpb.ErrorCode_UnexpectedError, "CreatePartition failed: "+err.Error()), nil + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil + } + + if err := t.WaitToFinish(); err != nil { + log.Error("failed to create partition", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.FailLabel).Inc() + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } - log.Debug("CreatePartition success", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName), - zap.Int64("msgID", in.Base.MsgID)) metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("CreatePartition").Observe(float64(tr.ElapseSpan().Milliseconds())) - metrics.RootCoordNumOfPartitions.WithLabelValues().Inc() + + log.Info("done to create partition", zap.String("role", typeutil.RootCoordRole), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) return succStatus(), nil } // DropPartition drop partition func (c *Core) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) { - 
metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil } + + metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("DropPartition") - log.Debug("DropPartition", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName), - zap.Int64("msgID", in.Base.MsgID)) - t := &DropPartitionReqTask{ - baseReqTask: baseReqTask{ + + log.Info("received request to drop partition", zap.String("role", typeutil.RootCoordRole), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &dropPartitionTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, Req: in, } - err := executeTask(t) - if err != nil { - log.Error("DropPartition failed", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName), - zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to drop partition", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.FailLabel).Inc() - return failStatus(commonpb.ErrorCode_UnexpectedError, "DropPartition failed: "+err.Error()), nil + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil + } + if err := t.WaitToFinish(); err != nil { + log.Error("failed to drop partition", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.FailLabel).Inc() + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } - log.Debug("DropPartition success", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName), - zap.Int64("msgID", in.Base.MsgID)) metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("DropPartition").Observe(float64(tr.ElapseSpan().Milliseconds())) - metrics.RootCoordNumOfPartitions.WithLabelValues().Dec() + + log.Info("done to drop partition", zap.String("role", typeutil.RootCoordRole), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) return succStatus(), nil } // HasPartition check partition existence func (c *Core) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return &milvuspb.BoolResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, 
"StateCode="+internalpb.StateCode_name[int32(code)]), Value: false, }, nil } + + metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("HasPartition") - log.Debug("HasPartition", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName), - zap.Int64("msgID", in.Base.MsgID)) - t := &HasPartitionReqTask{ - baseReqTask: baseReqTask{ + log.Info("received request to has partition", zap.String("role", typeutil.RootCoordRole), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &hasPartitionTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, - Req: in, - HasPartition: false, + Req: in, + Rsp: &milvuspb.BoolResponse{}, } - err := executeTask(t) - if err != nil { - log.Error("HasPartition failed", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName), - zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to has partition", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.FailLabel).Inc() + return &milvuspb.BoolResponse{ + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasPartition failed: "+err.Error()), + Value: false, + }, nil + } + + if err := t.WaitToFinish(); err != nil { + log.Error("failed to has partition", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.FailLabel).Inc() return &milvuspb.BoolResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasPartition failed: "+err.Error()), Value: false, }, nil } - log.Debug("HasPartition success", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName), - zap.Int64("msgID", in.Base.MsgID)) metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("HasPartition").Observe(float64(tr.ElapseSpan().Milliseconds())) - return &milvuspb.BoolResponse{ - Status: succStatus(), - Value: t.HasPartition, - }, nil + + log.Info("done to has partition", zap.String("role", typeutil.RootCoordRole), + zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()), + zap.Bool("exist", t.Rsp.GetValue())) + return t.Rsp, nil } // ShowPartitions list all partition names func (c *Core) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return &milvuspb.ShowPartitionsResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, 
"StateCode="+internalpb.StateCode_name[int32(code)]), }, nil } + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("ShowPartitions") - log.Debug("ShowPartitions", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID)) - t := &ShowPartitionReqTask{ - baseReqTask: baseReqTask{ + + log.Info("received request to show partitions", zap.String("role", typeutil.RootCoordRole), + zap.String("collection", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &showPartitionTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, Req: in, Rsp: &milvuspb.ShowPartitionsResponse{}, } - err := executeTask(t) - if err != nil { - log.Error("ShowPartitions failed", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to show partitions", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection", in.GetCollectionName()), zap.Int64("msgID", in.GetBase().GetMsgID())) + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.FailLabel).Inc() return &milvuspb.ShowPartitionsResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowPartitions failed: "+err.Error()), }, nil } - log.Debug("ShowPartitions success", zap.String("role", typeutil.RootCoordRole), - zap.String("collection name", in.CollectionName), zap.Int("num of partitions", len(t.Rsp.PartitionNames)), - zap.Int64("msgID", t.Req.Base.MsgID)) + if err := t.WaitToFinish(); err != nil { + log.Error("failed to show partitions", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("collection", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.FailLabel).Inc() + return &milvuspb.ShowPartitionsResponse{ + Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowPartitions failed: "+err.Error()), + }, nil + } metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.SuccessLabel).Inc() - t.Rsp.Status = succStatus() metrics.RootCoordDDLReqLatency.WithLabelValues("ShowPartitions").Observe(float64(tr.ElapseSpan().Milliseconds())) + + log.Info("done to show partitions", zap.String("role", typeutil.RootCoordRole), + zap.String("collection", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs()), + zap.Strings("partitions", t.Rsp.GetPartitionNames())) return t.Rsp, nil } -//// DescribeSegment return segment info -//func (c *Core) DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) { -// metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeSegment", metrics.TotalLabel).Inc() -// if code, ok := c.checkHealthy(); !ok { -// return &milvuspb.DescribeSegmentResponse{ -// Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), -// }, nil -// } -// tr := timerecord.NewTimeRecorder("DescribeSegment") -// log.Debug("DescribeSegment", zap.String("role", typeutil.RootCoordRole), -// zap.Int64("collection id", in.CollectionID), zap.Int64("segment id", in.SegmentID), -// zap.Int64("msgID", in.Base.MsgID)) -// t := 
&DescribeSegmentReqTask{ -// baseReqTask: baseReqTask{ -// ctx: ctx, -// core: c, -// }, -// Req: in, -// Rsp: &milvuspb.DescribeSegmentResponse{}, -// } -// err := executeTask(t) -// if err != nil { -// log.Error("DescribeSegment failed", zap.String("role", typeutil.RootCoordRole), -// zap.Int64("collection id", in.CollectionID), zap.Int64("segment id", in.SegmentID), -// zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) -// metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeSegment", metrics.FailLabel).Inc() -// return &milvuspb.DescribeSegmentResponse{ -// Status: failStatus(commonpb.ErrorCode_UnexpectedError, "DescribeSegment failed: "+err.Error()), -// }, nil -// } -// log.Debug("DescribeSegment success", zap.String("role", typeutil.RootCoordRole), -// zap.Int64("collection id", in.CollectionID), zap.Int64("segment id", in.SegmentID), -// zap.Int64("msgID", in.Base.MsgID)) -// -// metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeSegment", metrics.SuccessLabel).Inc() -// metrics.RootCoordDDLReqLatency.WithLabelValues("DescribeSegment").Observe(float64(tr.ElapseSpan().Milliseconds())) -// t.Rsp.Status = succStatus() -// return t.Rsp, nil -//} -// -//func (c *Core) DescribeSegments(ctx context.Context, in *rootcoordpb.DescribeSegmentsRequest) (*rootcoordpb.DescribeSegmentsResponse, error) { -// metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeSegments", metrics.TotalLabel).Inc() -// if code, ok := c.checkHealthy(); !ok { -// log.Error("failed to describe segments, rootcoord not healthy", -// zap.String("role", typeutil.RootCoordRole), -// zap.Int64("msgID", in.GetBase().GetMsgID()), -// zap.Int64("collection", in.GetCollectionID()), -// zap.Int64s("segments", in.GetSegmentIDs())) -// -// return &rootcoordpb.DescribeSegmentsResponse{ -// Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), -// }, nil -// } -// -// tr := timerecord.NewTimeRecorder("DescribeSegments") -// -// log.Debug("received request to describe segments", -// zap.String("role", typeutil.RootCoordRole), -// zap.Int64("msgID", in.GetBase().GetMsgID()), -// zap.Int64("collection", in.GetCollectionID()), -// zap.Int64s("segments", in.GetSegmentIDs())) -// -// t := &DescribeSegmentsReqTask{ -// baseReqTask: baseReqTask{ -// ctx: ctx, -// core: c, -// }, -// Req: in, -// Rsp: &rootcoordpb.DescribeSegmentsResponse{}, -// } -// -// if err := executeTask(t); err != nil { -// log.Error("failed to describe segments", -// zap.Error(err), -// zap.String("role", typeutil.RootCoordRole), -// zap.Int64("msgID", in.GetBase().GetMsgID()), -// zap.Int64("collection", in.GetCollectionID()), -// zap.Int64s("segments", in.GetSegmentIDs())) -// -// metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeSegments", metrics.FailLabel).Inc() -// return &rootcoordpb.DescribeSegmentsResponse{ -// Status: failStatus(commonpb.ErrorCode_UnexpectedError, "DescribeSegments failed: "+err.Error()), -// }, nil -// } -// -// log.Debug("succeed to describe segments", -// zap.String("role", typeutil.RootCoordRole), -// zap.Int64("msgID", in.GetBase().GetMsgID()), -// zap.Int64("collection", in.GetCollectionID()), -// zap.Int64s("segments", in.GetSegmentIDs())) -// -// metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeSegments", metrics.SuccessLabel).Inc() -// metrics.RootCoordDDLReqLatency.WithLabelValues("DescribeSegments").Observe(float64(tr.ElapseSpan().Milliseconds())) -// -// t.Rsp.Status = succStatus() -// return t.Rsp, nil -//} - // ShowSegments list all segments func (c 
*Core) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequest) (*milvuspb.ShowSegmentsResponse, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("ShowSegments", metrics.TotalLabel).Inc() - if code, ok := c.checkHealthy(); !ok { - return &milvuspb.ShowSegmentsResponse{ - Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), - }, nil - } - tr := timerecord.NewTimeRecorder("ShowSegments") - - log.Debug("ShowSegments", zap.String("role", typeutil.RootCoordRole), - zap.Int64("collection id", in.CollectionID), zap.Int64("partition id", in.PartitionID), - zap.Int64("msgID", in.Base.MsgID)) - t := &ShowSegmentReqTask{ - baseReqTask: baseReqTask{ - ctx: ctx, - core: c, - }, - Req: in, - Rsp: &milvuspb.ShowSegmentsResponse{}, - } - err := executeTask(t) - if err != nil { - log.Debug("ShowSegments failed", zap.String("role", typeutil.RootCoordRole), - zap.Int64("collection id", in.CollectionID), zap.Int64("partition id", in.PartitionID), - zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) - metrics.RootCoordDDLReqCounter.WithLabelValues("ShowSegments", metrics.FailLabel).Inc() - return &milvuspb.ShowSegmentsResponse{ - Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowSegments failed: "+err.Error()), - }, nil - } - log.Debug("ShowSegments success", zap.String("role", typeutil.RootCoordRole), - zap.Int64("collection id", in.CollectionID), zap.Int64("partition id", in.PartitionID), - zap.Int64s("segments ids", t.Rsp.SegmentIDs), - zap.Int64("msgID", in.Base.MsgID)) - - metrics.RootCoordDDLReqCounter.WithLabelValues("ShowSegments", metrics.SuccessLabel).Inc() - metrics.RootCoordDDLReqLatency.WithLabelValues("ShowSegments").Observe(float64(tr.ElapseSpan().Milliseconds())) - t.Rsp.Status = succStatus() - return t.Rsp, nil + // ShowSegments is only used by GetPersistentSegmentInfo and has been deprecated for a long time. + // Though we keep the current logic for now, it is not quite right, since RootCoord only tracks indexed segments. 
+ return &milvuspb.ShowSegmentsResponse{Status: succStatus()}, nil } // AllocTimestamp alloc timestamp @@ -1691,22 +1197,25 @@ func (c *Core) AllocTimestamp(ctx context.Context, in *rootcoordpb.AllocTimestam Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), }, nil } - ts, err := c.TSOAllocator(in.Count) + + ts, err := c.tsoAllocator.GenerateTSO(in.GetCount()) if err != nil { - log.Error("AllocTimestamp failed", zap.String("role", typeutil.RootCoordRole), - zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + log.Error("failed to allocate timestamp", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.Int64("msgID", in.GetBase().GetMsgID())) + return &rootcoordpb.AllocTimestampResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "AllocTimestamp failed: "+err.Error()), }, nil } - //return first available time stamp - ts = ts - uint64(in.Count) + 1 + // return first available timestamp + ts = ts - uint64(in.GetCount()) + 1 metrics.RootCoordTimestamp.Set(float64(ts)) return &rootcoordpb.AllocTimestampResponse{ Status: succStatus(), Timestamp: ts, - Count: in.Count, + Count: in.GetCount(), }, nil } @@ -1717,15 +1226,18 @@ func (c *Core) AllocID(ctx context.Context, in *rootcoordpb.AllocIDRequest) (*ro Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), }, nil } - start, _, err := c.IDAllocator(in.Count) + start, _, err := c.idAllocator.Alloc(in.Count) if err != nil { - log.Error("AllocID failed", zap.String("role", typeutil.RootCoordRole), - zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + log.Error("failed to allocate id", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.Int64("msgID", in.GetBase().GetMsgID())) + return &rootcoordpb.AllocIDResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "AllocID failed: "+err.Error()), Count: in.Count, }, nil } + metrics.RootCoordIDAllocCounter.Add(float64(in.Count)) return &rootcoordpb.AllocIDResponse{ Status: succStatus(), @@ -1754,18 +1266,6 @@ func (c *Core) UpdateChannelTimeTick(ctx context.Context, in *internalpb.Channel return succStatus(), nil } -// ReleaseDQLMessageStream release DQL msgstream -func (c *Core) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - if code, ok := c.checkHealthy(); !ok { - return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil - } - err := c.proxyClientManager.ReleaseDQLMessageStream(ctx, in) - if err != nil { - return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil - } - return succStatus(), nil -} - // InvalidateCollectionMetaCache notifies RootCoord to release the collection cache in Proxies. 
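// A worked example for the timestamp arithmetic in AllocTimestamp above: the rewind
// "ts = ts - count + 1" implies GenerateTSO returns the last timestamp of the reserved
// batch. If GenerateTSO(5) returns 105, timestamps 101..105 were reserved, the response
// carries Timestamp=101 and Count=5, and the caller uses 101, 102, ..., 105.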
func (c *Core) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { if code, ok := c.checkHealthy(); !ok { @@ -1778,48 +1278,6 @@ func (c *Core) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.In return succStatus(), nil } -//// SegmentFlushCompleted check whether segment flush has completed -//func (c *Core) SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlushCompletedMsg) (status *commonpb.Status, err error) { -// if code, ok := c.checkHealthy(); !ok { -// return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil -// } -// if in.Base.MsgType != commonpb.MsgType_SegmentFlushDone { -// return failStatus(commonpb.ErrorCode_UnexpectedError, "invalid msg type "+commonpb.MsgType_name[int32(in.Base.MsgType)]), nil -// } -// -// log.Info("SegmentFlushCompleted received", zap.Int64("msgID", in.Base.MsgID), zap.Int64("collID", in.Segment.CollectionID), -// zap.Int64("partID", in.Segment.PartitionID), zap.Int64("segID", in.Segment.ID), zap.Int64s("compactFrom", in.Segment.CompactionFrom)) -// -// err = c.createIndexForSegment(ctx, in.Segment.CollectionID, in.Segment.PartitionID, in.Segment.ID, in.Segment.NumOfRows, in.Segment.Binlogs) -// if err != nil { -// log.Error("createIndexForSegment", zap.Int64("msgID", in.Base.MsgID), zap.Int64("collID", in.Segment.CollectionID), -// zap.Int64("partID", in.Segment.PartitionID), zap.Int64("segID", in.Segment.ID), zap.Error(err)) -// return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil -// } -// -// buildIDs := c.MetaTable.GetBuildIDsBySegIDs(in.Segment.CompactionFrom) -// if len(buildIDs) != 0 { -// if err = c.CallRemoveIndexService(ctx, buildIDs); err != nil { -// log.Error("CallRemoveIndexService failed", zap.Int64("msgID", in.Base.MsgID), zap.Int64("collID", in.Segment.CollectionID), -// zap.Int64("partID", in.Segment.PartitionID), zap.Int64("segID", in.Segment.ID), -// zap.Int64s("compactFrom", in.Segment.CompactionFrom), zap.Int64s("buildIDs", buildIDs), zap.Error(err)) -// return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil -// } -// } -// -// if err = c.MetaTable.RemoveSegments(in.Segment.CollectionID, in.Segment.PartitionID, in.Segment.CompactionFrom); err != nil { -// log.Error("RemoveSegments failed", zap.Int64("msgID", in.Base.MsgID), zap.Int64("collID", in.Segment.CollectionID), -// zap.Int64("partID", in.Segment.PartitionID), zap.Int64("segID", in.Segment.ID), -// zap.Int64s("compactFrom", in.Segment.CompactionFrom), zap.Error(err)) -// return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil -// } -// -// log.Debug("SegmentFlushCompleted success", zap.String("role", typeutil.RootCoordRole), -// zap.Int64("collection id", in.Segment.CollectionID), zap.Int64("partition id", in.Segment.PartitionID), -// zap.Int64("segment id", in.Segment.ID), zap.Int64("msgID", in.Base.MsgID)) -// return succStatus(), nil -//} - //ShowConfigurations returns the configurations of RootCoord matching req.Pattern func (c *Core) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) { if code, ok := c.checkHealthy(); !ok { @@ -1888,100 +1346,152 @@ func (c *Core) GetMetrics(ctx context.Context, in *milvuspb.GetMetricsRequest) ( // CreateAlias create collection alias func (c *Core) CreateAlias(ctx context.Context, in *milvuspb.CreateAliasRequest) (*commonpb.Status, error) { - 
metrics.RootCoordDDLReqCounter.WithLabelValues("CreateAlias", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil } + + metrics.RootCoordDDLReqCounter.WithLabelValues("CreateAlias", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("CreateAlias") - log.Debug("CreateAlias", zap.String("role", typeutil.RootCoordRole), - zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName), - zap.Int64("msgID", in.Base.MsgID)) - t := &CreateAliasReqTask{ - baseReqTask: baseReqTask{ + + log.Info("received request to create alias", zap.String("role", typeutil.RootCoordRole), + zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &createAliasTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, Req: in, } - err := executeTask(t) - if err != nil { - log.Error("CreateAlias failed", zap.String("role", typeutil.RootCoordRole), - zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName), - zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to create alias", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + metrics.RootCoordDDLReqCounter.WithLabelValues("CreateAlias", metrics.FailLabel).Inc() - return failStatus(commonpb.ErrorCode_UnexpectedError, "CreateAlias failed: "+err.Error()), nil + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil + } + + if err := t.WaitToFinish(); err != nil { + log.Error("failed to create alias", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("CreateAlias", metrics.FailLabel).Inc() + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } - log.Debug("CreateAlias success", zap.String("role", typeutil.RootCoordRole), - zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName), - zap.Int64("msgID", in.Base.MsgID)) metrics.RootCoordDDLReqCounter.WithLabelValues("CreateAlias", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("CreateAlias").Observe(float64(tr.ElapseSpan().Milliseconds())) + + log.Info("done to create alias", zap.String("role", typeutil.RootCoordRole), + zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) return succStatus(), nil } // DropAlias drop collection alias func (c *Core) DropAlias(ctx context.Context, in *milvuspb.DropAliasRequest) (*commonpb.Status, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil } + + metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("DropAlias") - log.Debug("DropAlias", zap.String("role", typeutil.RootCoordRole), - zap.String("alias", 
in.Alias), zap.Int64("msgID", in.Base.MsgID)) - t := &DropAliasReqTask{ - baseReqTask: baseReqTask{ + + log.Info("received request to drop alias", zap.String("role", typeutil.RootCoordRole), + zap.String("alias", in.GetAlias()), zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &dropAliasTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, Req: in, } - err := executeTask(t) - if err != nil { - log.Error("DropAlias failed", zap.String("role", typeutil.RootCoordRole), - zap.String("alias", in.Alias), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to drop alias", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("alias", in.GetAlias()), zap.Int64("msgID", in.GetBase().GetMsgID())) + metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.FailLabel).Inc() - return failStatus(commonpb.ErrorCode_UnexpectedError, "DropAlias failed: "+err.Error()), nil + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil + } + + if err := t.WaitToFinish(); err != nil { + log.Error("failed to drop alias", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("alias", in.GetAlias()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.FailLabel).Inc() + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } - log.Debug("DropAlias success", zap.String("role", typeutil.RootCoordRole), - zap.String("alias", in.Alias), zap.Int64("msgID", in.Base.MsgID)) metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("DropAlias").Observe(float64(tr.ElapseSpan().Milliseconds())) + + log.Info("done to drop alias", zap.String("role", typeutil.RootCoordRole), + zap.String("alias", in.GetAlias()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) return succStatus(), nil } // AlterAlias alter collection alias func (c *Core) AlterAlias(ctx context.Context, in *milvuspb.AlterAliasRequest) (*commonpb.Status, error) { - metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.TotalLabel).Inc() if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil } + + metrics.RootCoordDDLReqCounter.WithLabelValues("AlterAlias", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("AlterAlias") - log.Debug("AlterAlias", zap.String("role", typeutil.RootCoordRole), - zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName), - zap.Int64("msgID", in.Base.MsgID)) - t := &AlterAliasReqTask{ - baseReqTask: baseReqTask{ + + log.Info("received request to alter alias", zap.String("role", typeutil.RootCoordRole), + zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + + t := &alterAliasTask{ + baseTaskV2: baseTaskV2{ ctx: ctx, core: c, + done: make(chan error, 1), }, Req: in, } - err := executeTask(t) - if err != nil { - log.Error("AlterAlias failed", zap.String("role", typeutil.RootCoordRole), - zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName), - zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) + + if err := c.scheduler.AddTask(t); err != nil { + log.Error("failed to enqueue request to alter alias", 
zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID())) + metrics.RootCoordDDLReqCounter.WithLabelValues("AlterAlias", metrics.FailLabel).Inc() - return failStatus(commonpb.ErrorCode_UnexpectedError, "AlterAlias failed: "+err.Error()), nil + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil + } + + if err := t.WaitToFinish(); err != nil { + log.Error("failed to alter alias", zap.String("role", typeutil.RootCoordRole), + zap.Error(err), + zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) + + metrics.RootCoordDDLReqCounter.WithLabelValues("AlterAlias", metrics.FailLabel).Inc() + return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } - log.Debug("AlterAlias success", zap.String("role", typeutil.RootCoordRole), - zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName), - zap.Int64("msgID", in.Base.MsgID)) metrics.RootCoordDDLReqCounter.WithLabelValues("AlterAlias", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("AlterAlias").Observe(float64(tr.ElapseSpan().Milliseconds())) + + log.Info("done to alter alias", zap.String("role", typeutil.RootCoordRole), + zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()), + zap.Int64("msgID", in.GetBase().GetMsgID()), zap.Uint64("ts", t.GetTs())) return succStatus(), nil } @@ -1996,14 +1506,14 @@ func (c *Core) Import(ctx context.Context, req *milvuspb.ImportRequest) (*milvus // Get collection/partition ID from collection/partition name. var cID UniqueID var err error - if cID, err = c.MetaTable.GetCollectionIDByName(req.GetCollectionName()); err != nil { + if cID, err = c.meta.GetCollectionIDByName(req.GetCollectionName()); err != nil { log.Error("failed to find collection ID from its name", zap.String("collection name", req.GetCollectionName()), zap.Error(err)) return nil, err } var pID UniqueID - if pID, err = c.MetaTable.getPartitionByName(cID, req.GetPartitionName(), 0); err != nil { + if pID, err = c.meta.GetPartitionByName(cID, req.GetPartitionName(), typeutil.MaxTimestamp); err != nil { log.Error("failed to get partition ID from its name", zap.String("partition name", req.GetPartitionName()), zap.Error(err)) @@ -2062,7 +1572,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) ( if ir.GetState() == commonpb.ImportState_ImportAllocSegment { // Lock the segments, so we don't lose track of them when compaction happens. // Note that these locks will be unlocked in c.postImportPersistLoop() -> checkSegmentLoadedLoop(). - if err := c.CallAddSegRefLock(ctx, ir.GetTaskId(), ir.GetSegments()); err != nil { + if err := c.broker.AddSegRefLock(ctx, ir.GetTaskId(), ir.GetSegments()); err != nil { log.Error("failed to acquire segment ref lock", zap.Error(err)) return &commonpb.Status{ ErrorCode: commonpb.ErrorCode_UnexpectedError, @@ -2103,7 +1613,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) ( // Release segments when task fails. 
log.Info("task failed, release segment ref locks") err := retry.Do(ctx, func() error { - return c.CallReleaseSegRefLock(ctx, ir.GetTaskId(), ir.GetSegments()) + return c.broker.ReleaseSegRefLock(ctx, ir.GetTaskId(), ir.GetSegments()) }, retry.Attempts(100)) if err != nil { log.Error("failed to release lock, about to panic!") @@ -2125,7 +1635,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) ( // Look up collection name on collection ID. var colName string var colMeta *model.Collection - if colMeta, err = c.MetaTable.GetCollectionByID(ti.GetCollectionId(), 0); err != nil { + if colMeta, err = c.meta.GetCollectionByID(ctx, ti.GetCollectionId(), typeutil.MaxTimestamp); err != nil { log.Error("failed to get collection name", zap.Int64("collection ID", ti.GetCollectionId()), zap.Error(err)) @@ -2143,7 +1653,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) ( resendTaskFunc() // Flush all import data segments. - c.CallFlushOnCollection(ctx, ti.GetCollectionId(), ir.GetSegments()) + c.broker.Flush(ctx, ti.GetCollectionId(), ir.GetSegments()) // Check if data are "queryable" and if indices are built on all segments. go c.postImportPersistLoop(c.ctx, ir.GetTaskId(), ti.GetCollectionId(), colName, ir.GetSegments()) @@ -2160,7 +1670,7 @@ func (c *Core) CountCompleteIndex(ctx context.Context, collectionName string, co // Note: Index name is always Params.CommonCfg.DefaultIndexName in current Milvus designs as of today. indexName := Params.CommonCfg.DefaultIndexName - states, err := c.CallGetSegmentIndexStateService(ctx, collectionID, indexName, allSegmentIDs) + states, err := c.broker.GetSegmentIndexState(ctx, collectionID, indexName, allSegmentIDs) if err != nil { log.Error("failed to get index state in checkSegmentIndexStates", zap.Error(err)) return false, err @@ -2188,7 +1698,7 @@ func (c *Core) postImportPersistLoop(ctx context.Context, taskID int64, colID in c.wg.Add(1) go c.checkSegmentLoadedLoop(ctx, taskID, colID, segIDs) // Check if collection has any indexed fields. If so, start a loop to check segments' index states. 
- if _, err := c.MetaTable.GetCollectionByID(colID, 0); err != nil { + if _, err := c.meta.GetCollectionByID(ctx, colID, typeutil.MaxTimestamp); err != nil { log.Error("failed to find meta for collection", zap.Int64("collection ID", colID), zap.Error(err)) @@ -2209,7 +1719,7 @@ func (c *Core) checkSegmentLoadedLoop(ctx context.Context, taskID int64, colID i defer func() { log.Info("we are done checking segment loading state, release segment ref locks") err := retry.Do(ctx, func() error { - return c.CallReleaseSegRefLock(ctx, taskID, segIDs) + return c.broker.ReleaseSegRefLock(ctx, taskID, segIDs) }, retry.Attempts(100)) if err != nil { log.Error("failed to release lock, about to panic!") @@ -2222,7 +1732,7 @@ func (c *Core) checkSegmentLoadedLoop(ctx context.Context, taskID int64, colID i log.Info("(in check segment loaded loop) context done, exiting checkSegmentLoadedLoop") return case <-ticker.C: - resp, err := c.CallGetSegmentInfoService(ctx, colID, segIDs) + resp, err := c.broker.GetQuerySegmentInfo(ctx, colID, segIDs) log.Debug("(in check segment loaded loop)", zap.Int64("task ID", taskID), zap.Int64("collection ID", colID), @@ -2323,7 +1833,7 @@ func (c *Core) CreateCredential(ctx context.Context, credInfo *internalpb.Creden zap.String("username", credInfo.Username)) // insert to db - err := c.MetaTable.AddCredential(credInfo) + err := c.meta.AddCredential(credInfo) if err != nil { log.Error("CreateCredential save credential failed", zap.String("role", typeutil.RootCoordRole), zap.String("username", credInfo.Username), zap.Error(err)) @@ -2354,7 +1864,7 @@ func (c *Core) GetCredential(ctx context.Context, in *rootcoordpb.GetCredentialR log.Debug("GetCredential", zap.String("role", typeutil.RootCoordRole), zap.String("username", in.Username)) - credInfo, err := c.MetaTable.GetCredential(in.Username) + credInfo, err := c.meta.GetCredential(in.Username) if err != nil { log.Error("GetCredential query credential failed", zap.String("role", typeutil.RootCoordRole), zap.String("username", in.Username), zap.Error(err)) @@ -2383,7 +1893,7 @@ func (c *Core) UpdateCredential(ctx context.Context, credInfo *internalpb.Creden log.Debug("UpdateCredential", zap.String("role", typeutil.RootCoordRole), zap.String("username", credInfo.Username)) // update data on storage - err := c.MetaTable.AlterCredential(credInfo) + err := c.meta.AlterCredential(credInfo) if err != nil { log.Error("UpdateCredential save credential failed", zap.String("role", typeutil.RootCoordRole), zap.String("username", credInfo.Username), zap.Error(err)) @@ -2413,7 +1923,7 @@ func (c *Core) DeleteCredential(ctx context.Context, in *milvuspb.DeleteCredenti tr := timerecord.NewTimeRecorder(method) // delete data on storage - err := c.MetaTable.DeleteCredential(in.Username) + err := c.meta.DeleteCredential(in.Username) if err != nil { log.Error("DeleteCredential remove credential failed", zap.String("role", typeutil.RootCoordRole), zap.String("username", in.Username), zap.Error(err)) @@ -2443,7 +1953,7 @@ func (c *Core) ListCredUsers(ctx context.Context, in *milvuspb.ListCredUsersRequ metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder(method) - credInfo, err := c.MetaTable.ListCredentialUsernames() + credInfo, err := c.meta.ListCredentialUsernames() if err != nil { log.Error("ListCredUsers query usernames failed", zap.String("role", typeutil.RootCoordRole), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) @@ -2466,7 +1976,7 @@ func (c *Core) ListCredUsers(ctx 
context.Context, in *milvuspb.ListCredUsersRequ // - check the node health // - check if the role is existed // - check if the role num has reached the limit -// - create the role by the metatable api +// - create the role by the meta api func (c *Core) CreateRole(ctx context.Context, in *milvuspb.CreateRoleRequest) (*commonpb.Status, error) { method := "CreateRole" metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc() @@ -2478,7 +1988,7 @@ func (c *Core) CreateRole(ctx context.Context, in *milvuspb.CreateRoleRequest) ( } entity := in.Entity - err := c.MetaTable.CreateRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: entity.Name}) + err := c.meta.CreateRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: entity.Name}) if err != nil { errMsg := "fail to create role" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) @@ -2499,7 +2009,7 @@ func (c *Core) CreateRole(ctx context.Context, in *milvuspb.CreateRoleRequest) ( // - check if the role has some grant info // - get all role mapping of this role // - drop these role mappings -// - drop the role by the metatable api +// - drop the role by the meta api func (c *Core) DropRole(ctx context.Context, in *milvuspb.DropRoleRequest) (*commonpb.Status, error) { method := "DropRole" metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc() @@ -2509,13 +2019,13 @@ func (c *Core) DropRole(ctx context.Context, in *milvuspb.DropRoleRequest) (*com if code, ok := c.checkHealthy(); !ok { return errorutil.UnhealthyStatus(code), errorutil.UnhealthyError() } - if _, err := c.MetaTable.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, false); err != nil { + if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, false); err != nil { errMsg := "the role isn't existed" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) return failStatus(commonpb.ErrorCode_DropRoleFailure, errMsg), nil } - grantEntities, err := c.MetaTable.SelectGrant(util.DefaultTenant, &milvuspb.GrantEntity{ + grantEntities, err := c.meta.SelectGrant(util.DefaultTenant, &milvuspb.GrantEntity{ Role: &milvuspb.RoleEntity{Name: in.RoleName}, }) if len(grantEntities) != 0 { @@ -2523,7 +2033,7 @@ func (c *Core) DropRole(ctx context.Context, in *milvuspb.DropRoleRequest) (*com log.Error(errMsg, zap.Any("in", in), zap.Error(err)) return failStatus(commonpb.ErrorCode_DropRoleFailure, errMsg), nil } - roleResults, err := c.MetaTable.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, true) + roleResults, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, true) if err != nil { errMsg := "fail to select a role by role name" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) @@ -2532,7 +2042,7 @@ func (c *Core) DropRole(ctx context.Context, in *milvuspb.DropRoleRequest) (*com logger.Debug("role to user info", zap.Int("counter", len(roleResults))) for _, roleResult := range roleResults { for index, userEntity := range roleResult.Users { - if err = c.MetaTable.OperateUserRole(util.DefaultTenant, + if err = c.meta.OperateUserRole(util.DefaultTenant, &milvuspb.UserEntity{Name: userEntity.Name}, &milvuspb.RoleEntity{Name: roleResult.Role.Name}, milvuspb.OperateUserRoleType_RemoveUserFromRole); err != nil { if common.IsIgnorableError(err) { @@ -2544,12 +2054,12 @@ func (c *Core) DropRole(ctx context.Context, in *milvuspb.DropRoleRequest) (*com } } } - if err = c.MetaTable.DropGrant(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}); 
err != nil { + if err = c.meta.DropGrant(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}); err != nil { errMsg := "fail to drop the grant" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) return failStatus(commonpb.ErrorCode_DropRoleFailure, errMsg), nil } - if err = c.MetaTable.DropRole(util.DefaultTenant, in.RoleName); err != nil { + if err = c.meta.DropRole(util.DefaultTenant, in.RoleName); err != nil { errMsg := "fail to drop the role" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) return failStatus(commonpb.ErrorCode_DropRoleFailure, errMsg), nil @@ -2566,7 +2076,7 @@ func (c *Core) DropRole(ctx context.Context, in *milvuspb.DropRoleRequest) (*com // - check the node health // - check if the role is valid // - check if the user is valid -// - operate the user-role by the metatable api +// - operate the user-role by the meta api // - update the policy cache func (c *Core) OperateUserRole(ctx context.Context, in *milvuspb.OperateUserRoleRequest) (*commonpb.Status, error) { method := "OperateUserRole-" + in.Type.String() @@ -2578,18 +2088,18 @@ func (c *Core) OperateUserRole(ctx context.Context, in *milvuspb.OperateUserRole return errorutil.UnhealthyStatus(code), errorutil.UnhealthyError() } - if _, err := c.MetaTable.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, false); err != nil { + if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, false); err != nil { errMsg := "fail to check the role name" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) return failStatus(commonpb.ErrorCode_OperateUserRoleFailure, errMsg), nil } - if _, err := c.MetaTable.SelectUser(util.DefaultTenant, &milvuspb.UserEntity{Name: in.Username}, false); err != nil { + if _, err := c.meta.SelectUser(util.DefaultTenant, &milvuspb.UserEntity{Name: in.Username}, false); err != nil { errMsg := "fail to check the username" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) return failStatus(commonpb.ErrorCode_OperateUserRoleFailure, errMsg), nil } updateCache := true - if err := c.MetaTable.OperateUserRole(util.DefaultTenant, &milvuspb.UserEntity{Name: in.Username}, &milvuspb.RoleEntity{Name: in.RoleName}, in.Type); err != nil { + if err := c.meta.OperateUserRole(util.DefaultTenant, &milvuspb.UserEntity{Name: in.Username}, &milvuspb.RoleEntity{Name: in.RoleName}, in.Type); err != nil { if !common.IsIgnorableError(err) { errMsg := "fail to operate user to role" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) @@ -2629,7 +2139,7 @@ func (c *Core) OperateUserRole(ctx context.Context, in *milvuspb.OperateUserRole // SelectRole select role // - check the node health // - check if the role is valid when this param is provided -// - select role by the metatable api +// - select role by the meta api func (c *Core) SelectRole(ctx context.Context, in *milvuspb.SelectRoleRequest) (*milvuspb.SelectRoleResponse, error) { method := "SelectRole" metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc() @@ -2641,7 +2151,7 @@ func (c *Core) SelectRole(ctx context.Context, in *milvuspb.SelectRoleRequest) ( } if in.Role != nil { - if _, err := c.MetaTable.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.Role.Name}, false); err != nil { + if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.Role.Name}, false); err != nil { if common.IsKeyNotExistError(err) { return &milvuspb.SelectRoleResponse{ Status: succStatus(), @@ -2654,7 +2164,7 @@ func (c *Core) SelectRole(ctx context.Context, 
in *milvuspb.SelectRoleRequest) ( }, nil } } - roleResults, err := c.MetaTable.SelectRole(util.DefaultTenant, in.Role, in.IncludeUserInfo) + roleResults, err := c.meta.SelectRole(util.DefaultTenant, in.Role, in.IncludeUserInfo) if err != nil { errMsg := "fail to select the role" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) @@ -2675,7 +2185,7 @@ func (c *Core) SelectRole(ctx context.Context, in *milvuspb.SelectRoleRequest) ( // SelectUser select user // - check the node health // - check if the user is valid when this param is provided -// - select user by the metatable api +// - select user by the meta api func (c *Core) SelectUser(ctx context.Context, in *milvuspb.SelectUserRequest) (*milvuspb.SelectUserResponse, error) { method := "SelectUser" metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc() @@ -2687,7 +2197,7 @@ func (c *Core) SelectUser(ctx context.Context, in *milvuspb.SelectUserRequest) ( } if in.User != nil { - if _, err := c.MetaTable.SelectUser(util.DefaultTenant, &milvuspb.UserEntity{Name: in.User.Name}, false); err != nil { + if _, err := c.meta.SelectUser(util.DefaultTenant, &milvuspb.UserEntity{Name: in.User.Name}, false); err != nil { if common.IsKeyNotExistError(err) { return &milvuspb.SelectUserResponse{ Status: succStatus(), @@ -2700,7 +2210,7 @@ func (c *Core) SelectUser(ctx context.Context, in *milvuspb.SelectUserRequest) ( }, nil } } - userResults, err := c.MetaTable.SelectUser(util.DefaultTenant, in.User, in.IncludeRoleInfo) + userResults, err := c.meta.SelectUser(util.DefaultTenant, in.User, in.IncludeRoleInfo) if err != nil { errMsg := "fail to select the user" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) @@ -2725,7 +2235,7 @@ func (c *Core) isValidRole(entity *milvuspb.RoleEntity) error { if entity.Name == "" { return errors.New("the name in the role entity is empty") } - if _, err := c.MetaTable.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: entity.Name}, false); err != nil { + if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: entity.Name}, false); err != nil { return err } return nil @@ -2751,7 +2261,7 @@ func (c *Core) isValidGrantor(entity *milvuspb.GrantorEntity, object string) err if entity.User.Name == "" { return errors.New("the name in the user entity of the grantor entity is empty") } - if _, err := c.MetaTable.SelectUser(util.DefaultTenant, &milvuspb.UserEntity{Name: entity.User.Name}, false); err != nil { + if _, err := c.meta.SelectUser(util.DefaultTenant, &milvuspb.UserEntity{Name: entity.User.Name}, false); err != nil { return err } if entity.Privilege == nil { @@ -2780,7 +2290,7 @@ func (c *Core) isValidGrantor(entity *milvuspb.GrantorEntity, object string) err // - check if the operating type is valid // - check if the entity is nil // - check if the params, including the resource entity, the principal entity, the grantor entity, is valid -// - operate the privilege by the metatable api +// - operate the privilege by the meta api // - update the policy cache func (c *Core) OperatePrivilege(ctx context.Context, in *milvuspb.OperatePrivilegeRequest) (*commonpb.Status, error) { method := "OperatePrivilege" @@ -2823,7 +2333,7 @@ func (c *Core) OperatePrivilege(ctx context.Context, in *milvuspb.OperatePrivile in.Entity.ObjectName = util.AnyWord } updateCache := true - if err := c.MetaTable.OperatePrivilege(util.DefaultTenant, in.Entity, in.Type); err != nil { + if err := c.meta.OperatePrivilege(util.DefaultTenant, in.Entity, in.Type); err != nil { if 
!common.IsIgnorableError(err) { errMsg := "fail to operate the privilege" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) @@ -2864,7 +2374,7 @@ func (c *Core) OperatePrivilege(ctx context.Context, in *milvuspb.OperatePrivile // - check the node health // - check if the principal entity is valid // - check if the resource entity which is provided by the user is valid -// - select grant by the metatable api +// - select grant by the meta api func (c *Core) SelectGrant(ctx context.Context, in *milvuspb.SelectGrantRequest) (*milvuspb.SelectGrantResponse, error) { method := "SelectGrant" metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc() @@ -2898,7 +2408,7 @@ func (c *Core) SelectGrant(ctx context.Context, in *milvuspb.SelectGrantRequest) } } - grantEntities, err := c.MetaTable.SelectGrant(util.DefaultTenant, in.Entity) + grantEntities, err := c.meta.SelectGrant(util.DefaultTenant, in.Entity) if common.IsKeyNotExistError(err) { return &milvuspb.SelectGrantResponse{ Status: succStatus(), @@ -2933,7 +2443,7 @@ func (c *Core) ListPolicy(ctx context.Context, in *internalpb.ListPolicyRequest) }, errorutil.UnhealthyError() } - policies, err := c.MetaTable.ListPolicy(util.DefaultTenant) + policies, err := c.meta.ListPolicy(util.DefaultTenant) if err != nil { errMsg := "fail to list policy" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) @@ -2941,7 +2451,7 @@ func (c *Core) ListPolicy(ctx context.Context, in *internalpb.ListPolicyRequest) Status: failStatus(commonpb.ErrorCode_ListPolicyFailure, errMsg), }, nil } - userRoles, err := c.MetaTable.ListUserRole(util.DefaultTenant) + userRoles, err := c.meta.ListUserRole(util.DefaultTenant) if err != nil { errMsg := "fail to list user-role" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) diff --git a/internal/rootcoord/root_coord_test.go b/internal/rootcoord/root_coord_test.go index c36c814484..a66d84e8e5 100644 --- a/internal/rootcoord/root_coord_test.go +++ b/internal/rootcoord/root_coord_test.go @@ -1,2949 +1,805 @@ -// Licensed to the LF AI & Data foundation under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package rootcoord import ( "context" - "encoding/json" - "errors" - "fmt" "math/rand" - "path" - "sync" "testing" - "time" - "github.com/golang/protobuf/proto" - "github.com/milvus-io/milvus/internal/common" - "github.com/milvus-io/milvus/internal/kv" - etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" - memkv "github.com/milvus-io/milvus/internal/kv/mem" - "github.com/milvus-io/milvus/internal/metastore/kv/rootcoord" - "github.com/milvus-io/milvus/internal/metastore/model" - "github.com/milvus-io/milvus/internal/mq/msgstream" - "github.com/milvus-io/milvus/internal/proto/commonpb" - "github.com/milvus-io/milvus/internal/proto/datapb" - "github.com/milvus-io/milvus/internal/proto/indexpb" - "github.com/milvus-io/milvus/internal/proto/internalpb" - "github.com/milvus-io/milvus/internal/proto/milvuspb" "github.com/milvus-io/milvus/internal/proto/proxypb" - "github.com/milvus-io/milvus/internal/proto/querypb" - "github.com/milvus-io/milvus/internal/proto/rootcoordpb" - "github.com/milvus-io/milvus/internal/proto/schemapb" - "github.com/milvus-io/milvus/internal/types" - "github.com/milvus-io/milvus/internal/util" - "github.com/milvus-io/milvus/internal/util/dependency" - "github.com/milvus-io/milvus/internal/util/etcd" - "github.com/milvus-io/milvus/internal/util/funcutil" + "github.com/milvus-io/milvus/internal/util/metricsinfo" - "github.com/milvus-io/milvus/internal/util/retry" "github.com/milvus-io/milvus/internal/util/sessionutil" - "github.com/milvus-io/milvus/internal/util/typeutil" + + "github.com/milvus-io/milvus/internal/util/funcutil" + + "github.com/milvus-io/milvus/internal/proto/internalpb" + + "github.com/milvus-io/milvus/internal/allocator" + + "github.com/milvus-io/milvus/internal/proto/rootcoordpb" + + "github.com/milvus-io/milvus/internal/proto/commonpb" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - clientv3 "go.etcd.io/etcd/client/v3" + + "github.com/milvus-io/milvus/internal/proto/milvuspb" ) -const ( - TestDMLChannelNum = 32 - returnError = "ReturnError" - returnUnsuccessfulStatus = "ReturnUnsuccessfulStatus" -) - -var disabledIndexBuildID []int64 - -type ctxKey struct{} - -type proxyMock struct { - types.Proxy - collArray []string - collIDs []UniqueID - mutex sync.Mutex - - returnError bool - returnGrpcError bool -} - -func (p *proxyMock) Stop() error { - return nil -} - -func (p *proxyMock) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { - p.mutex.Lock() - defer p.mutex.Unlock() - if p.returnError { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_UnexpectedError, - }, nil - } - if p.returnGrpcError { - return nil, fmt.Errorf("grpc error") - } - p.collArray = append(p.collArray, request.CollectionName) - p.collIDs = append(p.collIDs, request.CollectionID) - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, nil -} - -func (p *proxyMock) GetCollArray() []string { - p.mutex.Lock() - defer p.mutex.Unlock() - ret := make([]string, 0, len(p.collArray)) - ret = append(ret, p.collArray...) 
- return ret -} - -func (p *proxyMock) GetCollIDs() []UniqueID { - p.mutex.Lock() - defer p.mutex.Unlock() - ret := p.collIDs - return ret -} - -func (p *proxyMock) ReleaseDQLMessageStream(ctx context.Context, request *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) { - if p.returnError { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_UnexpectedError, - }, nil - } - if p.returnGrpcError { - return nil, fmt.Errorf("grpc error") - } - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, nil -} - -func (p *proxyMock) InvalidateCredentialCache(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) { - if p.returnError { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_UnexpectedError, - }, nil - } - if p.returnGrpcError { - return nil, fmt.Errorf("grpc error") - } - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, nil -} - -func (p *proxyMock) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, nil -} - -type dataMock struct { - types.DataCoord - randVal int - mu sync.Mutex - segs []typeutil.UniqueID -} - -func (d *dataMock) Init() error { - return nil -} - -func (d *dataMock) Start() error { - return nil -} - -func (d *dataMock) GetInsertBinlogPaths(ctx context.Context, req *datapb.GetInsertBinlogPathsRequest) (*datapb.GetInsertBinlogPathsResponse, error) { - rst := &datapb.GetInsertBinlogPathsResponse{ - FieldIDs: []int64{}, - Paths: []*internalpb.StringList{}, - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, - } - for i := 0; i < 200; i++ { - rst.FieldIDs = append(rst.FieldIDs, int64(i)) - path := &internalpb.StringList{ - Values: []string{fmt.Sprintf("file0-%d", i), fmt.Sprintf("file1-%d", i), fmt.Sprintf("file2-%d", i)}, - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, - } - rst.Paths = append(rst.Paths, path) - } - return rst, nil -} - -func (d *dataMock) GetSegmentInfo(ctx context.Context, req *datapb.GetSegmentInfoRequest) (*datapb.GetSegmentInfoResponse, error) { - return &datapb.GetSegmentInfoResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, - Infos: []*datapb.SegmentInfo{ - { - NumOfRows: Params.RootCoordCfg.MinSegmentSizeToEnableIndex, - State: commonpb.SegmentState_Flushed, - }, - }, - }, nil -} - -func (d *dataMock) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInfoRequest) (*datapb.GetRecoveryInfoResponse, error) { - var fieldBinlog []*datapb.FieldBinlog - for i := 0; i < 200; i++ { - binlog := &datapb.FieldBinlog{ - FieldID: int64(i), - Binlogs: []*datapb.Binlog{ - { - LogPath: fmt.Sprintf("file0-%d", i), - }, - { - LogPath: fmt.Sprintf("file1-%d", i), - }, - { - LogPath: fmt.Sprintf("file2-%d", i), - }, - }, - } - fieldBinlog = append(fieldBinlog, binlog) - } - - d.mu.Lock() - segmentBinlogs := make([]*datapb.SegmentBinlogs, 0, len(d.segs)) - for _, segID := range d.segs { - segmentBinlog := &datapb.SegmentBinlogs{ - SegmentID: segID, - NumOfRows: Params.RootCoordCfg.MinSegmentSizeToEnableIndex, - FieldBinlogs: fieldBinlog, - } - segmentBinlogs = append(segmentBinlogs, segmentBinlog) - } - d.mu.Unlock() - - return &datapb.GetRecoveryInfoResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, - Binlogs: segmentBinlogs, - 
}, nil -} - -func (d *dataMock) GetFlushedSegments(ctx context.Context, req *datapb.GetFlushedSegmentsRequest) (*datapb.GetFlushedSegmentsResponse, error) { - d.mu.Lock() - defer d.mu.Unlock() - - rsp := &datapb.GetFlushedSegmentsResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, - } - rsp.Segments = append(rsp.Segments, d.segs...) - return rsp, nil -} - -func (d *dataMock) WatchChannels(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) { - return &datapb.WatchChannelsResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }}, nil -} - -func (d *dataMock) SetSegmentState(ctx context.Context, req *datapb.SetSegmentStateRequest) (*datapb.SetSegmentStateResponse, error) { - if req.GetSegmentId() == 999 /* intended failure seg ID */ { - return &datapb.SetSegmentStateResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_UnexpectedError, - }, - }, nil - } - return &datapb.SetSegmentStateResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, - }, nil -} - -func (d *dataMock) Import(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) { - return &datapb.ImportTaskResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, - }, nil -} - -func (d *dataMock) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) { - return &datapb.FlushResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, - }, nil -} - -func (d *dataMock) AcquireSegmentLock(context.Context, *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, nil -} - -func (d *dataMock) ReleaseSegmentLock(context.Context, *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, nil -} - -type queryMock struct { - types.QueryCoord - collID []typeutil.UniqueID - mutex sync.Mutex -} - -func (q *queryMock) Init() error { - return nil -} - -func (q *queryMock) Start() error { - return nil -} - -func (q *queryMock) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) { - q.mutex.Lock() - defer q.mutex.Unlock() - q.collID = append(q.collID, req.CollectionID) - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, nil -} - -func (q *queryMock) ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, nil -} - -type indexMock struct { - types.IndexCoord -} - -func (idx *indexMock) Init() error { - return nil -} - -func (idx *indexMock) Start() error { - return nil -} - -func (idx *indexMock) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) { - return &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, nil -} - -func (idx *indexMock) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) { - v := ctx.Value(ctxKey{}).(string) - if v == returnError { - return nil, fmt.Errorf("injected error") - } else if v == returnUnsuccessfulStatus { - return &indexpb.GetSegmentIndexStateResponse{ - Status: &commonpb.Status{ - ErrorCode: 
100, - Reason: "not so good", - }, - }, nil - } - segIdxState := make([]*indexpb.SegmentIndexState, 0) - for _, segID := range req.SegmentIDs { - segIdxState = append(segIdxState, &indexpb.SegmentIndexState{ - SegmentID: segID, - State: commonpb.IndexState_Finished, - FailReason: "", - }) - } - return &indexpb.GetSegmentIndexStateResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - Reason: "", - }, - States: segIdxState, - }, nil -} - -func clearMsgChan(timeout time.Duration, targetChan <-chan *msgstream.MsgPack) { - ch := time.After(timeout) - for { - select { - case <-ch: - return - case <-targetChan: - - } - } -} - -func getNotTtMsg(ctx context.Context, n int, ch <-chan *msgstream.MsgPack) []msgstream.TsMsg { - ret := make([]msgstream.TsMsg, 0, n) - for { - select { - case <-ctx.Done(): - return nil - case msg, ok := <-ch: - if ok { - for _, v := range msg.Msgs { - if _, ok := v.(*msgstream.TimeTickMsg); !ok { - ret = append(ret, v) - } - } - if len(ret) >= n { - return ret - } - } - } - } -} - -func createCollectionInMeta(dbName, collName string, core *Core, shardsNum int32, modifyFunc func(collection *model.Collection)) error { - schema := schemapb.CollectionSchema{ - Name: collName, - } - - sbf, err := proto.Marshal(&schema) - if err != nil { - return err - } - - t := &milvuspb.CreateCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreateCollection, - Timestamp: 100, - }, - DbName: dbName, - CollectionName: collName, - Schema: sbf, - ShardsNum: shardsNum, - } - - err = proto.Unmarshal(t.Schema, &schema) - if err != nil { - return fmt.Errorf("unmarshal schema error= %w", err) - } - - for idx, field := range schema.Fields { - field.FieldID = int64(idx + StartOfUserFieldID) - } - rowIDField := &schemapb.FieldSchema{ - FieldID: int64(RowIDField), - Name: RowIDFieldName, - IsPrimaryKey: false, - Description: "row id", - DataType: schemapb.DataType_Int64, - } - timeStampField := &schemapb.FieldSchema{ - FieldID: int64(TimeStampField), - Name: TimeStampFieldName, - IsPrimaryKey: false, - Description: "time stamp", - DataType: schemapb.DataType_Int64, - } - schema.Fields = append(schema.Fields, rowIDField, timeStampField) - - collID, _, err := core.IDAllocator(1) - if err != nil { - return fmt.Errorf("alloc collection id error = %w", err) - } - partID, _, err := core.IDAllocator(1) - if err != nil { - return fmt.Errorf("alloc partition id error = %w", err) - } - - vchanNames := make([]string, t.ShardsNum) - chanNames := core.chanTimeTick.getDmlChannelNames(int(t.ShardsNum)) - for i := int32(0); i < t.ShardsNum; i++ { - vchanNames[i] = fmt.Sprintf("%s_%dv%d", chanNames[i], collID, i) - } - - collInfo := model.Collection{ - CollectionID: collID, - Name: schema.Name, - Description: schema.Description, - AutoID: schema.AutoID, - Fields: model.UnmarshalFieldModels(schema.Fields), - VirtualChannelNames: vchanNames, - PhysicalChannelNames: chanNames, - ShardsNum: 0, // intend to set zero - Partitions: []*model.Partition{ - { - PartitionID: partID, - PartitionName: Params.CommonCfg.DefaultPartitionName, - PartitionCreatedTimestamp: 0, - }, - }, - } - - if modifyFunc != nil { - modifyFunc(&collInfo) - } - - // schema is modified (add RowIDField and TimestampField), - // so need Marshal again - schemaBytes, err := proto.Marshal(&schema) - if err != nil { - return fmt.Errorf("marshal schema error = %w", err) - } - - ddCollReq := internalpb.CreateCollectionRequest{ - Base: t.Base, - DbName: t.DbName, - CollectionName: t.CollectionName, - 
PartitionName: Params.CommonCfg.DefaultPartitionName, - DbID: 0, //TODO,not used - CollectionID: collID, - PartitionID: partID, - Schema: schemaBytes, - VirtualChannelNames: vchanNames, - PhysicalChannelNames: chanNames, - } - - reason := fmt.Sprintf("create collection %d", collID) - ts, err := core.TSOAllocator(1) - if err != nil { - return fmt.Errorf("tso alloc fail, error = %w", err) - } - - // build DdOperation and save it into etcd, when ddmsg send fail, - // system can restore ddmsg from etcd and re-send - ddCollReq.Base.Timestamp = ts - ddOpStr, err := EncodeDdOperation(&ddCollReq, CreateCollectionDDType) - if err != nil { - return fmt.Errorf("encodeDdOperation fail, error = %w", err) - } - - // use lambda function here to guarantee all resources to be released - createCollectionFn := func() error { - // lock for ddl operation - core.ddlLock.Lock() - defer core.ddlLock.Unlock() - - core.chanTimeTick.addDdlTimeTick(ts, reason) - // clear ddl timetick in all conditions - defer core.chanTimeTick.removeDdlTimeTick(ts, reason) - - err = core.MetaTable.AddCollection(&collInfo, ts, ddOpStr) - if err != nil { - return fmt.Errorf("meta table add collection failed,error = %w", err) - } - return nil - } - - err = createCollectionFn() - if err != nil { - return err - } - return nil -} - -// a mock kv that always fail when LoadWithPrefix -type loadPrefixFailKV struct { - kv.TxnKV -} - -// LoadWithPrefix override behavior -func (kv *loadPrefixFailKV) LoadWithPrefix(key string) ([]string, []string, error) { - return []string{}, []string{}, retry.Unrecoverable(errors.New("mocked fail")) -} - -func TestRootCoordInit(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - coreFactory := dependency.NewDefaultFactory(true) - Params.Init() - Params.RootCoordCfg.DmlChannelNum = TestDMLChannelNum - - etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.NoError(t, err) - defer etcdCli.Close() - - core, err := NewCore(ctx, coreFactory) - require.Nil(t, err) - assert.NoError(t, err) - core.SetEtcdClient(etcdCli) - randVal := rand.Int() - - Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.MetaRootPath) - Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.KvRootPath) - - err = core.Init() - assert.NoError(t, err) - core.session.TriggerKill = false - err = core.Register() - assert.NoError(t, err) - - // inject kvBaseCreate fail - core, err = NewCore(ctx, coreFactory) - core.SetEtcdClient(etcdCli) - require.Nil(t, err) - assert.NoError(t, err) - randVal = rand.Int() - - Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.MetaRootPath) - Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.KvRootPath) - - core.kvBaseCreate = func(string) (kv.TxnKV, error) { - return nil, retry.Unrecoverable(errors.New("injected")) - } - core.metaKVCreate = func(root string) (kv.MetaKv, error) { - return nil, retry.Unrecoverable(errors.New("injected")) - } - err = core.Init() - assert.Error(t, err) - - core.session.TriggerKill = false - err = core.Register() - assert.NoError(t, err) - - // inject metaKV create fail - core, err = NewCore(ctx, coreFactory) - core.SetEtcdClient(etcdCli) - require.Nil(t, err) - assert.NoError(t, err) - randVal = rand.Int() - - Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.MetaRootPath) - Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.KvRootPath) - - core.kvBaseCreate = func(root string) (kv.TxnKV, error) { 
- if root == Params.EtcdCfg.MetaRootPath { - return nil, retry.Unrecoverable(errors.New("injected")) - } - return memkv.NewMemoryKV(), nil - } - core.metaKVCreate = func(root string) (kv.MetaKv, error) { - return nil, nil - } - err = core.Init() - assert.Error(t, err) - - core.session.TriggerKill = false - err = core.Register() - assert.NoError(t, err) - - // inject newSuffixSnapshot failure - core, err = NewCore(ctx, coreFactory) - core.SetEtcdClient(etcdCli) - require.Nil(t, err) - assert.NoError(t, err) - randVal = rand.Int() - - Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.MetaRootPath) - Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.KvRootPath) - - core.kvBaseCreate = func(string) (kv.TxnKV, error) { - return nil, nil - } - core.metaKVCreate = func(root string) (kv.MetaKv, error) { - return nil, nil - } - err = core.Init() - assert.Error(t, err) - - core.session.TriggerKill = false - err = core.Register() - assert.NoError(t, err) - - // inject newMetaTable failure - core, err = NewCore(ctx, coreFactory) - core.SetEtcdClient(etcdCli) - require.Nil(t, err) - assert.NoError(t, err) - randVal = rand.Int() - - Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.MetaRootPath) - Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.KvRootPath) - - core.kvBaseCreate = func(string) (kv.TxnKV, error) { - kv := memkv.NewMemoryKV() - return &loadPrefixFailKV{TxnKV: kv}, nil - } - core.metaKVCreate = func(root string) (kv.MetaKv, error) { - return nil, nil - } - err = core.Init() - assert.Error(t, err) - - core.session.TriggerKill = false - err = core.Register() - assert.NoError(t, err) -} - -func TestRootCoordInitData(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - coreFactory := dependency.NewDefaultFactory(true) - Params.Init() - Params.RootCoordCfg.DmlChannelNum = TestDMLChannelNum - - etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.NoError(t, err) - defer etcdCli.Close() - - core, err := NewCore(ctx, coreFactory) - assert.NoError(t, err) - core.SetEtcdClient(etcdCli) - - randVal := rand.Int() - Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.MetaRootPath) - Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.KvRootPath) - - // 1. normal init - err = core.Init() - assert.NoError(t, err) - - // 2. 
mock init data error - // firstly delete data - err = core.MetaTable.DeleteCredential(util.UserRoot) - assert.NoError(t, err) - - snapshotKV, err := rootcoord.NewMetaSnapshot(etcdCli, Params.EtcdCfg.MetaRootPath, TimestampPrefix, 7) - assert.NotNil(t, snapshotKV) - assert.NoError(t, err) - txnKV := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath) - mt, err := NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: txnKV, Snapshot: snapshotKV}) - assert.NoError(t, err) - mockTxnKV := &mockTestTxnKV{ - TxnKV: txnKV, - save: func(key, value string) error { - return fmt.Errorf("save error") - }, - remove: func(key string) error { return txnKV.Remove(key) }, - load: func(key string) (string, error) { return txnKV.Load(key) }, - loadWithPrefix: func(key string) ([]string, []string, error) { return txnKV.LoadWithPrefix(key) }, - } - //mt.txn = mockTxnKV - mt.catalog = &rootcoord.Catalog{Txn: mockTxnKV, Snapshot: snapshotKV} - core.MetaTable = mt - err = core.initData() - assert.Error(t, err) -} - -func TestRootCoord_Base(t *testing.T) { - const ( - dbName = "testDb" - collName = "testColl" - collName2 = "testColl2" - aliasName = "alias1" - partName = "testPartition" - segID = 1001 - ) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - coreFactory := dependency.NewDefaultFactory(true) - Params.Init() - Params.RootCoordCfg.DmlChannelNum = TestDMLChannelNum - Params.RootCoordCfg.ImportIndexCheckInterval = 0.1 - Params.RootCoordCfg.ImportIndexWaitLimit = 0.2 - Params.RootCoordCfg.ImportSegmentStateCheckInterval = 0.1 - Params.RootCoordCfg.ImportSegmentStateWaitLimit = 0.2 - core, err := NewCore(context.WithValue(ctx, ctxKey{}, ""), coreFactory) - assert.NoError(t, err) - randVal := rand.Int() - Params.CommonCfg.RootCoordTimeTick = fmt.Sprintf("rootcoord-time-tick-%d", randVal) - Params.CommonCfg.RootCoordStatistics = fmt.Sprintf("rootcoord-statistics-%d", randVal) - Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.MetaRootPath) - Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.KvRootPath) - Params.CommonCfg.RootCoordSubName = fmt.Sprintf("subname-%d", randVal) - Params.CommonCfg.RootCoordDml = fmt.Sprintf("rootcoord-dml-test-%d", randVal) - Params.CommonCfg.RootCoordDelta = fmt.Sprintf("rootcoord-delta-test-%d", randVal) - - etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.NoError(t, err) - defer etcdCli.Close() - - sessKey := path.Join(Params.EtcdCfg.MetaRootPath, sessionutil.DefaultServiceRoot) - _, err = etcdCli.Delete(ctx, sessKey, clientv3.WithPrefix()) - assert.NoError(t, err) - defer func() { - _, _ = etcdCli.Delete(ctx, sessKey, clientv3.WithPrefix()) - }() - - pnb, err := json.Marshal( - &sessionutil.Session{ - ServerID: 100, - }, - ) - assert.NoError(t, err) - _, err = etcdCli.Put(ctx, path.Join(sessKey, typeutil.ProxyRole+"-100"), string(pnb)) - assert.NoError(t, err) - - pnm := &proxyMock{ - collArray: make([]string, 0, 16), - mutex: sync.Mutex{}, - } - core.NewProxyClient = func(*sessionutil.Session) (types.Proxy, error) { - return pnm, nil - } - - dm := &dataMock{randVal: randVal} - err = core.SetDataCoord(ctx, dm) - assert.NoError(t, err) - - im := &indexMock{} - err = core.SetIndexCoord(im) - assert.NoError(t, err) - - qm := &queryMock{ - collID: nil, - mutex: sync.Mutex{}, - } - err = core.SetQueryCoord(qm) - assert.NoError(t, err) - - tmpFactory := dependency.NewDefaultFactory(true) - - dmlStream, _ := tmpFactory.NewMsgStream(ctx) - defer dmlStream.Close() - - 
core.SetEtcdClient(etcdCli) - - err = core.Init() - assert.NoError(t, err) - - var localTSO uint64 - localTSOLock := sync.RWMutex{} - core.TSOAllocator = func(c uint32) (uint64, error) { - localTSOLock.Lock() - defer localTSOLock.Unlock() - localTSO += uint64(c) - return localTSO, nil - } - - expireOldTasksInterval = 500 - err = core.Start() - assert.NoError(t, err) - - core.session.TriggerKill = false - err = core.Register() - assert.NoError(t, err) - - time.Sleep(100 * time.Millisecond) - shardsNum := int32(8) - - var wg sync.WaitGroup - - wg.Add(1) - t.Run("create collection", func(t *testing.T) { - defer wg.Done() - schema := schemapb.CollectionSchema{ - Name: collName, - AutoID: true, - Fields: []*schemapb.FieldSchema{ - { - FieldID: 100, - Name: "vector", - IsPrimaryKey: false, - Description: "vector", - DataType: schemapb.DataType_FloatVector, - TypeParams: nil, - IndexParams: []*commonpb.KeyValuePair{ - { - Key: "ik1", - Value: "iv1", - }, - }, - }, - }, - } - sbf, err := proto.Marshal(&schema) +func TestRootCoord_CreateCollection(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{}) assert.NoError(t, err) - req := &milvuspb.CreateCollectionRequest{ + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + + ctx := context.Background() + resp, err := c.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + + ctx := context.Background() + resp, err := c.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) +} + +func TestRootCoord_DropCollection(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.DropCollection(ctx, &milvuspb.DropCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.DropCollection(ctx, &milvuspb.DropCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + + ctx := context.Background() + resp, err := c.DropCollection(ctx, &milvuspb.DropCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + + ctx := context.Background() + resp, err := 
c.DropCollection(ctx, &milvuspb.DropCollectionRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) +} + +func TestRootCoord_DescribeCollection(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + + ctx := context.Background() + resp, err := c.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + + ctx := context.Background() + resp, err := c.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) +} + +func TestRootCoord_HasCollection(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.HasCollection(ctx, &milvuspb.HasCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.HasCollection(ctx, &milvuspb.HasCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + + ctx := context.Background() + resp, err := c.HasCollection(ctx, &milvuspb.HasCollectionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + + ctx := context.Background() + resp, err := c.HasCollection(ctx, &milvuspb.HasCollectionRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) +} + +func TestRootCoord_ShowCollections(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, 
resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + + ctx := context.Background() + resp, err := c.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + + ctx := context.Background() + resp, err := c.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) +} + +func TestRootCoord_CreatePartition(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + + ctx := context.Background() + resp, err := c.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + + ctx := context.Background() + resp, err := c.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) +} + +func TestRootCoord_DropPartition(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.DropPartition(ctx, &milvuspb.DropPartitionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.DropPartition(ctx, &milvuspb.DropPartitionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + + ctx := context.Background() + resp, err := c.DropPartition(ctx, &milvuspb.DropPartitionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + + ctx := context.Background() + resp, err := c.DropPartition(ctx, &milvuspb.DropPartitionRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) +} + +func TestRootCoord_HasPartition(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.HasPartition(ctx, 
&milvuspb.HasPartitionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.HasPartition(ctx, &milvuspb.HasPartitionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + + ctx := context.Background() + resp, err := c.HasPartition(ctx, &milvuspb.HasPartitionRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + + ctx := context.Background() + resp, err := c.HasPartition(ctx, &milvuspb.HasPartitionRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) +} + +func TestRootCoord_ShowPartitions(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + ctx := context.Background() + resp, err := c.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + ctx := context.Background() + resp, err := c.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) +} + +func TestRootCoord_CreateAlias(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.CreateAlias(ctx, &milvuspb.CreateAliasRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.CreateAlias(ctx, &milvuspb.CreateAliasRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + ctx := context.Background() + resp, err := c.CreateAlias(ctx, &milvuspb.CreateAliasRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), 
+ withValidScheduler()) + ctx := context.Background() + resp, err := c.CreateAlias(ctx, &milvuspb.CreateAliasRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) +} + +func TestRootCoord_DropAlias(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.DropAlias(ctx, &milvuspb.DropAliasRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.DropAlias(ctx, &milvuspb.DropAliasRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + ctx := context.Background() + resp, err := c.DropAlias(ctx, &milvuspb.DropAliasRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + ctx := context.Background() + resp, err := c.DropAlias(ctx, &milvuspb.DropAliasRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) +} + +func TestRootCoord_AlterAlias(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.AlterAlias(ctx, &milvuspb.AlterAliasRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to add task", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withInvalidScheduler()) + + ctx := context.Background() + resp, err := c.AlterAlias(ctx, &milvuspb.AlterAliasRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("failed to execute", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withTaskFailScheduler()) + ctx := context.Background() + resp, err := c.AlterAlias(ctx, &milvuspb.AlterAliasRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode(), + withValidScheduler()) + ctx := context.Background() + resp, err := c.AlterAlias(ctx, &milvuspb.AlterAliasRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) +} + +func TestRootCoord_AllocTimestamp(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + ctx := context.Background() + c := newTestCore(withAbnormalCode()) + resp, err := c.AllocTimestamp(ctx, &rootcoordpb.AllocTimestampRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("failed to allocate ts", func(t *testing.T) { + ctx := context.Background() + c := newTestCore(withHealthyCode(), + withInvalidTsoAllocator()) + resp, err := c.AllocTimestamp(ctx, &rootcoordpb.AllocTimestampRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("normal case", func(t *testing.T) { + alloc := newMockTsoAllocator() + count := uint32(10) + ts := 
Timestamp(100)
+		alloc.GenerateTSOF = func(count uint32) (uint64, error) {
+			// end ts
+			return ts, nil
+		}
+		ctx := context.Background()
+		c := newTestCore(withHealthyCode(),
+			withTsoAllocator(alloc))
+		resp, err := c.AllocTimestamp(ctx, &rootcoordpb.AllocTimestampRequest{Count: count})
+		assert.NoError(t, err)
+		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
+		// begin ts
+		assert.Equal(t, ts-uint64(count)+1, resp.GetTimestamp())
+		assert.Equal(t, count, resp.GetCount())
+	})
+}
+
+func TestRootCoord_AllocID(t *testing.T) {
+	t.Run("not healthy", func(t *testing.T) {
+		ctx := context.Background()
+		c := newTestCore(withAbnormalCode())
+		resp, err := c.AllocID(ctx, &rootcoordpb.AllocIDRequest{})
+		assert.NoError(t, err)
+		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
+	})
+
+	t.Run("failed to allocate id", func(t *testing.T) {
+		ctx := context.Background()
+		c := newTestCore(withHealthyCode(),
+			withInvalidIDAllocator())
+		resp, err := c.AllocID(ctx, &rootcoordpb.AllocIDRequest{})
+		assert.NoError(t, err)
+		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
+	})
+
+	t.Run("normal case", func(t *testing.T) {
+		alloc := newMockIDAllocator()
+		id := UniqueID(100)
+		alloc.AllocF = func(count uint32) (allocator.UniqueID, allocator.UniqueID, error) {
+			return id, id + int64(count), nil
+		}
+		count := uint32(10)
+		ctx := context.Background()
+		c := newTestCore(withHealthyCode(),
+			withIDAllocator(alloc))
+		resp, err := c.AllocID(ctx, &rootcoordpb.AllocIDRequest{Count: count})
+		assert.NoError(t, err)
+		assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
+		assert.Equal(t, id, resp.GetID())
+		assert.Equal(t, count, resp.GetCount())
+	})
+}
+
+func TestRootCoord_UpdateChannelTimeTick(t *testing.T) {
+	t.Run("not healthy", func(t *testing.T) {
+		ctx := context.Background()
+		c := newTestCore(withAbnormalCode())
+		resp, err := c.UpdateChannelTimeTick(ctx, &internalpb.ChannelTimeTickMsg{})
+		assert.NoError(t, err)
+		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode())
+	})
+
+	t.Run("invalid msg type", func(t *testing.T) {
+		ctx := context.Background()
+		c := newTestCore(withHealthyCode())
+		resp, err := c.UpdateChannelTimeTick(ctx, &internalpb.ChannelTimeTickMsg{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}})
+		assert.NoError(t, err)
+		assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode())
+	})
+
+	t.Run("invalid msg", func(t *testing.T) {
+		defer cleanTestEnv()
+
+		ticker := newRocksMqTtSynchronizer()
+
+		ctx := context.Background()
+		c := newTestCore(withHealthyCode(),
+			withTtSynchronizer(ticker))
+
+		// the length of channel names & timestamps mismatch.
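+		// UpdateChannelTimeTick is expected to reject a message whose channel
+		// name count and timestamp count disagree, so the call below should
+		// return a non-success status.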
+ resp, err := c.UpdateChannelTimeTick(ctx, &internalpb.ChannelTimeTickMsg{ Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreateCollection, - MsgID: 100, - Timestamp: 100, - SourceID: 100, - }, - DbName: dbName, - CollectionName: collName, - Schema: sbf, - ShardsNum: shardsNum, - } - status, err := core.CreateCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - - assert.Equal(t, shardsNum, int32(core.chanTimeTick.getDmlChannelNum())) - - createMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - dmlStream.AsConsumer([]string{createMeta.PhysicalChannelNames[0]}, Params.CommonCfg.RootCoordSubName) - dmlStream.Start() - - pChanMap := core.MetaTable.ListCollectionPhysicalChannels() - assert.Greater(t, len(pChanMap[createMeta.CollectionID]), 0) - vChanMap := core.MetaTable.ListCollectionVirtualChannels() - assert.Greater(t, len(vChanMap[createMeta.CollectionID]), 0) - - // get CreateCollectionMsg - msgs := getNotTtMsg(ctx, 1, dmlStream.Chan()) - assert.Equal(t, 1, len(msgs)) - createMsg, ok := (msgs[0]).(*msgstream.CreateCollectionMsg) - assert.True(t, ok) - assert.Equal(t, createMeta.CollectionID, createMsg.CollectionID) - assert.Equal(t, 1, len(createMeta.Partitions)) - assert.Equal(t, createMeta.Partitions[0].PartitionID, createMsg.PartitionID) - assert.Equal(t, createMeta.Partitions[0].PartitionName, createMsg.PartitionName) - assert.Equal(t, shardsNum, int32(len(createMeta.VirtualChannelNames))) - assert.Equal(t, shardsNum, int32(len(createMeta.PhysicalChannelNames))) - assert.Equal(t, shardsNum, createMeta.ShardsNum) - - vChanName := createMeta.VirtualChannelNames[0] - assert.Equal(t, createMeta.PhysicalChannelNames[0], funcutil.ToPhysicalChannel(vChanName)) - - // get TimeTickMsg - //msgPack, ok = <-dmlStream.Chan() - //assert.True(t, ok) - //assert.Equal(t, 1, len(msgPack.Msgs)) - //ddm, ok := (msgPack.Msgs[0]).(*msgstream.TimeTickMsg) - //assert.True(t, ok) - //assert.Greater(t, ddm.Base.Timestamp, uint64(0)) - core.chanTimeTick.lock.Lock() - assert.Equal(t, len(core.chanTimeTick.sess2ChanTsMap), 2) - pt, ok := core.chanTimeTick.sess2ChanTsMap[core.session.ServerID] - assert.True(t, ok) - assert.Equal(t, shardsNum, int32(len(pt.chanTsMap))) - for chanName, ts := range pt.chanTsMap { - assert.Contains(t, createMeta.PhysicalChannelNames, chanName) - assert.Equal(t, pt.defaultTs, ts) - } - core.chanTimeTick.lock.Unlock() - - // check invalid operation - req.Base.MsgID = 101 - req.Base.Timestamp = 101 - req.Base.SourceID = 101 - status, err = core.CreateCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode) - - req.Base.MsgID = 102 - req.Base.Timestamp = 102 - req.Base.SourceID = 102 - req.CollectionName = "testColl-again" - status, err = core.CreateCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode) - - schema.Name = req.CollectionName - sbf, err = proto.Marshal(&schema) - assert.NoError(t, err) - req.Schema = sbf - req.Base.MsgID = 103 - req.Base.Timestamp = 103 - req.Base.SourceID = 103 - status, err = core.CreateCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - }) - - wg.Add(1) - t.Run("has collection", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.HasCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasCollection, - MsgID: 110, - Timestamp: 110, - SourceID: 110, 
- }, - DbName: dbName, - CollectionName: collName, - } - rsp, err := core.HasCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, true, rsp.Value) - - req = &milvuspb.HasCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasCollection, - MsgID: 111, - Timestamp: 111, - SourceID: 111, - }, - DbName: dbName, - CollectionName: "testColl2", - } - rsp, err = core.HasCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, false, rsp.Value) - - // test time stamp go back - req = &milvuspb.HasCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasCollection, - MsgID: 111, - Timestamp: 111, - SourceID: 111, - }, - DbName: dbName, - CollectionName: collName, - } - rsp, err = core.HasCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, true, rsp.Value) - }) - - wg.Add(1) - t.Run("describe collection", func(t *testing.T) { - defer wg.Done() - collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - req := &milvuspb.DescribeCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DescribeCollection, - MsgID: 120, - Timestamp: 120, - SourceID: 120, - }, - DbName: dbName, - CollectionName: collName, - } - rsp, err := core.DescribeCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, collName, rsp.Schema.Name) - assert.Equal(t, collMeta.CollectionID, rsp.CollectionID) - assert.Equal(t, shardsNum, int32(len(rsp.VirtualChannelNames))) - assert.Equal(t, shardsNum, int32(len(rsp.PhysicalChannelNames))) - assert.Equal(t, shardsNum, rsp.ShardsNum) - }) - - wg.Add(1) - t.Run("show collection", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.ShowCollectionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowCollections, - MsgID: 130, - Timestamp: 130, - SourceID: 130, - }, - DbName: dbName, - } - rsp, err := core.ShowCollections(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.ElementsMatch(t, rsp.CollectionNames, []string{collName, "testColl-again"}) - assert.Equal(t, len(rsp.CollectionNames), 2) - }) - - wg.Add(1) - t.Run("create partition", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.CreatePartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreatePartition, - MsgID: 140, - Timestamp: 140, - SourceID: 140, - }, - DbName: dbName, - CollectionName: collName, - PartitionName: partName, - } - clearMsgChan(10*time.Millisecond, dmlStream.Chan()) - status, err := core.CreatePartition(ctx, req) - assert.NoError(t, err) - t.Log(status.Reason) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - assert.Equal(t, 2, len(collMeta.Partitions)) - partNameIdx1, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[1].PartitionID, 0) - assert.NoError(t, err) - assert.Equal(t, partName, partNameIdx1) - - msgs := getNotTtMsg(ctx, 1, dmlStream.Chan()) - assert.Equal(t, 1, len(msgs)) - partMsg, ok := (msgs[0]).(*msgstream.CreatePartitionMsg) - assert.True(t, ok) - assert.Equal(t, collMeta.CollectionID, partMsg.CollectionID) - assert.Equal(t, 
collMeta.Partitions[1].PartitionID, partMsg.PartitionID) - - assert.Equal(t, 1, len(pnm.GetCollIDs())) - assert.Equal(t, collMeta.CollectionID, pnm.GetCollIDs()[0]) - }) - - wg.Add(1) - t.Run("has partition", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.HasPartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasPartition, - MsgID: 150, - Timestamp: 150, - SourceID: 150, - }, - DbName: dbName, - CollectionName: collName, - PartitionName: partName, - } - rsp, err := core.HasPartition(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, true, rsp.Value) - }) - - wg.Add(1) - t.Run("show partition", func(t *testing.T) { - defer wg.Done() - coll, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - req := &milvuspb.ShowPartitionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowPartitions, - MsgID: 160, - Timestamp: 160, - SourceID: 160, - }, - DbName: dbName, - CollectionName: collName, - CollectionID: coll.CollectionID, - } - rsp, err := core.ShowPartitions(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, 2, len(rsp.PartitionNames)) - assert.Equal(t, 2, len(rsp.PartitionIDs)) - }) - - wg.Add(1) - t.Run("show segment", func(t *testing.T) { - defer wg.Done() - coll, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - partID := coll.Partitions[1].PartitionID - dm.mu.Lock() - dm.segs = []typeutil.UniqueID{1000, 1001, 1002, 1003, 1004, 1005} - dm.mu.Unlock() - - req := &milvuspb.ShowSegmentsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowSegments, - MsgID: 170, - Timestamp: 170, - SourceID: 170, - }, - CollectionID: coll.CollectionID, - PartitionID: partID, - } - rsp, err := core.ShowSegments(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, int64(1000), rsp.SegmentIDs[0]) - assert.Equal(t, int64(1001), rsp.SegmentIDs[1]) - assert.Equal(t, int64(1002), rsp.SegmentIDs[2]) - assert.Equal(t, int64(1003), rsp.SegmentIDs[3]) - assert.Equal(t, int64(1004), rsp.SegmentIDs[4]) - assert.Equal(t, int64(1005), rsp.SegmentIDs[5]) - assert.Equal(t, 6, len(rsp.SegmentIDs)) - }) - - wg.Add(1) - t.Run("count complete index", func(t *testing.T) { - defer wg.Done() - coll, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - // Normal case. - done, err := core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, ""), - collName, coll.CollectionID, []UniqueID{1000, 1001, 1002}) - assert.NoError(t, err) - assert.Equal(t, true, done) - // Case with an empty result. - done, err = core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, ""), collName, coll.CollectionID, []UniqueID{}) - assert.NoError(t, err) - assert.Equal(t, true, done) - // Case where GetIndexStates failed with error. - _, err = core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, returnError), - collName, coll.CollectionID, []UniqueID{1000, 1001, 1002}) - assert.Error(t, err) - // Case where GetIndexStates failed with bad status. - _, err = core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, returnUnsuccessfulStatus), - collName, coll.CollectionID, []UniqueID{1000, 1001, 1002}) - assert.Error(t, err) - // Case where describing segment fails, which is not considered as an error. 
- _, err = core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, ""), - collName, coll.CollectionID, []UniqueID{9000, 9001, 9002}) - assert.NoError(t, err) - }) - - wg.Add(1) - t.Run("import", func(t *testing.T) { - defer wg.Done() - tID := typeutil.UniqueID(0) - core.importManager.idAllocator = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) { - tID++ - return tID, 0, nil - } - req := &milvuspb.ImportRequest{ - CollectionName: collName, - PartitionName: partName, - RowBased: true, - Files: []string{"f1", "f2", "f3"}, - } - coll, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - core.MetaTable.collName2ID[collName] = coll.CollectionID - rsp, err := core.Import(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - }) - - wg.Add(1) - t.Run("import with collection ID not found", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.ImportRequest{ - CollectionName: "bad name", - PartitionName: partName, - RowBased: true, - Files: []string{"f1", "f2", "f3"}, - } - _, err := core.Import(ctx, req) - assert.Error(t, err) - }) - - wg.Add(1) - t.Run("get import state", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.GetImportStateRequest{ - Task: 1, - } - rsp, err := core.GetImportState(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - }) - - wg.Add(1) - t.Run("list import tasks", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.ListImportTasksRequest{} - rsp, err := core.ListImportTasks(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - }) - - wg.Add(1) - t.Run("report import update import task fail", func(t *testing.T) { - defer wg.Done() - // Case where report import request is nil. 
- resp, err := core.ReportImport(context.WithValue(ctx, ctxKey{}, ""), nil) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_UpdateImportTaskFailure, resp.ErrorCode) - }) - - wg.Add(1) - t.Run("report import collection name not found", func(t *testing.T) { - defer wg.Done() - var tID = typeutil.UniqueID(100) - core.importManager.idAllocator = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) { - tID++ - return tID, 0, nil - } - core.MetaTable.collName2ID["new"+collName] = 123 - core.MetaTable.collID2Meta[123] = model.Collection{ - CollectionID: 123, - Partitions: []*model.Partition{ - { - PartitionID: 456, - PartitionName: "testPartition", - }, - }, - } - req := &milvuspb.ImportRequest{ - CollectionName: "new" + collName, - PartitionName: partName, - RowBased: true, - Files: []string{"f1", "f2", "f3"}, - } - rsp, err := core.Import(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - delete(core.MetaTable.collName2ID, "new"+collName) - delete(core.MetaTable.collID2Meta, 123) - - reqIR := &rootcoordpb.ImportResult{ - TaskId: 101, - RowCount: 100, - Segments: []int64{1003, 1004, 1005}, - State: commonpb.ImportState_ImportPersisted, - } - resp, err := core.ReportImport(context.WithValue(ctx, ctxKey{}, ""), reqIR) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_CollectionNameNotFound, resp.ErrorCode) - }) - - wg.Add(1) - t.Run("report import with transitional state", func(t *testing.T) { - defer wg.Done() - req := &rootcoordpb.ImportResult{ - TaskId: 1, - RowCount: 100, - Segments: []int64{1000, 1001, 1002}, - State: commonpb.ImportState_ImportDownloaded, - } - resp, err := core.ReportImport(context.WithValue(ctx, ctxKey{}, ""), req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) - time.Sleep(500 * time.Millisecond) - }) - - wg.Add(1) - t.Run("report import with alloc seg state", func(t *testing.T) { - defer wg.Done() - req := &rootcoordpb.ImportResult{ - TaskId: 1, - RowCount: 100, - Segments: []int64{1000, 1001, 1002}, - State: commonpb.ImportState_ImportAllocSegment, - } - resp, err := core.ReportImport(context.WithValue(ctx, ctxKey{}, ""), req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) - time.Sleep(500 * time.Millisecond) - }) - - wg.Add(1) - t.Run("report import wait for index", func(t *testing.T) { - defer wg.Done() - core.CallGetSegmentInfoService = func(ctx context.Context, collectionID int64, - segIDs []int64) (*querypb.GetSegmentInfoResponse, error) { - return &querypb.GetSegmentInfoResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, - Infos: []*querypb.SegmentInfo{ - {SegmentID: 1000}, - {SegmentID: 1001}, - {SegmentID: 1002}, - }, - }, nil - } - req := &rootcoordpb.ImportResult{ - TaskId: 1, - RowCount: 100, - Segments: []int64{1000, 1001, 1002}, - State: commonpb.ImportState_ImportPersisted, - } - resp, err := core.ReportImport(context.WithValue(ctx, ctxKey{}, ""), req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) - time.Sleep(500 * time.Millisecond) - }) - - wg.Add(1) - t.Run("report import bring segments online with set segment state fail", func(t *testing.T) { - defer wg.Done() - req := &rootcoordpb.ImportResult{ - TaskId: 1, - RowCount: 100, - Segments: []int64{999}, /* pre-injected failure for segment ID = 999 */ - State: commonpb.ImportState_ImportPersisted, - } - resp, err := core.ReportImport(context.WithValue(ctx, ctxKey{}, 
""), req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode) - }) - - wg.Add(1) - t.Run("report import segments update already failed task", func(t *testing.T) { - defer wg.Done() - // Mark task 0 as failed. - core.importManager.updateTaskState( - &rootcoordpb.ImportResult{ - TaskId: 1, - RowCount: 100, - State: commonpb.ImportState_ImportFailed, - Segments: []int64{1000, 1001, 1002}, - }) - // Now try to update this task with a complete status. - resp, err := core.ReportImport(context.WithValue(ctx, ctxKey{}, ""), - &rootcoordpb.ImportResult{ - TaskId: 1, - RowCount: 100, - State: commonpb.ImportState_ImportPersisted, - Segments: []int64{1000, 1001, 1002}, - }) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_UpdateImportTaskFailure, resp.ErrorCode) - }) - - wg.Add(1) - t.Run("drop partition", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.DropPartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropPartition, - MsgID: 220, - Timestamp: 220, - SourceID: 220, - }, - DbName: dbName, - CollectionName: collName, - PartitionName: partName, - } - collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - dropPartID := collMeta.Partitions[1].PartitionID - status, err := core.DropPartition(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - collMeta, err = core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - assert.Equal(t, 1, len(collMeta.Partitions)) - partName, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[0].PartitionID, 0) - assert.NoError(t, err) - assert.Equal(t, Params.CommonCfg.DefaultPartitionName, partName) - - msgs := getNotTtMsg(ctx, 1, dmlStream.Chan()) - assert.Equal(t, 1, len(msgs)) - dmsg, ok := (msgs[0]).(*msgstream.DropPartitionMsg) - assert.True(t, ok) - assert.Equal(t, collMeta.CollectionID, dmsg.CollectionID) - assert.Equal(t, dropPartID, dmsg.PartitionID) - - assert.Equal(t, 2, len(pnm.GetCollIDs())) - assert.Equal(t, collMeta.CollectionID, pnm.GetCollIDs()[1]) - }) - - wg.Add(1) - t.Run("remove DQL msgstream", func(t *testing.T) { - defer wg.Done() - collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - - req := &proxypb.ReleaseDQLMessageStreamRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_RemoveQueryChannels, - SourceID: core.session.ServerID, - }, - CollectionID: collMeta.CollectionID, - } - status, err := core.ReleaseDQLMessageStream(core.ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - }) - - wg.Add(1) - t.Run("drop collection", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.DropCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropCollection, - MsgID: 230, - Timestamp: 230, - SourceID: 230, - }, - DbName: dbName, - CollectionName: collName, - } - collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - status, err := core.DropCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - - vChanName := collMeta.VirtualChannelNames[0] - assert.Equal(t, collMeta.PhysicalChannelNames[0], funcutil.ToPhysicalChannel(vChanName)) - - msgs := getNotTtMsg(ctx, 1, dmlStream.Chan()) - assert.Equal(t, 1, len(msgs)) - dmsg, ok := (msgs[0]).(*msgstream.DropCollectionMsg) - assert.True(t, ok) - assert.Equal(t, collMeta.CollectionID, 
dmsg.CollectionID) - collIDs := pnm.GetCollIDs() - assert.Equal(t, 3, len(collIDs)) - assert.Equal(t, collMeta.CollectionID, collIDs[2]) - - time.Sleep(100 * time.Millisecond) - qm.mutex.Lock() - assert.Equal(t, 1, len(qm.collID)) - assert.Equal(t, collMeta.CollectionID, qm.collID[0]) - qm.mutex.Unlock() - - req = &milvuspb.DropCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropCollection, - MsgID: 231, - Timestamp: 231, - SourceID: 231, - }, - DbName: dbName, - CollectionName: collName, - } - status, err = core.DropCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode) - time.Sleep(100 * time.Millisecond) - collIDs = pnm.GetCollIDs() - assert.Equal(t, 3, len(collIDs)) - assert.Equal(t, collMeta.CollectionID, collIDs[2]) - }) - - wg.Add(1) - t.Run("context_cancel", func(t *testing.T) { - defer wg.Done() - ctx2, cancel2 := context.WithTimeout(ctx, time.Millisecond*100) - defer cancel2() - time.Sleep(100 * time.Millisecond) - st, err := core.CreateCollection(ctx2, &milvuspb.CreateCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreateCollection, - MsgID: 1000, - Timestamp: 1000, - SourceID: 1000, + MsgType: commonpb.MsgType_TimeTick, }, + ChannelNames: []string{funcutil.GenRandomStr()}, + Timestamps: []uint64{}, }) assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - st, err = core.DropCollection(ctx2, &milvuspb.DropCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropCollection, - MsgID: 1001, - Timestamp: 1001, - SourceID: 1001, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - rsp1, err := core.HasCollection(ctx2, &milvuspb.HasCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasCollection, - MsgID: 1002, - Timestamp: 1002, - SourceID: 1002, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp1.Status.ErrorCode) - - rsp2, err := core.DescribeCollection(ctx2, &milvuspb.DescribeCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DescribeCollection, - MsgID: 1003, - Timestamp: 1003, - SourceID: 1003, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp2.Status.ErrorCode) - - rsp3, err := core.ShowCollections(ctx2, &milvuspb.ShowCollectionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowCollections, - MsgID: 1004, - Timestamp: 1004, - SourceID: 1004, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp3.Status.ErrorCode) - - st, err = core.CreatePartition(ctx2, &milvuspb.CreatePartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreatePartition, - MsgID: 1005, - Timestamp: 1005, - SourceID: 1005, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - st, err = core.DropPartition(ctx2, &milvuspb.DropPartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropPartition, - MsgID: 1006, - Timestamp: 1006, - SourceID: 1006, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - rsp4, err := core.HasPartition(ctx2, &milvuspb.HasPartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasPartition, - MsgID: 1007, - Timestamp: 1007, - SourceID: 1007, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp4.Status.ErrorCode) - - 
rsp5, err := core.ShowPartitions(ctx2, &milvuspb.ShowPartitionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowPartitions, - MsgID: 1008, - Timestamp: 1008, - SourceID: 1008, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp5.Status.ErrorCode) - - rsp8, err := core.ShowSegments(ctx2, &milvuspb.ShowSegmentsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowSegments, - MsgID: 1013, - Timestamp: 1013, - SourceID: 1013, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp8.Status.ErrorCode) - time.Sleep(1 * time.Second) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) }) - wg.Add(1) - t.Run("undefined req type", func(t *testing.T) { - defer wg.Done() - st, err := core.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 2000, - Timestamp: 2000, - SourceID: 2000, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - st, err = core.DropCollection(ctx, &milvuspb.DropCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 2001, - Timestamp: 2001, - SourceID: 2001, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - rsp1, err := core.HasCollection(ctx, &milvuspb.HasCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 2002, - Timestamp: 2002, - SourceID: 2002, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp1.Status.ErrorCode) - - rsp2, err := core.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 2003, - Timestamp: 2003, - SourceID: 2003, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp2.Status.ErrorCode) - - rsp3, err := core.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 2004, - Timestamp: 2004, - SourceID: 2004, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp3.Status.ErrorCode) - - st, err = core.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 2005, - Timestamp: 2005, - SourceID: 2005, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - st, err = core.DropPartition(ctx, &milvuspb.DropPartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 2006, - Timestamp: 2006, - SourceID: 2006, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - rsp4, err := core.HasPartition(ctx, &milvuspb.HasPartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 2007, - Timestamp: 2007, - SourceID: 2007, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp4.Status.ErrorCode) - - rsp5, err := core.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 2008, - Timestamp: 2008, - SourceID: 2008, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp5.Status.ErrorCode) - - rsp8, err := core.ShowSegments(ctx, &milvuspb.ShowSegmentsRequest{ - Base: &commonpb.MsgBase{ - MsgType: 
commonpb.MsgType_Undefined, - MsgID: 2013, - Timestamp: 2013, - SourceID: 2013, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp8.Status.ErrorCode) - - }) - - wg.Add(1) - t.Run("alloc time tick", func(t *testing.T) { - defer wg.Done() - req := &rootcoordpb.AllocTimestampRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 3000, - Timestamp: 3000, - SourceID: 3000, - }, - Count: 1, - } - rsp, err := core.AllocTimestamp(ctx, req) - assert.NoError(t, err) - assert.Equal(t, uint32(1), rsp.Count) - assert.NotZero(t, rsp.Timestamp) - }) - - wg.Add(1) - t.Run("alloc id", func(t *testing.T) { - defer wg.Done() - req := &rootcoordpb.AllocIDRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 3001, - Timestamp: 3001, - SourceID: 3001, - }, - Count: 1, - } - rsp, err := core.AllocID(ctx, req) - assert.NoError(t, err) - assert.Equal(t, uint32(1), rsp.Count) - assert.NotZero(t, rsp.ID) - }) - - wg.Add(1) - t.Run("get_channels", func(t *testing.T) { - defer wg.Done() - _, err := core.GetTimeTickChannel(ctx) - assert.NoError(t, err) - _, err = core.GetStatisticsChannel(ctx) - assert.NoError(t, err) - }) - - wg.Add(1) - t.Run("channel timetick", func(t *testing.T) { - defer wg.Done() - const ( - proxyIDInvalid = 102 - ts0 = uint64(20) - ts1 = uint64(40) - ts2 = uint64(60) - ) - numChan := core.chanTimeTick.getDmlChannelNum() - p1 := sessionutil.Session{ - ServerID: 100, - } - p2 := sessionutil.Session{ - ServerID: 101, - } - ctx2, cancel2 := context.WithTimeout(ctx, rootcoord.RequestTimeout) - defer cancel2() - s1, err := json.Marshal(&p1) - assert.NoError(t, err) - s2, err := json.Marshal(&p2) - assert.NoError(t, err) - - proxy1 := path.Join(sessKey, typeutil.ProxyRole) + "-1" - proxy2 := path.Join(sessKey, typeutil.ProxyRole) + "-2" - _, err = core.etcdCli.Put(ctx2, proxy1, string(s1)) - assert.NoError(t, err) - _, err = core.etcdCli.Put(ctx2, proxy2, string(s2)) - assert.NoError(t, err) - time.Sleep(100 * time.Millisecond) - - cns := core.chanTimeTick.getDmlChannelNames(3) - cn0 := cns[0] - cn1 := cns[1] - cn2 := cns[2] - core.chanTimeTick.addDmlChannels(cns...) 
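// Editorial sketch (not part of this patch): the property both the removed channel
// time-tick test here and the new UpdateChannelTimeTick cases below rely on. Every
// proxy session reports a timestamp per DML channel, and the synchronizer only
// advances a channel to the minimum timestamp seen across live sessions, so no proxy
// is overtaken. The helper name and map layout are illustrative assumptions only.
func minChannelTs(sessionReports map[int64]map[string]uint64, channel string) (uint64, bool) {
	var minTs uint64
	found := false
	for _, chanTs := range sessionReports { // sessionID -> (channel -> last reported ts)
		ts, ok := chanTs[channel]
		if !ok {
			continue // this session has not reported the channel yet
		}
		if !found || ts < minTs {
			minTs, found = ts, true
		}
	}
	return minTs, found
}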
- - // wait for local channel reported - for { - core.chanTimeTick.lock.Lock() - _, ok := core.chanTimeTick.sess2ChanTsMap[core.session.ServerID].chanTsMap[cn0] - - if !ok { - core.chanTimeTick.lock.Unlock() - time.Sleep(100 * time.Millisecond) - continue - } - - _, ok = core.chanTimeTick.sess2ChanTsMap[core.session.ServerID].chanTsMap[cn1] - - if !ok { - core.chanTimeTick.lock.Unlock() - time.Sleep(100 * time.Millisecond) - continue - } - - _, ok = core.chanTimeTick.sess2ChanTsMap[core.session.ServerID].chanTsMap[cn2] - - if !ok { - core.chanTimeTick.lock.Unlock() - time.Sleep(100 * time.Millisecond) - continue - } - core.chanTimeTick.lock.Unlock() - break - } - msg0 := &internalpb.ChannelTimeTickMsg{ + t.Run("normal case", func(t *testing.T) { + defer cleanTestEnv() + + source := int64(20220824) + ts := Timestamp(100) + defaultTs := Timestamp(101) + + ticker := newRocksMqTtSynchronizer() + ticker.addSession(&sessionutil.Session{ServerID: source}) + + ctx := context.Background() + c := newTestCore(withHealthyCode(), + withTtSynchronizer(ticker)) + + resp, err := c.UpdateChannelTimeTick(ctx, &internalpb.ChannelTimeTickMsg{ Base: &commonpb.MsgBase{ + SourceID: source, MsgType: commonpb.MsgType_TimeTick, - SourceID: 100, }, - ChannelNames: []string{cn0, cn1}, - Timestamps: []uint64{ts0, ts2}, - } - s, _ := core.UpdateChannelTimeTick(ctx, msg0) - assert.Equal(t, commonpb.ErrorCode_Success, s.ErrorCode) - time.Sleep(100 * time.Millisecond) - //t.Log(core.chanTimeTick.sess2ChanTsMap) - - msg1 := &internalpb.ChannelTimeTickMsg{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_TimeTick, - SourceID: 101, - }, - ChannelNames: []string{cn1, cn2}, - Timestamps: []uint64{ts1, ts2}, - } - s, _ = core.UpdateChannelTimeTick(ctx, msg1) - assert.Equal(t, commonpb.ErrorCode_Success, s.ErrorCode) - time.Sleep(100 * time.Millisecond) - - msgInvalid := &internalpb.ChannelTimeTickMsg{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_TimeTick, - SourceID: proxyIDInvalid, - }, - ChannelNames: []string{"test"}, - Timestamps: []uint64{0}, - } - s, _ = core.UpdateChannelTimeTick(ctx, msgInvalid) - assert.Equal(t, commonpb.ErrorCode_UnexpectedError, s.ErrorCode) - time.Sleep(100 * time.Millisecond) - - // 2 proxy, 1 rootcoord - assert.Equal(t, 3, core.chanTimeTick.getSessionNum()) - - // add 3 proxy channels - assert.Equal(t, 3, core.chanTimeTick.getDmlChannelNum()-numChan) - - _, err = core.etcdCli.Delete(ctx2, proxy1) + ChannelNames: []string{funcutil.GenRandomStr()}, + Timestamps: []uint64{ts}, + DefaultTimestamp: defaultTs, + }) assert.NoError(t, err) - _, err = core.etcdCli.Delete(ctx2, proxy2) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) + }) +} + +func TestRootCoord_InvalidateCollectionMetaCache(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + ctx := context.Background() + c := newTestCore(withAbnormalCode()) + resp, err := c.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{}) assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) }) - schema := schemapb.CollectionSchema{ - Name: collName, - } - sbf, err := proto.Marshal(&schema) - assert.NoError(t, err) - req := &milvuspb.CreateCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreateCollection, - MsgID: 3011, - Timestamp: 3011, - SourceID: 3011, - }, - DbName: dbName, - CollectionName: collName, - Schema: sbf, - } - status, err := core.CreateCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, 
commonpb.ErrorCode_Success, status.ErrorCode) - - wg.Add(1) - t.Run("create alias", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.CreateAliasRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreateAlias, - MsgID: 3012, - Timestamp: 3012, - SourceID: 3012, - }, - CollectionName: collName, - Alias: aliasName, - } - rsp, err := core.CreateAlias(ctx, req) + t.Run("failed to invalidate cache", func(t *testing.T) { + ctx := context.Background() + c := newTestCore(withHealthyCode(), + withInvalidProxyManager()) + resp, err := c.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{}) assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.ErrorCode) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) }) - wg.Add(1) - t.Run("describe collection2", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.DescribeCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DescribeCollection, - MsgID: 3013, - Timestamp: 3013, - SourceID: 3013, - }, - DbName: dbName, - CollectionName: collName, - } - rsp, err := core.DescribeCollection(ctx, req) + t.Run("normal case", func(t *testing.T) { + ctx := context.Background() + c := newTestCore(withHealthyCode(), + withValidProxyManager()) + resp, err := c.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{}) assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, rsp.Aliases, []string{aliasName}) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode()) }) +} - // temporarily create collName2 - schema = schemapb.CollectionSchema{ - Name: collName2, - } - sbf, err = proto.Marshal(&schema) - assert.NoError(t, err) - req2 := &milvuspb.CreateCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreateCollection, - MsgID: 3014, - Timestamp: 3014, - SourceID: 3014, - }, - DbName: dbName, - CollectionName: collName2, - Schema: sbf, - } - status, err = core.CreateCollection(ctx, req2) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - - wg.Add(1) - t.Run("alter alias", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.AlterAliasRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_AlterAlias, - MsgID: 3015, - Timestamp: 3015, - SourceID: 3015, - }, - CollectionName: collName2, - Alias: aliasName, - } - rsp, err := core.AlterAlias(ctx, req) +func TestRootCoord_ShowConfigurations(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + ctx := context.Background() + c := newTestCore(withAbnormalCode()) + resp, err := c.ShowConfigurations(ctx, &internalpb.ShowConfigurationsRequest{}) assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.ErrorCode) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) }) - wg.Add(1) - t.Run("drop collection with alias", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.DropCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropAlias, - MsgID: 3016, - Timestamp: 3016, - SourceID: 3016, - }, - CollectionName: aliasName, - } - rsp, err := core.DropCollection(ctx, req) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp.ErrorCode) - }) + t.Run("normal case", func(t *testing.T) { + Params.InitOnce() - wg.Add(1) - t.Run("drop alias", func(t *testing.T) { - defer wg.Done() - req := &milvuspb.DropAliasRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropAlias, - 
MsgID: 3017, - Timestamp: 3017, - SourceID: 3017, - }, - Alias: aliasName, - } - rsp, err := core.DropAlias(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.ErrorCode) - }) - - status, err = core.DropCollection(ctx, &milvuspb.DropCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropCollection, - MsgID: 3018, - Timestamp: 3018, - SourceID: 3018, - }, - DbName: dbName, - CollectionName: collName, - }) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - - status, err = core.DropCollection(ctx, &milvuspb.DropCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropCollection, - MsgID: 3019, - Timestamp: 3019, - SourceID: 3019, - }, - DbName: dbName, - CollectionName: collName2, - }) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - - wg.Add(1) - t.Run("show configurations", func(t *testing.T) { - defer wg.Done() pattern := "Port" req := &internalpb.ShowConfigurationsRequest{ Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_WatchQueryChannels, - MsgID: rand.Int63(), + MsgID: rand.Int63(), }, Pattern: pattern, } - //server is closed - stateSave := core.stateCode.Load().(internalpb.StateCode) - core.UpdateStateCode(internalpb.StateCode_Abnormal) - resp, err := core.ShowConfigurations(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode) - core.UpdateStateCode(stateSave) - //normal case - resp, err = core.ShowConfigurations(ctx, req) + ctx := context.Background() + c := newTestCore(withHealthyCode()) + resp, err := c.ShowConfigurations(ctx, req) assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) - assert.Equal(t, 1, len(resp.Configuations)) - assert.Equal(t, "rootcoord.port", resp.Configuations[0].Key) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + assert.Equal(t, 1, len(resp.GetConfiguations())) + assert.Equal(t, "rootcoord.port", resp.GetConfiguations()[0].Key) + }) +} + +func TestRootCoord_GetMetrics(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + ctx := context.Background() + c := newTestCore(withAbnormalCode()) + resp, err := c.GetMetrics(ctx, &milvuspb.GetMetricsRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) }) - wg.Add(1) - t.Run("get metrics", func(t *testing.T) { - defer wg.Done() - // not healthy - stateSave := core.stateCode.Load().(internalpb.StateCode) - core.UpdateStateCode(internalpb.StateCode_Abnormal) - resp, err := core.GetMetrics(ctx, &milvuspb.GetMetricsRequest{}) + t.Run("failed to parse metric type", func(t *testing.T) { + req := &milvuspb.GetMetricsRequest{ + Request: "invalid request", + } + ctx := context.Background() + c := newTestCore(withHealthyCode()) + resp, err := c.GetMetrics(ctx, req) assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) - core.UpdateStateCode(stateSave) - - // failed to parse metric type - invalidRequest := "invalid request" - resp, err = core.GetMetrics(ctx, &milvuspb.GetMetricsRequest{ - Request: invalidRequest, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + t.Run("unsupported metric type", func(t *testing.T) { // unsupported metric type unsupportedMetricType := "unsupported" req, err := 
metricsinfo.ConstructRequestByMetricType(unsupportedMetricType) assert.NoError(t, err) - resp, err = core.GetMetrics(ctx, req) + ctx := context.Background() + c := newTestCore(withHealthyCode()) + resp, err := c.GetMetrics(ctx, req) assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) - - // normal case - systemInfoMetricType := metricsinfo.SystemInfoMetrics - req, err = metricsinfo.ConstructRequestByMetricType(systemInfoMetricType) - assert.NoError(t, err) - resp, err = core.GetMetrics(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) }) - wg.Add(1) - t.Run("get system info", func(t *testing.T) { - defer wg.Done() - // normal case + t.Run("normal case", func(t *testing.T) { systemInfoMetricType := metricsinfo.SystemInfoMetrics req, err := metricsinfo.ConstructRequestByMetricType(systemInfoMetricType) assert.NoError(t, err) - resp, err := core.getSystemInfoMetrics(ctx, req) + ctx := context.Background() + c := newTestCore(withHealthyCode(), + withMetricsCacheManager()) + resp, err := c.GetMetrics(ctx, req) assert.NoError(t, err) assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) }) - err = core.Stop() - assert.NoError(t, err) - st, err := core.GetComponentStates(ctx) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, st.Status.ErrorCode) - assert.NotEqual(t, internalpb.StateCode_Healthy, st.State.StateCode) - - wg.Add(1) - t.Run("state_not_healthy", func(t *testing.T) { - defer wg.Done() - st, err := core.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreateCollection, - MsgID: 4000, - Timestamp: 4000, - SourceID: 4000, - }, + t.Run("get system info metrics from cache", func(t *testing.T) { + systemInfoMetricType := metricsinfo.SystemInfoMetrics + req, err := metricsinfo.ConstructRequestByMetricType(systemInfoMetricType) + assert.NoError(t, err) + ctx := context.Background() + c := newTestCore(withHealthyCode(), + withMetricsCacheManager()) + c.metricsCacheManager.UpdateSystemInfoMetrics(&milvuspb.GetMetricsResponse{ + Status: succStatus(), + Response: "cached response", + ComponentName: "cached component", }) + resp, err := c.GetMetrics(ctx, req) assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - st, err = core.DropCollection(ctx, &milvuspb.DropCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropCollection, - MsgID: 4001, - Timestamp: 4001, - SourceID: 4001, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - rsp1, err := core.HasCollection(ctx, &milvuspb.HasCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasCollection, - MsgID: 4002, - Timestamp: 4002, - SourceID: 4002, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp1.Status.ErrorCode) - - rsp2, err := core.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DescribeCollection, - MsgID: 4003, - Timestamp: 4003, - SourceID: 4003, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp2.Status.ErrorCode) - - rsp3, err := core.ShowCollections(ctx, &milvuspb.ShowCollectionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowCollections, - MsgID: 4004, - Timestamp: 4004, - 
SourceID: 4004, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp3.Status.ErrorCode) - - st, err = core.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreatePartition, - MsgID: 4005, - Timestamp: 4005, - SourceID: 4005, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - st, err = core.DropPartition(ctx, &milvuspb.DropPartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DropPartition, - MsgID: 4006, - Timestamp: 4006, - SourceID: 4006, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, st.ErrorCode) - - rsp4, err := core.HasPartition(ctx, &milvuspb.HasPartitionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_HasPartition, - MsgID: 4007, - Timestamp: 4007, - SourceID: 4007, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp4.Status.ErrorCode) - - rsp5, err := core.ShowPartitions(ctx, &milvuspb.ShowPartitionsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowPartitions, - MsgID: 4008, - Timestamp: 4008, - SourceID: 4008, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp5.Status.ErrorCode) - - rsp8, err := core.ShowSegments(ctx, &milvuspb.ShowSegmentsRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_ShowSegments, - MsgID: 4013, - Timestamp: 4013, - SourceID: 4013, - }, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp8.Status.ErrorCode) - - rsp9, err := core.Import(ctx, &milvuspb.ImportRequest{ - CollectionName: "c1", - PartitionName: "p1", - RowBased: true, - Files: []string{"f1", "f2", "f3"}, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp9.Status.ErrorCode) - - rsp10, err := core.GetImportState(ctx, &milvuspb.GetImportStateRequest{ - Task: 0, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp10.Status.ErrorCode) - - rsp11, err := core.ReportImport(ctx, &rootcoordpb.ImportResult{ - RowCount: 0, - }) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp11.ErrorCode) - - rsp12, err := core.ListImportTasks(ctx, &milvuspb.ListImportTasksRequest{}) - assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, rsp12.Status.ErrorCode) + assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) }) - wg.Add(1) - t.Run("alloc_error", func(t *testing.T) { - defer wg.Done() - core.Stop() - core.IDAllocator = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) { - return 0, 0, fmt.Errorf("id allocator error test") - } - core.TSOAllocator = func(count uint32) (typeutil.Timestamp, error) { - return 0, fmt.Errorf("tso allcoator error test") - } - core.Init() - core.Start() - r1 := &rootcoordpb.AllocTimestampRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 5000, - Timestamp: 5000, - SourceID: 5000, - }, - Count: 1, - } - p1, err := core.AllocTimestamp(ctx, r1) + t.Run("get system info metrics, cache miss", func(t *testing.T) { + systemInfoMetricType := metricsinfo.SystemInfoMetrics + req, err := metricsinfo.ConstructRequestByMetricType(systemInfoMetricType) assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, p1.Status.ErrorCode) - - r2 := &rootcoordpb.AllocIDRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_Undefined, - MsgID: 3001, - Timestamp: 3001, - SourceID: 
3001, - }, - Count: 1, - } - p2, err := core.AllocID(ctx, r2) + ctx := context.Background() + c := newTestCore(withHealthyCode(), + withMetricsCacheManager()) + c.metricsCacheManager.InvalidateSystemInfoMetrics() + resp, err := c.GetMetrics(ctx, req) assert.NoError(t, err) - assert.NotEqual(t, commonpb.ErrorCode_Success, p2.Status.ErrorCode) + assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) }) - wg.Wait() - err = core.Stop() - assert.NoError(t, err) -} - -func TestRootCoord2(t *testing.T) { - const ( - dbName = "testDb" - collName = "testColl" - partName = "testPartition" - ) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - msFactory := dependency.NewDefaultFactory(true) - - Params.Init() - Params.RootCoordCfg.DmlChannelNum = TestDMLChannelNum - core, err := NewCore(ctx, msFactory) - assert.NoError(t, err) - - etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.NoError(t, err) - defer etcdCli.Close() - - randVal := rand.Int() - - Params.CommonCfg.RootCoordTimeTick = fmt.Sprintf("rootcoord-time-tick-%d", randVal) - Params.CommonCfg.RootCoordStatistics = fmt.Sprintf("rootcoord-statistics-%d", randVal) - Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.MetaRootPath) - Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.KvRootPath) - Params.CommonCfg.RootCoordSubName = fmt.Sprintf("subname-%d", randVal) - - dm := &dataMock{randVal: randVal} - err = core.SetDataCoord(ctx, dm) - assert.NoError(t, err) - - im := &indexMock{} - err = core.SetIndexCoord(im) - assert.NoError(t, err) - - qm := &queryMock{ - collID: nil, - mutex: sync.Mutex{}, - } - err = core.SetQueryCoord(qm) - assert.NoError(t, err) - - core.NewProxyClient = func(*sessionutil.Session) (types.Proxy, error) { - return nil, nil - } - - core.SetEtcdClient(etcdCli) - err = core.Init() - assert.NoError(t, err) - - err = core.Start() - assert.NoError(t, err) - - core.session.TriggerKill = false - err = core.Register() - assert.NoError(t, err) - - time.Sleep(100 * time.Millisecond) - - var wg sync.WaitGroup - wg.Add(1) - t.Run("create collection", func(t *testing.T) { - defer wg.Done() - schema := schemapb.CollectionSchema{ - Name: collName, - } - - sbf, err := proto.Marshal(&schema) + t.Run("get system info metrics", func(t *testing.T) { + systemInfoMetricType := metricsinfo.SystemInfoMetrics + req, err := metricsinfo.ConstructRequestByMetricType(systemInfoMetricType) assert.NoError(t, err) - - req := &milvuspb.CreateCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_CreateCollection, - Timestamp: 100, - }, - DbName: dbName, - CollectionName: collName, - Schema: sbf, - } - status, err := core.CreateCollection(ctx, req) + ctx := context.Background() + c := newTestCore(withHealthyCode(), + withMetricsCacheManager()) + resp, err := c.getSystemInfoMetrics(ctx, req) assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) - - collInfo, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - dmlStream, _ := msFactory.NewMsgStream(ctx) - dmlStream.AsConsumer([]string{collInfo.PhysicalChannelNames[0]}, Params.CommonCfg.RootCoordSubName) - dmlStream.Start() - - msgs := getNotTtMsg(ctx, 1, dmlStream.Chan()) - assert.Equal(t, 1, len(msgs)) - - m1, ok := (msgs[0]).(*msgstream.CreateCollectionMsg) - assert.True(t, ok) - t.Log("time tick", m1.Base.Timestamp) + assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) }) - - wg.Add(1) - t.Run("describe 
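// Editorial sketch (not part of this patch): the cache-aside flow that the
// "from cache" and "cache miss" GetMetrics cases above are probing. Only
// UpdateSystemInfoMetrics and InvalidateSystemInfoMetrics appear verbatim in the
// patch; GetSystemInfoMetrics returning (cached, error) is an assumption here, as is
// the wrapper name.
func (c *Core) systemInfoWithCache(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
	if cached, err := c.metricsCacheManager.GetSystemInfoMetrics(); err == nil {
		return cached, nil // cache hit: reuse the stored response
	}
	resp, err := c.getSystemInfoMetrics(ctx, req) // cache miss: recompute
	if err != nil {
		return nil, err
	}
	c.metricsCacheManager.UpdateSystemInfoMetrics(resp) // refill for the next caller
	return resp, nil
}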
collection", func(t *testing.T) { - defer wg.Done() - collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - req := &milvuspb.DescribeCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DescribeCollection, - MsgID: 120, - Timestamp: 120, - SourceID: 120, - }, - DbName: dbName, - CollectionName: collName, - } - rsp, err := core.DescribeCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) - assert.Equal(t, collName, rsp.Schema.Name) - assert.Equal(t, collMeta.CollectionID, rsp.CollectionID) - assert.Equal(t, common.DefaultShardsNum, int32(len(rsp.VirtualChannelNames))) - assert.Equal(t, common.DefaultShardsNum, int32(len(rsp.PhysicalChannelNames))) - assert.Equal(t, common.DefaultShardsNum, rsp.ShardsNum) - }) - wg.Wait() - err = core.Stop() - assert.NoError(t, err) } -func TestCheckInit(t *testing.T) { - c, err := NewCore(context.Background(), nil) - assert.NoError(t, err) +func TestCore_Import(t *testing.T) { - err = c.Start() - assert.Error(t, err) - - err = c.checkInit() - assert.Error(t, err) - - c.MetaTable = &MetaTable{} - err = c.checkInit() - assert.Error(t, err) - - c.IDAllocator = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) { - return 0, 0, nil - } - err = c.checkInit() - assert.Error(t, err) - - c.IDAllocatorUpdate = func() error { - return nil - } - err = c.checkInit() - assert.Error(t, err) - - c.TSOAllocator = func(count uint32) (typeutil.Timestamp, error) { - return 0, nil - } - err = c.checkInit() - assert.Error(t, err) - - c.TSOAllocatorUpdate = func() error { - return nil - } - err = c.checkInit() - assert.Error(t, err) - - c.etcdCli = &clientv3.Client{} - err = c.checkInit() - assert.Error(t, err) - - c.kvBase = &etcdkv.EtcdKV{} - err = c.checkInit() - assert.Error(t, err) - - c.impTaskKv = &etcdkv.EtcdKV{} - err = c.checkInit() - assert.Error(t, err) - - c.SendDdCreateCollectionReq = func(context.Context, *internalpb.CreateCollectionRequest, []string) (map[string][]byte, error) { - return map[string][]byte{}, nil - } - err = c.checkInit() - assert.Error(t, err) - - c.SendDdDropCollectionReq = func(context.Context, *internalpb.DropCollectionRequest, []string) error { - return nil - } - err = c.checkInit() - assert.Error(t, err) - - c.SendDdCreatePartitionReq = func(context.Context, *internalpb.CreatePartitionRequest, []string) error { - return nil - } - err = c.checkInit() - assert.Error(t, err) - - c.SendDdDropPartitionReq = func(context.Context, *internalpb.DropPartitionRequest, []string) error { - return nil - } - err = c.checkInit() - assert.Error(t, err) - - c.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error) { - return nil, nil - } - - err = c.checkInit() - assert.Error(t, err) - - c.CallGetRecoveryInfoService = func(ctx context.Context, collID, partID UniqueID) ([]*datapb.SegmentBinlogs, error) { - return nil, nil - } - - err = c.checkInit() - assert.Error(t, err) - - c.CallDropCollectionIndexService = func(ctx context.Context, collID UniqueID) error { - return nil - } - err = c.checkInit() - assert.Error(t, err) - - c.CallGetSegmentIndexStateService = func(ctx context.Context, collID UniqueID, indexName string, segIDs []UniqueID) ([]*indexpb.SegmentIndexState, error) { - return nil, nil - } - err = c.checkInit() - assert.Error(t, err) - - c.NewProxyClient = func(*sessionutil.Session) (types.Proxy, error) { - return nil, nil - } - err = c.checkInit() - 
assert.Error(t, err) - - c.CallReleaseCollectionService = func(ctx context.Context, ts typeutil.Timestamp, dbID, collectionID typeutil.UniqueID) error { - return nil - } - err = c.checkInit() - assert.Error(t, err) - - c.CallReleasePartitionService = func(ctx context.Context, ts typeutil.Timestamp, dbID, collectionID typeutil.UniqueID, partitionIDs []typeutil.UniqueID) error { - return nil - } - err = c.checkInit() - assert.Error(t, err) - - c.CallWatchChannels = func(ctx context.Context, collectionID int64, channelNames []string, startPositions []*commonpb.KeyDataPair) error { - return nil - } - err = c.checkInit() - assert.Error(t, err) - - c.CallImportService = func(ctx context.Context, req *datapb.ImportTaskRequest) *datapb.ImportTaskResponse { - return &datapb.ImportTaskResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, - } - } - err = c.checkInit() - assert.Error(t, err) - - c.CallAddSegRefLock = func(context.Context, int64, []int64) error { - return nil - } - err = c.checkInit() - assert.Error(t, err) - - c.CallReleaseSegRefLock = func(context.Context, int64, []int64) error { - return nil - } - err = c.checkInit() - assert.NoError(t, err) - - err = c.Stop() - assert.NoError(t, err) } -func TestRootCoord_CheckZeroShardsNum(t *testing.T) { - const ( - dbName = "testDb" - collName = "testColl" - ) +func TestCore_GetImportState(t *testing.T) { - shardsNum := int32(2) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - msFactory := dependency.NewDefaultFactory(true) - Params.Init() - Params.RootCoordCfg.DmlChannelNum = TestDMLChannelNum - - core, err := NewCore(ctx, msFactory) - assert.NoError(t, err) - randVal := rand.Int() - Params.CommonCfg.RootCoordTimeTick = fmt.Sprintf("rootcoord-time-tick-%d", randVal) - Params.CommonCfg.RootCoordStatistics = fmt.Sprintf("rootcoord-statistics-%d", randVal) - Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.MetaRootPath) - Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.KvRootPath) - Params.CommonCfg.RootCoordSubName = fmt.Sprintf("subname-%d", randVal) - - dm := &dataMock{randVal: randVal} - err = core.SetDataCoord(ctx, dm) - assert.NoError(t, err) - - im := &indexMock{} - err = core.SetIndexCoord(im) - assert.NoError(t, err) - - qm := &queryMock{ - collID: nil, - mutex: sync.Mutex{}, - } - err = core.SetQueryCoord(qm) - assert.NoError(t, err) - - core.NewProxyClient = func(*sessionutil.Session) (types.Proxy, error) { - return nil, nil - } - - etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg) - assert.NoError(t, err) - defer etcdCli.Close() - - core.SetEtcdClient(etcdCli) - err = core.Init() - assert.NoError(t, err) - - err = core.Start() - assert.NoError(t, err) - - core.session.TriggerKill = false - err = core.Register() - assert.NoError(t, err) - - time.Sleep(100 * time.Millisecond) - - modifyFunc := func(collInfo *model.Collection) { - collInfo.ShardsNum = 0 - } - - createCollectionInMeta(dbName, collName, core, shardsNum, modifyFunc) - - t.Run("describe collection", func(t *testing.T) { - collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) - assert.NoError(t, err) - req := &milvuspb.DescribeCollectionRequest{ - Base: &commonpb.MsgBase{ - MsgType: commonpb.MsgType_DescribeCollection, - MsgID: 120, - Timestamp: 120, - SourceID: 120, - }, - DbName: dbName, - CollectionName: collName, - } - rsp, err := core.DescribeCollection(ctx, req) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, 
rsp.Status.ErrorCode) - assert.Equal(t, collName, rsp.Schema.Name) - assert.Equal(t, collMeta.CollectionID, rsp.CollectionID) - assert.Equal(t, shardsNum, int32(len(rsp.VirtualChannelNames))) - assert.Equal(t, shardsNum, int32(len(rsp.PhysicalChannelNames))) - assert.Equal(t, shardsNum, rsp.ShardsNum) - }) - err = core.Stop() - assert.NoError(t, err) } -func TestCore_GetComponentStates(t *testing.T) { - n := &Core{} - n.stateCode.Store(internalpb.StateCode_Healthy) - resp, err := n.GetComponentStates(context.Background()) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) - assert.Equal(t, common.NotRegisteredID, resp.State.NodeID) - n.session = &sessionutil.Session{} - n.session.UpdateRegistered(true) - resp, err = n.GetComponentStates(context.Background()) - assert.NoError(t, err) - assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode) +func TestCore_ListImportTasks(t *testing.T) { + } -//func TestCore_DescribeSegments(t *testing.T) { -// collID := typeutil.UniqueID(1) -// partID := typeutil.UniqueID(2) -// segID := typeutil.UniqueID(100) -// fieldID := typeutil.UniqueID(3) -// buildID := typeutil.UniqueID(4) -// indexID := typeutil.UniqueID(1000) -// indexName := "test_describe_segments_index" -// -// c := &Core{ -// ctx: context.Background(), -// } -// -// // not healthy. -// c.stateCode.Store(internalpb.StateCode_Abnormal) -// got1, err := c.DescribeSegments(context.Background(), &rootcoordpb.DescribeSegmentsRequest{}) -// assert.NoError(t, err) -// assert.NotEqual(t, commonpb.ErrorCode_Success, got1.GetStatus().GetErrorCode()) -// -// // failed to be executed. -// c.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error) { -// return []typeutil.UniqueID{segID}, nil -// } -// c.stateCode.Store(internalpb.StateCode_Healthy) -// shortDuration := time.Nanosecond -// shortCtx, cancel := context.WithTimeout(c.ctx, shortDuration) -// defer cancel() -// time.Sleep(shortDuration) -// got2, err := c.DescribeSegments(shortCtx, &rootcoordpb.DescribeSegmentsRequest{}) -// assert.NoError(t, err) -// assert.NotEqual(t, commonpb.ErrorCode_Success, got2.GetStatus().GetErrorCode()) -// -// // success. 
-// c.MetaTable = &MetaTable{ -// segID2IndexID: map[typeutil.UniqueID]typeutil.UniqueID{segID: indexID}, -// indexID2Meta: map[typeutil.UniqueID]*model.Index{ -// indexID: { -// IndexName: indexName, -// IndexID: indexID, -// IndexParams: nil, -// CollectionID: collID, -// FieldID: fieldID, -// SegmentIndexes: map[int64]model.SegmentIndex{ -// segID: { -// Segment: model.Segment{ -// PartitionID: partID, -// SegmentID: segID, -// }, -// BuildID: buildID, -// EnableIndex: true}, -// }, -// }, -// }, -// } -// infos, err := c.DescribeSegments(context.Background(), &rootcoordpb.DescribeSegmentsRequest{ -// Base: &commonpb.MsgBase{ -// MsgType: commonpb.MsgType_DescribeSegments, -// MsgID: 0, -// Timestamp: 0, -// SourceID: 0, -// }, -// CollectionID: collID, -// SegmentIDs: []typeutil.UniqueID{segID}, -// }) -// assert.NoError(t, err) -// assert.Equal(t, commonpb.ErrorCode_Success, infos.GetStatus().GetErrorCode()) -// assert.Equal(t, 1, len(infos.GetSegmentInfos())) -// segmentInfo, ok := infos.GetSegmentInfos()[segID] -// assert.True(t, ok) -// assert.Equal(t, 1, len(segmentInfo.GetIndexInfos())) -// assert.Equal(t, collID, segmentInfo.GetIndexInfos()[0].GetCollectionID()) -// assert.Equal(t, partID, segmentInfo.GetIndexInfos()[0].GetPartitionID()) -// assert.Equal(t, segID, segmentInfo.GetIndexInfos()[0].GetSegmentID()) -// assert.Equal(t, fieldID, segmentInfo.GetIndexInfos()[0].GetFieldID()) -// assert.Equal(t, indexID, segmentInfo.GetIndexInfos()[0].GetIndexID()) -// assert.Equal(t, buildID, segmentInfo.GetIndexInfos()[0].GetBuildID()) -// assert.Equal(t, true, segmentInfo.GetIndexInfos()[0].GetEnableIndex()) -// -// indexInfo, ok := segmentInfo.GetExtraIndexInfos()[indexID] -// assert.True(t, ok) -// assert.Equal(t, indexName, indexInfo.IndexName) -// assert.Equal(t, indexID, indexInfo.IndexID) -//} +func TestCore_ReportImport(t *testing.T) { -func TestCore_getCollectionName(t *testing.T) { - mt := &MetaTable{ - ddLock: sync.RWMutex{}, - collID2Meta: make(map[int64]model.Collection), - } - - core := &Core{ - MetaTable: mt, - } - - collName, partName, err := core.getCollectionName(1, 2) - assert.Error(t, err) - assert.Empty(t, collName) - assert.Empty(t, partName) - - mt.collID2Meta[1] = model.Collection{ - Name: "dummy", - Partitions: make([]*model.Partition, 0), - } - - collName, partName, err = core.getCollectionName(1, 2) - assert.Error(t, err) - assert.Equal(t, "dummy", collName) - assert.Empty(t, partName) - - mt.collID2Meta[1] = model.Collection{ - Name: "dummy", - Partitions: []*model.Partition{ - { - PartitionID: 2, - PartitionName: "p2", - }, - }, - } - - collName, partName, err = core.getCollectionName(1, 2) - assert.Nil(t, err) - assert.Equal(t, "dummy", collName) - assert.Equal(t, "p2", partName) } -//func TestCore_GetIndexState(t *testing.T) { -// var ( -// collName = "collName" -// fieldName = "fieldName" -// indexName = "indexName" -// ) -// mt := &MetaTable{ -// ddLock: sync.RWMutex{}, -// collID2Meta: map[typeutil.UniqueID]model.Collection{ -// 1: { -// FieldIDToIndexID: []common.Int64Tuple{ -// { -// Key: 1, -// Value: 1, -// }, -// }, -// }, -// }, -// collName2ID: map[string]typeutil.UniqueID{ -// collName: 2, -// }, -// indexID2Meta: map[typeutil.UniqueID]*model.Index{ -// 1: { -// IndexID: 1, -// IndexName: indexName, -// SegmentIndexes: map[int64]model.SegmentIndex{ -// 3: { -// Segment: model.Segment{ -// SegmentID: 3, -// }, -// EnableIndex: false, -// BuildID: 1, -// }, -// }, -// }, -// }, -// segID2IndexID: map[typeutil.UniqueID]typeutil.UniqueID{3: 1}, -// 
} -// -// core := &Core{ -// MetaTable: mt, -// } -// req := &milvuspb.GetIndexStateRequest{ -// CollectionName: collName, -// FieldName: fieldName, -// IndexName: indexName, -// } -// core.stateCode.Store(internalpb.StateCode_Abnormal) -// -// t.Run("core not healthy", func(t *testing.T) { -// resp, err := core.GetIndexState(context.Background(), req) -// assert.Nil(t, err) -// assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.GetErrorCode()) -// }) -// -// core.stateCode.Store(internalpb.StateCode_Healthy) -// -// t.Run("get init buildiDs failed", func(t *testing.T) { -// resp, err := core.GetIndexState(context.Background(), req) -// assert.Nil(t, err) -// assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.GetErrorCode()) -// }) -// -// core.MetaTable.collName2ID[collName] = 1 -// -// t.Run("number of buildIDs is zero", func(t *testing.T) { -// core.CallGetIndexStatesService = func(ctx context.Context, IndexBuildIDs []int64) ([]*indexpb.IndexInfo, error) { -// return []*indexpb.IndexInfo{}, nil -// } -// resp, err := core.GetIndexState(context.Background(), req) -// assert.NoError(t, err) -// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.GetErrorCode()) -// }) -// -// t.Run("CallGetIndexStatesService failed", func(t *testing.T) { -// core.MetaTable.indexID2Meta[1].SegmentIndexes[3] = model.SegmentIndex{ -// Segment: model.Segment{ -// SegmentID: 3, -// }, -// EnableIndex: true, -// BuildID: 1, -// } -// core.CallGetIndexStatesService = func(ctx context.Context, IndexBuildIDs []int64) ([]*indexpb.IndexInfo, error) { -// return nil, errors.New("error occurred") -// } -// -// resp, err := core.GetIndexState(context.Background(), req) -// assert.Error(t, err) -// assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.GetErrorCode()) -// }) -// -// t.Run("success", func(t *testing.T) { -// core.CallGetIndexStatesService = func(ctx context.Context, IndexBuildIDs []int64) ([]*indexpb.IndexInfo, error) { -// return []*indexpb.IndexInfo{ -// { -// State: commonpb.IndexState_Finished, -// IndexBuildID: 1, -// }, -// }, nil -// } -// resp, err := core.GetIndexState(context.Background(), req) -// assert.NoError(t, err) -// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.GetErrorCode()) -// }) -//} +func TestCore_CountCompleteIndex(t *testing.T) { + +} func TestCore_Rbac(t *testing.T) { ctx := context.Background() diff --git a/internal/rootcoord/scheduler.go b/internal/rootcoord/scheduler.go new file mode 100644 index 0000000000..bffd2bcc5e --- /dev/null +++ b/internal/rootcoord/scheduler.go @@ -0,0 +1,100 @@ +package rootcoord + +import ( + "context" + "sync" + + "github.com/milvus-io/milvus/internal/tso" + + "github.com/milvus-io/milvus/internal/allocator" +) + +type IScheduler interface { + Start() + Stop() + AddTask(t taskV2) error +} + +type scheduler struct { + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + idAllocator allocator.GIDAllocator + tsoAllocator tso.Allocator + + taskChan chan taskV2 +} + +func newScheduler(ctx context.Context, idAllocator allocator.GIDAllocator, tsoAllocator tso.Allocator) *scheduler { + ctx1, cancel := context.WithCancel(ctx) + // TODO + n := 1024 * 10 + return &scheduler{ + ctx: ctx1, + cancel: cancel, + idAllocator: idAllocator, + tsoAllocator: tsoAllocator, + taskChan: make(chan taskV2, n), + } +} + +func (s *scheduler) Start() { + s.wg.Add(1) + go s.taskLoop() +} + +func (s *scheduler) Stop() { + s.cancel() + s.wg.Wait() +} + +func (s *scheduler) taskLoop() { + defer s.wg.Done() + 
for {
+		select {
+		case <-s.ctx.Done():
+			return
+		case task := <-s.taskChan:
+			if err := task.Prepare(task.GetCtx()); err != nil {
+				task.NotifyDone(err)
+				continue
+			}
+			err := task.Execute(task.GetCtx())
+			task.NotifyDone(err)
+		}
+	}
+}
+
+func (s *scheduler) setID(task taskV2) error {
+	id, err := s.idAllocator.AllocOne()
+	if err != nil {
+		return err
+	}
+	task.SetID(id)
+	return nil
+}
+
+func (s *scheduler) setTs(task taskV2) error {
+	ts, err := s.tsoAllocator.GenerateTSO(1)
+	if err != nil {
+		return err
+	}
+	task.SetTs(ts)
+	return nil
+}
+
+func (s *scheduler) enqueue(task taskV2) {
+	s.taskChan <- task
+}
+
+func (s *scheduler) AddTask(task taskV2) error {
+	if err := s.setID(task); err != nil {
+		return err
+	}
+	if err := s.setTs(task); err != nil {
+		return err
+	}
+	s.enqueue(task)
+	return nil
+}
diff --git a/internal/rootcoord/scheduler_test.go b/internal/rootcoord/scheduler_test.go
new file mode 100644
index 0000000000..dec64303ce
--- /dev/null
+++ b/internal/rootcoord/scheduler_test.go
@@ -0,0 +1,168 @@
+package rootcoord
+
+import (
+	"context"
+	"errors"
+	"math/rand"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type mockFailTask struct {
+	baseTaskV2
+	prepareErr error
+	executeErr error
+}
+
+func newMockFailTask() *mockFailTask {
+	task := &mockFailTask{
+		baseTaskV2: baseTaskV2{
+			ctx:  context.Background(),
+			done: make(chan error, 1),
+		},
+	}
+	task.SetCtx(context.Background())
+	return task
+}
+
+func newMockPrepareFailTask() *mockFailTask {
+	task := newMockFailTask()
+	task.prepareErr = errors.New("error mock Prepare")
+	return task
+}
+
+func newMockExecuteFailTask() *mockFailTask {
+	task := newMockFailTask()
+	task.executeErr = errors.New("error mock Execute")
+	return task
+}
+
+func (m mockFailTask) Prepare(context.Context) error {
+	return m.prepareErr
+}
+
+func (m mockFailTask) Execute(context.Context) error {
+	return m.executeErr
+}
+
+type mockNormalTask struct {
+	baseTaskV2
+}
+
+func newMockNormalTask() *mockNormalTask {
+	task := &mockNormalTask{
+		baseTaskV2: baseTaskV2{
+			ctx:  context.Background(),
+			done: make(chan error, 1),
+		},
+	}
+	task.SetCtx(context.Background())
+	return task
+}
+
+func Test_scheduler_Start_Stop(t *testing.T) {
+	idAlloc := newMockIDAllocator()
+	tsoAlloc := newMockTsoAllocator()
+	ctx := context.Background()
+	s := newScheduler(ctx, idAlloc, tsoAlloc)
+	s.Start()
+	s.Stop()
+}
+
+func Test_scheduler_failed_to_set_id(t *testing.T) {
+	idAlloc := newMockIDAllocator()
+	tsoAlloc := newMockTsoAllocator()
+	idAlloc.AllocOneF = func() (UniqueID, error) {
+		return 0, errors.New("error mock AllocOne")
+	}
+	ctx := context.Background()
+	s := newScheduler(ctx, idAlloc, tsoAlloc)
+	s.Start()
+	defer s.Stop()
+	task := newMockNormalTask()
+	err := s.AddTask(task)
+	assert.Error(t, err)
+}
+
+func Test_scheduler_failed_to_set_ts(t *testing.T) {
+	idAlloc := newMockIDAllocator()
+	tsoAlloc := newMockTsoAllocator()
+	idAlloc.AllocOneF = func() (UniqueID, error) {
+		return 100, nil
+	}
+	tsoAlloc.GenerateTSOF = func(count uint32) (uint64, error) {
+		return 0, errors.New("error mock GenerateTSO")
+	}
+	ctx := context.Background()
+	s := newScheduler(ctx, idAlloc, tsoAlloc)
+	s.Start()
+	defer s.Stop()
+	task := newMockNormalTask()
+	err := s.AddTask(task)
+	assert.Error(t, err)
+}
+
+func Test_scheduler_enqueue_normal_case(t *testing.T) {
+	idAlloc := newMockIDAllocator()
+	tsoAlloc := newMockTsoAllocator()
+	idAlloc.AllocOneF = func() (UniqueID, error) {
+		return 100, nil
+	}
+	tsoAlloc.GenerateTSOF = func(count 
uint32) (uint64, error) { + return 101, nil + } + ctx := context.Background() + s := newScheduler(ctx, idAlloc, tsoAlloc) + s.Start() + defer s.Stop() + task := newMockNormalTask() + err := s.AddTask(task) + assert.NoError(t, err) + assert.Equal(t, UniqueID(100), task.GetID()) + assert.Equal(t, Timestamp(101), task.GetTs()) +} + +func Test_scheduler_bg(t *testing.T) { + idAlloc := newMockIDAllocator() + tsoAlloc := newMockTsoAllocator() + idAlloc.AllocOneF = func() (UniqueID, error) { + return 100, nil + } + tsoAlloc.GenerateTSOF = func(count uint32) (uint64, error) { + return 101, nil + } + ctx := context.Background() + s := newScheduler(ctx, idAlloc, tsoAlloc) + s.Start() + + n := 10 + tasks := make([]taskV2, 0, n) + for i := 0; i < n; i++ { + which := rand.Int() % 3 + switch which { + case 0: + tasks = append(tasks, newMockPrepareFailTask()) + case 1: + tasks = append(tasks, newMockExecuteFailTask()) + default: + tasks = append(tasks, newMockNormalTask()) + } + } + + for _, task := range tasks { + s.AddTask(task) + } + + for _, task := range tasks { + err := task.WaitToFinish() + switch task.(type) { + case *mockFailTask: + assert.Error(t, err) + case *mockNormalTask: + assert.NoError(t, err) + } + } + + s.Stop() +} diff --git a/internal/rootcoord/show_collection_task.go b/internal/rootcoord/show_collection_task.go new file mode 100644 index 0000000000..902e9eb20c --- /dev/null +++ b/internal/rootcoord/show_collection_task.go @@ -0,0 +1,47 @@ +package rootcoord + +import ( + "context" + + "github.com/milvus-io/milvus/internal/util/typeutil" + + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" + "github.com/milvus-io/milvus/internal/util/tsoutil" +) + +// showCollectionTask show collection request task +type showCollectionTask struct { + baseTaskV2 + Req *milvuspb.ShowCollectionsRequest + Rsp *milvuspb.ShowCollectionsResponse +} + +func (t *showCollectionTask) Prepare(ctx context.Context) error { + if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_ShowCollections); err != nil { + return err + } + return nil +} + +// Execute task execution +func (t *showCollectionTask) Execute(ctx context.Context) error { + t.Rsp.Status = succStatus() + ts := t.Req.GetTimeStamp() + if ts == 0 { + ts = typeutil.MaxTimestamp + } + colls, err := t.core.meta.ListCollections(ctx, ts) + if err != nil { + t.Rsp.Status = failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()) + return err + } + for _, meta := range colls { + t.Rsp.CollectionNames = append(t.Rsp.CollectionNames, meta.Name) + t.Rsp.CollectionIds = append(t.Rsp.CollectionIds, meta.CollectionID) + t.Rsp.CreatedTimestamps = append(t.Rsp.CreatedTimestamps, meta.CreateTime) + physical, _ := tsoutil.ParseHybridTs(meta.CreateTime) + t.Rsp.CreatedUtcTimestamps = append(t.Rsp.CreatedUtcTimestamps, uint64(physical)) + } + return nil +} diff --git a/internal/rootcoord/show_collection_task_test.go b/internal/rootcoord/show_collection_task_test.go new file mode 100644 index 0000000000..becb5a7b7b --- /dev/null +++ b/internal/rootcoord/show_collection_task_test.go @@ -0,0 +1,88 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/milvus-io/milvus/internal/metastore/model" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" + "github.com/stretchr/testify/assert" +) + +func Test_showCollectionTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &showCollectionTask{ + Req: 
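// Editorial sketch (not part of this patch): how a caller is expected to drive the
// scheduler from scheduler.go above. AddTask stamps an ID and a timestamp before
// enqueueing, the background taskLoop runs Prepare then Execute, and NotifyDone
// unblocks WaitToFinish (provided by baseTaskV2 elsewhere in the patch). The wrapper
// below reuses the patch's showCollectionTask purely as an example; the function
// itself is hypothetical.
func submitShowCollections(ctx context.Context, s IScheduler, core *Core, req *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error) {
	t := &showCollectionTask{
		baseTaskV2: baseTaskV2{core: core, done: make(chan error, 1)},
		Req:        req,
		Rsp:        &milvuspb.ShowCollectionsResponse{},
	}
	t.SetCtx(ctx)
	if err := s.AddTask(t); err != nil { // fails if ID/ts allocation fails
		return nil, err
	}
	if err := t.WaitToFinish(); err != nil { // surfaces Prepare/Execute errors
		return nil, err
	}
	return t.Rsp, nil
}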
&milvuspb.ShowCollectionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_Undefined, + }, + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + task := &showCollectionTask{ + Req: &milvuspb.ShowCollectionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_ShowCollections, + }, + }, + } + err := task.Prepare(context.Background()) + assert.NoError(t, err) + }) +} + +func Test_showCollectionTask_Execute(t *testing.T) { + t.Run("failed to list collections", func(t *testing.T) { + core := newTestCore(withInvalidMeta()) + task := &showCollectionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.ShowCollectionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_ShowCollections, + }, + }, + Rsp: &milvuspb.ShowCollectionsResponse{}, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("success", func(t *testing.T) { + meta := newMockMetaTable() + meta.ListCollectionsFunc = func(ctx context.Context, ts Timestamp) ([]*model.Collection, error) { + return []*model.Collection{ + { + Name: "test coll", + }, + { + Name: "test coll2", + }, + }, nil + } + core := newTestCore(withMeta(meta)) + task := &showCollectionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.ShowCollectionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_ShowCollections, + }, + }, + Rsp: &milvuspb.ShowCollectionsResponse{}, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, task.Rsp.GetStatus().GetErrorCode()) + assert.Equal(t, 2, len(task.Rsp.GetCollectionNames())) + }) +} diff --git a/internal/rootcoord/show_partition_task.go b/internal/rootcoord/show_partition_task.go new file mode 100644 index 0000000000..ac29dee98f --- /dev/null +++ b/internal/rootcoord/show_partition_task.go @@ -0,0 +1,51 @@ +package rootcoord + +import ( + "context" + + "github.com/milvus-io/milvus/internal/metastore/model" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" + "github.com/milvus-io/milvus/internal/util/tsoutil" + "github.com/milvus-io/milvus/internal/util/typeutil" +) + +// showPartitionTask show partition request task +type showPartitionTask struct { + baseTaskV2 + Req *milvuspb.ShowPartitionsRequest + Rsp *milvuspb.ShowPartitionsResponse +} + +func (t *showPartitionTask) Prepare(ctx context.Context) error { + if err := CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_ShowPartitions); err != nil { + return err + } + return nil +} + +// Execute task execution +func (t *showPartitionTask) Execute(ctx context.Context) error { + var coll *model.Collection + var err error + t.Rsp.Status = succStatus() + if t.Req.GetCollectionName() == "" { + coll, err = t.core.meta.GetCollectionByID(ctx, t.Req.GetCollectionID(), typeutil.MaxTimestamp) + } else { + coll, err = t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), typeutil.MaxTimestamp) + } + if err != nil { + t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error()) + return err + } + + for _, part := range coll.Partitions { + t.Rsp.PartitionIDs = append(t.Rsp.PartitionIDs, part.PartitionID) + t.Rsp.PartitionNames = append(t.Rsp.PartitionNames, part.PartitionName) + t.Rsp.CreatedTimestamps = append(t.Rsp.CreatedTimestamps, part.PartitionCreatedTimestamp) + physical, _ := 
tsoutil.ParseHybridTs(part.PartitionCreatedTimestamp) + t.Rsp.CreatedUtcTimestamps = append(t.Rsp.CreatedUtcTimestamps, uint64(physical)) + } + + return nil +} diff --git a/internal/rootcoord/show_partition_task_test.go b/internal/rootcoord/show_partition_task_test.go new file mode 100644 index 0000000000..747b439162 --- /dev/null +++ b/internal/rootcoord/show_partition_task_test.go @@ -0,0 +1,119 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/milvus-io/milvus/internal/util/typeutil" + + "github.com/milvus-io/milvus/internal/metastore/model" + "github.com/milvus-io/milvus/internal/proto/commonpb" + "github.com/milvus-io/milvus/internal/proto/milvuspb" + "github.com/stretchr/testify/assert" +) + +func Test_showPartitionTask_Prepare(t *testing.T) { + t.Run("invalid msg type", func(t *testing.T) { + task := &showPartitionTask{ + Req: &milvuspb.ShowPartitionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_Undefined, + }, + }, + } + err := task.Prepare(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + task := &showPartitionTask{ + Req: &milvuspb.ShowPartitionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_ShowPartitions, + }, + }, + } + err := task.Prepare(context.Background()) + assert.NoError(t, err) + }) +} + +func Test_showPartitionTask_Execute(t *testing.T) { + t.Run("failed to list collections by name", func(t *testing.T) { + core := newTestCore(withInvalidMeta()) + task := &showPartitionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.ShowPartitionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_ShowPartitions, + }, + CollectionName: "test coll", + }, + Rsp: &milvuspb.ShowPartitionsResponse{}, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists) + }) + + t.Run("failed to list collections by id", func(t *testing.T) { + core := newTestCore(withInvalidMeta()) + task := &showPartitionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.ShowPartitionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_ShowPartitions, + }, + CollectionID: 1, + }, + Rsp: &milvuspb.ShowPartitionsResponse{}, + } + err := task.Execute(context.Background()) + assert.Error(t, err) + assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists) + }) + + t.Run("success", func(t *testing.T) { + meta := newMockMetaTable() + meta.GetCollectionByIDFunc = func(ctx context.Context, collectionID typeutil.UniqueID, ts Timestamp) (*model.Collection, error) { + return &model.Collection{ + CollectionID: collectionID, + Name: "test coll", + Partitions: []*model.Partition{ + { + PartitionID: 1, + PartitionName: "test partition1", + }, + { + PartitionID: 2, + PartitionName: "test partition2", + }, + }, + }, nil + } + core := newTestCore(withMeta(meta)) + task := &showPartitionTask{ + baseTaskV2: baseTaskV2{ + core: core, + done: make(chan error, 1), + }, + Req: &milvuspb.ShowPartitionsRequest{ + Base: &commonpb.MsgBase{ + MsgType: commonpb.MsgType_ShowPartitions, + }, + CollectionID: 1, + }, + Rsp: &milvuspb.ShowPartitionsResponse{}, + } + err := task.Execute(context.Background()) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, task.Rsp.GetStatus().GetErrorCode()) + assert.Equal(t, 2, len(task.Rsp.GetPartitionNames())) + }) +} diff --git 
a/internal/rootcoord/step.go b/internal/rootcoord/step.go new file mode 100644 index 0000000000..ec8f842f02 --- /dev/null +++ b/internal/rootcoord/step.go @@ -0,0 +1,165 @@ +package rootcoord + +import ( + "context" + + pb "github.com/milvus-io/milvus/internal/proto/etcdpb" + + "github.com/milvus-io/milvus/internal/metastore/model" +) + +type Step interface { + Execute(ctx context.Context) error +} + +type baseStep struct { + core *Core +} + +type AddCollectionMetaStep struct { + baseStep + coll *model.Collection +} + +func (s *AddCollectionMetaStep) Execute(ctx context.Context) error { + return s.core.meta.AddCollection(ctx, s.coll) +} + +type DeleteCollectionMetaStep struct { + baseStep + collectionID UniqueID + ts Timestamp +} + +func (s *DeleteCollectionMetaStep) Execute(ctx context.Context) error { + return s.core.meta.RemoveCollection(ctx, s.collectionID, s.ts) +} + +type RemoveDmlChannelsStep struct { + baseStep + pchannels []string +} + +func (s *RemoveDmlChannelsStep) Execute(ctx context.Context) error { + s.core.chanTimeTick.removeDmlChannels(s.pchannels...) + return nil +} + +type WatchChannelsStep struct { + baseStep + info *watchInfo +} + +func (s *WatchChannelsStep) Execute(ctx context.Context) error { + return s.core.broker.WatchChannels(ctx, s.info) +} + +type UnwatchChannelsStep struct { + baseStep + collectionID UniqueID + channels collectionChannels +} + +func (s *UnwatchChannelsStep) Execute(ctx context.Context) error { + return s.core.broker.UnwatchChannels(ctx, &watchInfo{collectionID: s.collectionID, vChannels: s.channels.virtualChannels}) +} + +type ChangeCollectionStateStep struct { + baseStep + collectionID UniqueID + state pb.CollectionState + ts Timestamp +} + +func (s *ChangeCollectionStateStep) Execute(ctx context.Context) error { + return s.core.meta.ChangeCollectionState(ctx, s.collectionID, s.state, s.ts) +} + +type ExpireCacheStep struct { + baseStep + collectionNames []string + collectionID UniqueID + ts Timestamp +} + +func (s *ExpireCacheStep) Execute(ctx context.Context) error { + return s.core.ExpireMetaCache(ctx, s.collectionNames, s.collectionID, s.ts) +} + +type DeleteCollectionDataStep struct { + baseStep + coll *model.Collection + ts Timestamp +} + +func (s *DeleteCollectionDataStep) Execute(ctx context.Context) error { + return s.core.garbageCollector.GcCollectionData(ctx, s.coll, s.ts) +} + +type DeletePartitionDataStep struct { + baseStep + pchans []string + partition *model.Partition + ts Timestamp +} + +func (s *DeletePartitionDataStep) Execute(ctx context.Context) error { + return s.core.garbageCollector.GcPartitionData(ctx, s.pchans, s.partition, s.ts) +} + +type ReleaseCollectionStep struct { + baseStep + collectionID UniqueID +} + +func (s *ReleaseCollectionStep) Execute(ctx context.Context) error { + return s.core.broker.ReleaseCollection(ctx, s.collectionID) +} + +type DropIndexStep struct { + baseStep + collID UniqueID +} + +func (s *DropIndexStep) Execute(ctx context.Context) error { + return s.core.broker.DropCollectionIndex(ctx, s.collID) +} + +type AddPartitionMetaStep struct { + baseStep + partition *model.Partition +} + +func (s *AddPartitionMetaStep) Execute(ctx context.Context) error { + return s.core.meta.AddPartition(ctx, s.partition) +} + +type ChangePartitionStateStep struct { + baseStep + collectionID UniqueID + partitionID UniqueID + state pb.PartitionState + ts Timestamp +} + +func (s *ChangePartitionStateStep) Execute(ctx context.Context) error { + return s.core.meta.ChangePartitionState(ctx, s.collectionID, 
s.partitionID, s.state, s.ts) +} + +type RemovePartitionMetaStep struct { + baseStep + collectionID UniqueID + partitionID UniqueID + ts Timestamp +} + +func (s *RemovePartitionMetaStep) Execute(ctx context.Context) error { + return s.core.meta.RemovePartition(ctx, s.collectionID, s.partitionID, s.ts) +} + +type NullStep struct { +} + +func (s *NullStep) Execute(ctx context.Context) error { + return nil +} diff --git a/internal/rootcoord/task.go b/internal/rootcoord/task.go deleted file mode 100644 index df06f273fa..0000000000 --- a/internal/rootcoord/task.go +++ /dev/null @@ -1,954 +0,0 @@ -// Licensed to the LF AI & Data foundation under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rootcoord - -import ( - "context" - "fmt" - - "github.com/golang/protobuf/proto" - "github.com/milvus-io/milvus/internal/common" - "github.com/milvus-io/milvus/internal/log" - "github.com/milvus-io/milvus/internal/metastore/model" - "github.com/milvus-io/milvus/internal/proto/commonpb" - "github.com/milvus-io/milvus/internal/proto/internalpb" - "github.com/milvus-io/milvus/internal/proto/milvuspb" - "github.com/milvus-io/milvus/internal/proto/schemapb" - "github.com/milvus-io/milvus/internal/util/funcutil" - "github.com/milvus-io/milvus/internal/util/tsoutil" - "go.uber.org/zap" -) - -type reqTask interface { - Ctx() context.Context - Type() commonpb.MsgType - Execute(ctx context.Context) error - Core() *Core -} - -type baseReqTask struct { - ctx context.Context - core *Core -} - -func (b *baseReqTask) Core() *Core { - return b.core -} - -func (b *baseReqTask) Ctx() context.Context { - return b.ctx -} - -func executeTask(t reqTask) error { - errChan := make(chan error) - - go func() { - err := t.Execute(t.Ctx()) - errChan <- err - }() - select { - case <-t.Core().ctx.Done(): - return fmt.Errorf("context canceled") - case <-t.Ctx().Done(): - return fmt.Errorf("context canceled") - case err := <-errChan: - if t.Core().ctx.Err() != nil || t.Ctx().Err() != nil { - return fmt.Errorf("context canceled") - } - return err - } -} - -// CreateCollectionReqTask create collection request task -type CreateCollectionReqTask struct { - baseReqTask - Req *milvuspb.CreateCollectionRequest -} - -// Type return msg type -func (t *CreateCollectionReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -func hasSystemFields(schema *schemapb.CollectionSchema, systemFields []string) bool { - for _, f := range schema.GetFields() { - if funcutil.SliceContain(systemFields, f.GetName()) { - return true - } - } - return false -} - -// Execute task execution -func (t *CreateCollectionReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_CreateCollection { - return fmt.Errorf("create collection, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - var schema schemapb.CollectionSchema - err := 
proto.Unmarshal(t.Req.Schema, &schema) - if err != nil { - return fmt.Errorf("unmarshal schema error= %w", err) - } - - if t.Req.CollectionName != schema.Name { - return fmt.Errorf("collection name = %s, schema.Name=%s", t.Req.CollectionName, schema.Name) - } - if t.Req.ShardsNum <= 0 { - t.Req.ShardsNum = common.DefaultShardsNum - } - log.Debug("CreateCollectionReqTask Execute", zap.Any("CollectionName", t.Req.CollectionName), - zap.Int32("ShardsNum", t.Req.ShardsNum), - zap.String("ConsistencyLevel", t.Req.ConsistencyLevel.String())) - - if hasSystemFields(&schema, []string{RowIDFieldName, TimeStampFieldName}) { - log.Error("failed to create collection, user schema contain system field") - return fmt.Errorf("schema contains system field: %s, %s", RowIDFieldName, TimeStampFieldName) - } - - for idx, field := range schema.Fields { - field.FieldID = int64(idx + StartOfUserFieldID) - } - rowIDField := &schemapb.FieldSchema{ - FieldID: int64(RowIDField), - Name: RowIDFieldName, - IsPrimaryKey: false, - Description: "row id", - DataType: schemapb.DataType_Int64, - } - timeStampField := &schemapb.FieldSchema{ - FieldID: int64(TimeStampField), - Name: TimeStampFieldName, - IsPrimaryKey: false, - Description: "time stamp", - DataType: schemapb.DataType_Int64, - } - schema.Fields = append(schema.Fields, rowIDField, timeStampField) - - collID, _, err := t.core.IDAllocator(1) - if err != nil { - return fmt.Errorf("alloc collection id error = %w", err) - } - partID, _, err := t.core.IDAllocator(1) - if err != nil { - return fmt.Errorf("alloc partition id error = %w", err) - } - - log.Debug("collection name -> id", - zap.String("collection name", t.Req.CollectionName), - zap.Int64("collection_id", collID), - zap.Int64("default partition id", partID)) - - vchanNames := make([]string, t.Req.ShardsNum) - deltaChanNames := make([]string, t.Req.ShardsNum) - - //physical channel names - chanNames := t.core.chanTimeTick.getDmlChannelNames(int(t.Req.ShardsNum)) - for i := int32(0); i < t.Req.ShardsNum; i++ { - vchanNames[i] = fmt.Sprintf("%s_%dv%d", chanNames[i], collID, i) - deltaChanNames[i], err = funcutil.ConvertChannelName(chanNames[i], Params.CommonCfg.RootCoordDml, Params.CommonCfg.RootCoordDelta) - if err != nil { - log.Warn("failed to generate delta channel name", - zap.String("dmlChannelName", chanNames[i]), - zap.Error(err)) - return fmt.Errorf("failed to generate delta channel name from %s, %w", chanNames[i], err) - } - } - - // schema is modified (add RowIDField and TimestampField), - // so need Marshal again - schemaBytes, err := proto.Marshal(&schema) - if err != nil { - return fmt.Errorf("marshal schema error = %w", err) - } - - ddCollReq := internalpb.CreateCollectionRequest{ - Base: t.Req.Base, - DbName: t.Req.DbName, - CollectionName: t.Req.CollectionName, - PartitionName: Params.CommonCfg.DefaultPartitionName, - DbID: 0, //TODO,not used - CollectionID: collID, - PartitionID: partID, - Schema: schemaBytes, - VirtualChannelNames: vchanNames, - PhysicalChannelNames: chanNames, - } - - reason := fmt.Sprintf("create collection %d", collID) - ts, err := t.core.TSOAllocator(1) - if err != nil { - return fmt.Errorf("tso alloc fail, error = %w", err) - } - - // build DdOperation and save it into etcd, when ddmsg send fail, - // system can restore ddmsg from etcd and re-send - ddCollReq.Base.Timestamp = ts - ddOpStr, err := EncodeDdOperation(&ddCollReq, CreateCollectionDDType) - if err != nil { - return fmt.Errorf("encodeDdOperation fail, error = %w", err) - } - - collInfo := model.Collection{ - 
CollectionID: collID, - Name: schema.Name, - Description: schema.Description, - AutoID: schema.AutoID, - Fields: model.UnmarshalFieldModels(schema.Fields), - VirtualChannelNames: vchanNames, - PhysicalChannelNames: chanNames, - ShardsNum: t.Req.ShardsNum, - ConsistencyLevel: t.Req.ConsistencyLevel, - CreateTime: ts, - Partitions: []*model.Partition{ - { - PartitionID: partID, - PartitionName: Params.CommonCfg.DefaultPartitionName, - PartitionCreatedTimestamp: ts, - }, - }, - } - - // use lambda function here to guarantee all resources to be released - createCollectionFn := func() error { - // lock for ddl operation - t.core.ddlLock.Lock() - defer t.core.ddlLock.Unlock() - - t.core.chanTimeTick.addDdlTimeTick(ts, reason) - // clear ddl timetick in all conditions - defer t.core.chanTimeTick.removeDdlTimeTick(ts, reason) - - // add dml channel before send dd msg - t.core.chanTimeTick.addDmlChannels(chanNames...) - - ids, err := t.core.SendDdCreateCollectionReq(ctx, &ddCollReq, chanNames) - if err != nil { - return fmt.Errorf("send dd create collection req failed, error = %w", err) - } - for _, pchan := range collInfo.PhysicalChannelNames { - collInfo.StartPositions = append(collInfo.StartPositions, &commonpb.KeyDataPair{ - Key: pchan, - Data: ids[pchan], - }) - } - - // update meta table after send dd operation - if err = t.core.MetaTable.AddCollection(&collInfo, ts, ddOpStr); err != nil { - t.core.chanTimeTick.removeDmlChannels(chanNames...) - // it's ok just to leave create collection message sent, datanode and querynode does't process CreateCollection logic - return fmt.Errorf("meta table add collection failed,error = %w", err) - } - - // use addDdlTimeTick and removeDdlTimeTick to mark DDL operation in process - t.core.chanTimeTick.removeDdlTimeTick(ts, reason) - errTimeTick := t.core.SendTimeTick(ts, reason) - if errTimeTick != nil { - log.Warn("Failed to send timetick", zap.Error(errTimeTick)) - } - return nil - } - - if err = createCollectionFn(); err != nil { - return err - } - - if err = t.core.CallWatchChannels(ctx, collID, vchanNames, collInfo.StartPositions); err != nil { - return err - } - - log.NewMetaLogger().WithCollectionMeta(&collInfo).WithOperation(log.CreateCollection).WithTSO(ts).Info() - return nil -} - -// DropCollectionReqTask drop collection request task -type DropCollectionReqTask struct { - baseReqTask - Req *milvuspb.DropCollectionRequest -} - -// Type return msg type -func (t *DropCollectionReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *DropCollectionReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_DropCollection { - return fmt.Errorf("drop collection, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - if t.core.MetaTable.IsAlias(t.Req.CollectionName) { - return fmt.Errorf("cannot drop the collection via alias = %s", t.Req.CollectionName) - } - - collMeta, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName, 0) - if err != nil { - return err - } - - ddReq := internalpb.DropCollectionRequest{ - Base: t.Req.Base, - DbName: t.Req.DbName, - CollectionName: t.Req.CollectionName, - DbID: 0, //not used - CollectionID: collMeta.CollectionID, - } - - reason := fmt.Sprintf("drop collection %d", collMeta.CollectionID) - ts, err := t.core.TSOAllocator(1) - if err != nil { - return fmt.Errorf("TSO alloc fail, error = %w", err) - } - - //notify query service to release collection - if err = t.core.CallReleaseCollectionService(t.core.ctx, ts, 0, 
collMeta.CollectionID); err != nil { - log.Error("Failed to CallReleaseCollectionService", zap.Error(err)) - return err - } - - // drop all indexes - if err := t.core.CallDropCollectionIndexService(t.core.ctx, collMeta.CollectionID); err != nil { - log.Error("DropCollection CallDropIndexService fail", zap.String("collName", t.Req.CollectionName), - zap.Int64("collID", collMeta.CollectionID), zap.Error(err)) - return err - } - - // Allocate a new ts to make sure the channel timetick is consistent. - ts, err = t.core.TSOAllocator(1) - if err != nil { - return fmt.Errorf("TSO alloc fail, error = %w", err) - } - - // build DdOperation and save it into etcd, when ddmsg send fail, - // system can restore ddmsg from etcd and re-send - ddReq.Base.Timestamp = ts - ddOpStr, err := EncodeDdOperation(&ddReq, DropCollectionDDType) - if err != nil { - return fmt.Errorf("encodeDdOperation fail, error = %w", err) - } - - // use lambda function here to guarantee all resources to be released - dropCollectionFn := func() error { - // lock for ddl operation - t.core.ddlLock.Lock() - defer t.core.ddlLock.Unlock() - - t.core.chanTimeTick.addDdlTimeTick(ts, reason) - // clear ddl timetick in all conditions - defer t.core.chanTimeTick.removeDdlTimeTick(ts, reason) - - if err = t.core.SendDdDropCollectionReq(ctx, &ddReq, collMeta.PhysicalChannelNames); err != nil { - return err - } - - // update meta table after send dd operation - if err = t.core.MetaTable.DeleteCollection(collMeta.CollectionID, ts, ddOpStr); err != nil { - return err - } - - // use addDdlTimeTick and removeDdlTimeTick to mark DDL operation in process - t.core.chanTimeTick.removeDdlTimeTick(ts, reason) - errTimeTick := t.core.SendTimeTick(ts, reason) - if errTimeTick != nil { - log.Warn("Failed to send timetick", zap.Error(errTimeTick)) - } - // send tt into deleted channels to tell data_node to clear flowgragh - err := t.core.chanTimeTick.sendTimeTickToChannel(collMeta.PhysicalChannelNames, ts) - if err != nil { - log.Warn("failed to send time tick to channel", zap.Any("physical names", collMeta.PhysicalChannelNames), zap.Error(err)) - } - // remove dml channel after send dd msg - t.core.chanTimeTick.removeDmlChannels(collMeta.PhysicalChannelNames...) - - return nil - } - - if err = dropCollectionFn(); err != nil { - return err - } - - // invalidate all the collection meta cache with the specified collectionID - err = t.core.ExpireMetaCache(ctx, nil, collMeta.CollectionID, ts) - if err != nil { - return err - } - - log.NewMetaLogger().WithCollectionID(collMeta.CollectionID). 
- WithCollectionName(collMeta.Name).WithTSO(ts).WithOperation(log.DropCollection).Info() - return nil -} - -// HasCollectionReqTask has collection request task -type HasCollectionReqTask struct { - baseReqTask - Req *milvuspb.HasCollectionRequest - HasCollection bool -} - -// Type return msg type -func (t *HasCollectionReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *HasCollectionReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_HasCollection { - return fmt.Errorf("has collection, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - _, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName, t.Req.TimeStamp) - if err == nil { - t.HasCollection = true - } else { - t.HasCollection = false - } - return nil -} - -// DescribeCollectionReqTask describe collection request task -type DescribeCollectionReqTask struct { - baseReqTask - Req *milvuspb.DescribeCollectionRequest - Rsp *milvuspb.DescribeCollectionResponse -} - -// Type return msg type -func (t *DescribeCollectionReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *DescribeCollectionReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_DescribeCollection { - return fmt.Errorf("describe collection, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - var collInfo *model.Collection - var err error - - if t.Req.CollectionName != "" { - collInfo, err = t.core.MetaTable.GetCollectionByName(t.Req.CollectionName, t.Req.TimeStamp) - if err != nil { - return err - } - } else { - collInfo, err = t.core.MetaTable.GetCollectionByID(t.Req.CollectionID, t.Req.TimeStamp) - if err != nil { - return err - } - } - - t.Rsp.Schema = &schemapb.CollectionSchema{ - Name: collInfo.Name, - Description: collInfo.Description, - AutoID: collInfo.AutoID, - Fields: model.MarshalFieldModels(collInfo.Fields), - } - t.Rsp.CollectionID = collInfo.CollectionID - t.Rsp.VirtualChannelNames = collInfo.VirtualChannelNames - t.Rsp.PhysicalChannelNames = collInfo.PhysicalChannelNames - if collInfo.ShardsNum == 0 { - collInfo.ShardsNum = int32(len(collInfo.VirtualChannelNames)) - } - t.Rsp.ShardsNum = collInfo.ShardsNum - t.Rsp.ConsistencyLevel = collInfo.ConsistencyLevel - - t.Rsp.CreatedTimestamp = collInfo.CreateTime - createdPhysicalTime, _ := tsoutil.ParseHybridTs(collInfo.CreateTime) - t.Rsp.CreatedUtcTimestamp = uint64(createdPhysicalTime) - t.Rsp.Aliases = t.core.MetaTable.ListAliases(collInfo.CollectionID) - t.Rsp.StartPositions = collInfo.StartPositions - t.Rsp.CollectionName = t.Rsp.Schema.Name - return nil -} - -// ShowCollectionReqTask show collection request task -type ShowCollectionReqTask struct { - baseReqTask - Req *milvuspb.ShowCollectionsRequest - Rsp *milvuspb.ShowCollectionsResponse -} - -// Type return msg type -func (t *ShowCollectionReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *ShowCollectionReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_ShowCollections { - return fmt.Errorf("show collection, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - coll, err := t.core.MetaTable.ListCollections(t.Req.TimeStamp) - if err != nil { - return err - } - for name, meta := range coll { - t.Rsp.CollectionNames = append(t.Rsp.CollectionNames, name) - t.Rsp.CollectionIds = append(t.Rsp.CollectionIds, meta.CollectionID) - t.Rsp.CreatedTimestamps = append(t.Rsp.CreatedTimestamps, 
meta.CreateTime) - physical, _ := tsoutil.ParseHybridTs(meta.CreateTime) - t.Rsp.CreatedUtcTimestamps = append(t.Rsp.CreatedUtcTimestamps, uint64(physical)) - } - return nil -} - -// CreatePartitionReqTask create partition request task -type CreatePartitionReqTask struct { - baseReqTask - Req *milvuspb.CreatePartitionRequest -} - -// Type return msg type -func (t *CreatePartitionReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *CreatePartitionReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_CreatePartition { - return fmt.Errorf("create partition, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - collMeta, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName, 0) - if err != nil { - return err - } - partID, _, err := t.core.IDAllocator(1) - if err != nil { - return err - } - - ddReq := internalpb.CreatePartitionRequest{ - Base: t.Req.Base, - DbName: t.Req.DbName, - CollectionName: t.Req.CollectionName, - PartitionName: t.Req.PartitionName, - DbID: 0, // todo, not used - CollectionID: collMeta.CollectionID, - PartitionID: partID, - } - - reason := fmt.Sprintf("create partition %s", t.Req.PartitionName) - ts, err := t.core.TSOAllocator(1) - if err != nil { - return fmt.Errorf("TSO alloc fail, error = %w", err) - } - - // build DdOperation and save it into etcd, when ddmsg send fail, - // system can restore ddmsg from etcd and re-send - ddReq.Base.Timestamp = ts - ddOpStr, err := EncodeDdOperation(&ddReq, CreatePartitionDDType) - if err != nil { - return fmt.Errorf("encodeDdOperation fail, error = %w", err) - } - - // use lambda function here to guarantee all resources to be released - createPartitionFn := func() error { - // lock for ddl operation - t.core.ddlLock.Lock() - defer t.core.ddlLock.Unlock() - - t.core.chanTimeTick.addDdlTimeTick(ts, reason) - // clear ddl timetick in all conditions - defer t.core.chanTimeTick.removeDdlTimeTick(ts, reason) - - if err = t.core.SendDdCreatePartitionReq(ctx, &ddReq, collMeta.PhysicalChannelNames); err != nil { - return err - } - - // update meta table after send dd operation - if err = t.core.MetaTable.AddPartition(collMeta.CollectionID, t.Req.PartitionName, partID, ts, ddOpStr); err != nil { - return err - } - - // use addDdlTimeTick and removeDdlTimeTick to mark DDL operation in process - t.core.chanTimeTick.removeDdlTimeTick(ts, reason) - errTimeTick := t.core.SendTimeTick(ts, reason) - if errTimeTick != nil { - log.Warn("Failed to send timetick", zap.Error(errTimeTick)) - } - return nil - } - - if err = createPartitionFn(); err != nil { - return err - } - - // invalidate all the collection meta cache with the specified collectionID - err = t.core.ExpireMetaCache(ctx, nil, collMeta.CollectionID, ts) - if err != nil { - return err - } - - log.NewMetaLogger().WithCollectionName(collMeta.Name).WithCollectionID(collMeta.CollectionID). 
- WithPartitionID(partID).WithPartitionName(t.Req.PartitionName).WithTSO(ts).WithOperation(log.CreatePartition).Info() - return nil -} - -// DropPartitionReqTask drop partition request task -type DropPartitionReqTask struct { - baseReqTask - Req *milvuspb.DropPartitionRequest -} - -// Type return msg type -func (t *DropPartitionReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *DropPartitionReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_DropPartition { - return fmt.Errorf("drop partition, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - collInfo, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName, 0) - if err != nil { - return err - } - partID, err := t.core.MetaTable.GetPartitionByName(collInfo.CollectionID, t.Req.PartitionName, 0) - if err != nil { - return err - } - - ddReq := internalpb.DropPartitionRequest{ - Base: t.Req.Base, - DbName: t.Req.DbName, - CollectionName: t.Req.CollectionName, - PartitionName: t.Req.PartitionName, - DbID: 0, //todo,not used - CollectionID: collInfo.CollectionID, - PartitionID: partID, - } - - reason := fmt.Sprintf("drop partition %s", t.Req.PartitionName) - ts, err := t.core.TSOAllocator(1) - if err != nil { - return fmt.Errorf("TSO alloc fail, error = %w", err) - } - - // build DdOperation and save it into etcd, when ddmsg send fail, - // system can restore ddmsg from etcd and re-send - ddReq.Base.Timestamp = ts - ddOpStr, err := EncodeDdOperation(&ddReq, DropPartitionDDType) - if err != nil { - return fmt.Errorf("encodeDdOperation fail, error = %w", err) - } - - // use lambda function here to guarantee all resources to be released - dropPartitionFn := func() error { - // lock for ddl operation - t.core.ddlLock.Lock() - defer t.core.ddlLock.Unlock() - - t.core.chanTimeTick.addDdlTimeTick(ts, reason) - // clear ddl timetick in all conditions - defer t.core.chanTimeTick.removeDdlTimeTick(ts, reason) - - if err = t.core.SendDdDropPartitionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil { - return err - } - - // update meta table after send dd operation - if _, err = t.core.MetaTable.DeletePartition(collInfo.CollectionID, t.Req.PartitionName, ts, ddOpStr); err != nil { - return err - } - - // use addDdlTimeTick and removeDdlTimeTick to mark DDL operation in process - t.core.chanTimeTick.removeDdlTimeTick(ts, reason) - errTimeTick := t.core.SendTimeTick(ts, reason) - if errTimeTick != nil { - log.Warn("Failed to send timetick", zap.Error(errTimeTick)) - } - return nil - } - - if err = dropPartitionFn(); err != nil { - return err - } - - // invalidate all the collection meta cache with the specified collectionID - err = t.core.ExpireMetaCache(ctx, nil, collInfo.CollectionID, ts) - if err != nil { - return err - } - - //notify query service to release partition - // TODO::xige-16, reOpen when queryCoord support release partitions after load collection - //if err = t.core.CallReleasePartitionService(t.core.ctx, ts, 0, collInfo.ID, []typeutil.UniqueID{partID}); err != nil { - // log.Error("Failed to CallReleaseCollectionService", zap.Error(err)) - // return err - //} - - log.NewMetaLogger().WithCollectionID(collInfo.CollectionID).WithCollectionName(collInfo.Name). 
- WithPartitionName(t.Req.PartitionName).WithTSO(ts).WithOperation(log.DropCollection).Info() - return nil -} - -// HasPartitionReqTask has partition request task -type HasPartitionReqTask struct { - baseReqTask - Req *milvuspb.HasPartitionRequest - HasPartition bool -} - -// Type return msg type -func (t *HasPartitionReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *HasPartitionReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_HasPartition { - return fmt.Errorf("has partition, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - coll, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName, 0) - if err != nil { - return err - } - t.HasPartition = t.core.MetaTable.HasPartition(coll.CollectionID, t.Req.PartitionName, 0) - return nil -} - -// ShowPartitionReqTask show partition request task -type ShowPartitionReqTask struct { - baseReqTask - Req *milvuspb.ShowPartitionsRequest - Rsp *milvuspb.ShowPartitionsResponse -} - -// Type return msg type -func (t *ShowPartitionReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *ShowPartitionReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_ShowPartitions { - return fmt.Errorf("show partition, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - var coll *model.Collection - var err error - if t.Req.CollectionName == "" { - coll, err = t.core.MetaTable.GetCollectionByID(t.Req.CollectionID, 0) - } else { - coll, err = t.core.MetaTable.GetCollectionByName(t.Req.CollectionName, 0) - } - if err != nil { - return err - } - - for _, part := range coll.Partitions { - t.Rsp.PartitionIDs = append(t.Rsp.PartitionIDs, part.PartitionID) - t.Rsp.PartitionNames = append(t.Rsp.PartitionNames, part.PartitionName) - t.Rsp.CreatedTimestamps = append(t.Rsp.CreatedTimestamps, part.PartitionCreatedTimestamp) - - physical, _ := tsoutil.ParseHybridTs(part.PartitionCreatedTimestamp) - t.Rsp.CreatedUtcTimestamps = append(t.Rsp.CreatedUtcTimestamps, uint64(physical)) - } - - return nil -} - -// DescribeSegmentReqTask describe segment request task -//type DescribeSegmentReqTask struct { -// baseReqTask -// Req *milvuspb.DescribeSegmentRequest -// Rsp *milvuspb.DescribeSegmentResponse //TODO,return repeated segment id in the future -//} -// -//// Type return msg type -//func (t *DescribeSegmentReqTask) Type() commonpb.MsgType { -// return t.Req.Base.MsgType -//} -// -//// Execute task execution -//func (t *DescribeSegmentReqTask) Execute(ctx context.Context) error { -// if t.Type() != commonpb.MsgType_DescribeSegment { -// return fmt.Errorf("describe segment, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) -// } -// coll, err := t.core.MetaTable.GetCollectionByID(t.Req.CollectionID, 0) -// if err != nil { -// return err -// } -// -// segIDs, err := t.core.CallGetFlushedSegmentsService(ctx, t.Req.CollectionID, -1) -// if err != nil { -// log.Debug("Get flushed segment from data coord failed", zap.String("collection_name", coll.Name), zap.Error(err)) -// return err -// } -// -// // check if segment id exists -// exist := false -// for _, id := range segIDs { -// if id == t.Req.SegmentID { -// exist = true -// break -// } -// } -// if !exist { -// return fmt.Errorf("segment id %d not belong to collection id %d", t.Req.SegmentID, t.Req.CollectionID) -// } -// //TODO, get filed_id and index_name from request -// index, err := t.core.MetaTable.GetSegmentIndexInfoByID(t.Req.SegmentID, -1, 
"") -// log.Debug("RootCoord DescribeSegmentReqTask, MetaTable.GetSegmentIndexInfoByID", zap.Any("SegmentID", t.Req.SegmentID), -// zap.Any("index", index), zap.Error(err)) -// if err != nil { -// return err -// } -// t.Rsp.IndexID = index.IndexID -// t.Rsp.BuildID = index.SegmentIndexes[t.Req.SegmentID].BuildID -// t.Rsp.EnableIndex = index.SegmentIndexes[t.Req.SegmentID].EnableIndex -// t.Rsp.FieldID = index.FieldID -// return nil -//} - -// ShowSegmentReqTask show segment request task -type ShowSegmentReqTask struct { - baseReqTask - Req *milvuspb.ShowSegmentsRequest - Rsp *milvuspb.ShowSegmentsResponse -} - -// Type return msg type -func (t *ShowSegmentReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *ShowSegmentReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_ShowSegments { - return fmt.Errorf("show segments, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - coll, err := t.core.MetaTable.GetCollectionByID(t.Req.CollectionID, 0) - if err != nil { - return err - } - exist := false - for _, partition := range coll.Partitions { - if partition.PartitionID == t.Req.PartitionID { - exist = true - break - } - } - if !exist { - return fmt.Errorf("partition id = %d not belong to collection id = %d", t.Req.PartitionID, t.Req.CollectionID) - } - segIDs, err := t.core.CallGetFlushedSegmentsService(ctx, t.Req.CollectionID, t.Req.PartitionID) - if err != nil { - log.Debug("Get flushed segments from data coord failed", zap.String("collection name", coll.Name), zap.Int64("partition id", t.Req.PartitionID), zap.Error(err)) - return err - } - - t.Rsp.SegmentIDs = append(t.Rsp.SegmentIDs, segIDs...) - return nil -} - -// CreateAliasReqTask create alias request task -type CreateAliasReqTask struct { - baseReqTask - Req *milvuspb.CreateAliasRequest -} - -// Type return msg type -func (t *CreateAliasReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *CreateAliasReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_CreateAlias { - return fmt.Errorf("create alias, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - - ts, err := t.core.TSOAllocator(1) - if err != nil { - return fmt.Errorf("TSO alloc fail, error = %w", err) - } - err = t.core.MetaTable.AddAlias(t.Req.Alias, t.Req.CollectionName, ts) - if err != nil { - return fmt.Errorf("meta table add alias failed, error = %w", err) - } - - log.NewMetaLogger().WithCollectionName(t.Req.CollectionName).WithAlias(t.Req.Alias).WithTSO(ts).WithOperation(log.CreateCollectionAlias).Info() - return nil -} - -// DropAliasReqTask drop alias request task -type DropAliasReqTask struct { - baseReqTask - Req *milvuspb.DropAliasRequest -} - -// Type return msg type -func (t *DropAliasReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *DropAliasReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_DropAlias { - return fmt.Errorf("create alias, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - - ts, err := t.core.TSOAllocator(1) - if err != nil { - return fmt.Errorf("TSO alloc fail, error = %w", err) - } - err = t.core.MetaTable.DropAlias(t.Req.Alias, ts) - if err != nil { - return fmt.Errorf("meta table drop alias failed, error = %w", err) - } - - if err := t.core.ExpireMetaCache(ctx, []string{t.Req.Alias}, InvalidCollectionID, ts); err != nil { - return err - } - - 
log.NewMetaLogger().WithAlias(t.Req.Alias).WithOperation(log.DropCollectionAlias).WithTSO(ts).Info() - return nil -} - -// AlterAliasReqTask alter alias request task -type AlterAliasReqTask struct { - baseReqTask - Req *milvuspb.AlterAliasRequest -} - -// Type return msg type -func (t *AlterAliasReqTask) Type() commonpb.MsgType { - return t.Req.Base.MsgType -} - -// Execute task execution -func (t *AlterAliasReqTask) Execute(ctx context.Context) error { - if t.Type() != commonpb.MsgType_AlterAlias { - return fmt.Errorf("alter alias, msg type = %s", commonpb.MsgType_name[int32(t.Type())]) - } - - ts, err := t.core.TSOAllocator(1) - if err != nil { - return fmt.Errorf("TSO alloc fail, error = %w", err) - } - err = t.core.MetaTable.AlterAlias(t.Req.Alias, t.Req.CollectionName, ts) - if err != nil { - return fmt.Errorf("meta table alter alias failed, error = %w", err) - } - - if err := t.core.ExpireMetaCache(ctx, []string{t.Req.Alias}, InvalidCollectionID, ts); err != nil { - return nil - } - - log.NewMetaLogger().WithCollectionName(t.Req.CollectionName). - WithAlias(t.Req.Alias).WithOperation(log.AlterCollectionAlias).WithTSO(ts).Info() - return nil -} diff --git a/internal/rootcoord/task_test.go b/internal/rootcoord/task_test.go deleted file mode 100644 index 14e0853ef0..0000000000 --- a/internal/rootcoord/task_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package rootcoord - -import ( - "context" - "testing" - - "github.com/golang/protobuf/proto" - "github.com/stretchr/testify/assert" - - "github.com/milvus-io/milvus/internal/util/typeutil" - - "github.com/milvus-io/milvus/internal/proto/commonpb" - "github.com/milvus-io/milvus/internal/proto/internalpb" - "github.com/milvus-io/milvus/internal/proto/milvuspb" - "github.com/milvus-io/milvus/internal/proto/schemapb" - "github.com/milvus-io/milvus/internal/util/dependency" -) - -//func TestDescribeSegmentReqTask_Type(t *testing.T) { -// tsk := &DescribeSegmentsReqTask{ -// Req: &rootcoordpb.DescribeSegmentsRequest{ -// Base: &commonpb.MsgBase{ -// MsgType: commonpb.MsgType_DescribeSegments, -// }, -// }, -// } -// assert.Equal(t, commonpb.MsgType_DescribeSegments, tsk.Type()) -//} - -//func TestDescribeSegmentsReqTask_Execute(t *testing.T) { -// collID := typeutil.UniqueID(1) -// partID := typeutil.UniqueID(2) -// segID := typeutil.UniqueID(100) -// fieldID := typeutil.UniqueID(3) -// buildID := typeutil.UniqueID(4) -// indexID := typeutil.UniqueID(1000) -// indexName := "test_describe_segments_index" -// -// c := &Core{} -// -// // failed to get flushed segments. -// c.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error) { -// return nil, errors.New("mock") -// } -// tsk := &DescribeSegmentsReqTask{ -// baseReqTask: baseReqTask{ -// core: c, -// }, -// Req: &rootcoordpb.DescribeSegmentsRequest{ -// Base: &commonpb.MsgBase{ -// MsgType: commonpb.MsgType_DescribeSegments, -// }, -// CollectionID: collID, -// SegmentIDs: []typeutil.UniqueID{segID}, -// }, -// Rsp: &rootcoordpb.DescribeSegmentsResponse{}, -// } -// assert.Error(t, tsk.Execute(context.Background())) -// -// // requested segment not found in flushed segments. -// c.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error) { -// return []typeutil.UniqueID{}, nil -// } -// assert.Error(t, tsk.Execute(context.Background())) -// -// // segment not found in meta. 
-// c.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error) { -// return []typeutil.UniqueID{segID}, nil -// } -// c.MetaTable = &MetaTable{ -// segID2IndexID: make(map[typeutil.UniqueID]typeutil.UniqueID, 1), -// } -// assert.NoError(t, tsk.Execute(context.Background())) -// -// // index not found in meta -// c.MetaTable = &MetaTable{ -// segID2IndexID: map[typeutil.UniqueID]typeutil.UniqueID{segID: indexID}, -// indexID2Meta: map[typeutil.UniqueID]*model.Index{ -// indexID: { -// CollectionID: collID, -// FieldID: fieldID, -// IndexID: indexID, -// SegmentIndexes: map[int64]model.SegmentIndex{ -// segID + 1: { -// Segment: model.Segment{ -// SegmentID: segID, -// PartitionID: partID, -// }, -// BuildID: buildID, -// EnableIndex: true, -// }, -// }, -// }, -// }, -// } -// assert.Error(t, tsk.Execute(context.Background())) -// -// // success. -// c.MetaTable = &MetaTable{ -// segID2IndexID: map[typeutil.UniqueID]typeutil.UniqueID{segID: indexID}, -// indexID2Meta: map[typeutil.UniqueID]*model.Index{ -// indexID: { -// CollectionID: collID, -// FieldID: fieldID, -// IndexID: indexID, -// IndexName: indexName, -// IndexParams: nil, -// SegmentIndexes: map[int64]model.SegmentIndex{ -// segID: { -// Segment: model.Segment{ -// SegmentID: segID, -// PartitionID: partID, -// }, -// BuildID: buildID, -// EnableIndex: true, -// }, -// }, -// }, -// }, -// } -// assert.NoError(t, tsk.Execute(context.Background())) -//} - -func Test_hasSystemFields(t *testing.T) { - t.Run("no system fields", func(t *testing.T) { - schema := &schemapb.CollectionSchema{Fields: []*schemapb.FieldSchema{{Name: "not_system_field"}}} - assert.False(t, hasSystemFields(schema, []string{RowIDFieldName, TimeStampFieldName})) - }) - - t.Run("has row id field", func(t *testing.T) { - schema := &schemapb.CollectionSchema{Fields: []*schemapb.FieldSchema{{Name: RowIDFieldName}}} - assert.True(t, hasSystemFields(schema, []string{RowIDFieldName, TimeStampFieldName})) - }) - - t.Run("has timestamp field", func(t *testing.T) { - schema := &schemapb.CollectionSchema{Fields: []*schemapb.FieldSchema{{Name: TimeStampFieldName}}} - assert.True(t, hasSystemFields(schema, []string{RowIDFieldName, TimeStampFieldName})) - }) -} - -func TestCreateCollectionReqTask_Execute_hasSystemFields(t *testing.T) { - schema := &schemapb.CollectionSchema{Name: "test", Fields: []*schemapb.FieldSchema{{Name: TimeStampFieldName}}} - marshaledSchema, err := proto.Marshal(schema) - assert.NoError(t, err) - task := &CreateCollectionReqTask{ - Req: &milvuspb.CreateCollectionRequest{ - Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, - CollectionName: "test", - Schema: marshaledSchema, - }, - } - err = task.Execute(context.Background()) - assert.Error(t, err) -} - -func TestCreateCollectionReqTask_ChannelMismatch(t *testing.T) { - schema := &schemapb.CollectionSchema{Name: "test", Fields: []*schemapb.FieldSchema{{Name: "f1"}}} - marshaledSchema, err := proto.Marshal(schema) - assert.NoError(t, err) - msFactory := dependency.NewDefaultFactory(true) - - Params.Init() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - core, err := NewCore(ctx, msFactory) - assert.NoError(t, err) - core.IDAllocator = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) { - return 0, 0, nil - } - core.chanTimeTick = newTimeTickSync(core.ctx, 1, core.factory, nil) - core.TSOAllocator = func(count uint32) (typeutil.Timestamp, error) { - return 0, nil - } - 
core.SendDdCreateCollectionReq = func(context.Context, *internalpb.CreateCollectionRequest, []string) (map[string][]byte, error) { - return map[string][]byte{}, nil - } - - // set RootCoordDml="" to trigger a error for code coverage - Params.CommonCfg.RootCoordDml = "" - task := &CreateCollectionReqTask{ - baseReqTask: baseReqTask{ - core: core, - }, - Req: &milvuspb.CreateCollectionRequest{ - Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, - CollectionName: "test", - Schema: marshaledSchema, - }, - } - err = task.Execute(context.Background()) - assert.Error(t, err) -} diff --git a/internal/rootcoord/task_v2.go b/internal/rootcoord/task_v2.go new file mode 100644 index 0000000000..a04a684635 --- /dev/null +++ b/internal/rootcoord/task_v2.go @@ -0,0 +1,66 @@ +package rootcoord + +import ( + "context" +) + +type taskV2 interface { + GetCtx() context.Context + SetCtx(context.Context) + SetTs(ts Timestamp) + GetTs() Timestamp + SetID(id UniqueID) + GetID() UniqueID + Prepare(ctx context.Context) error + Execute(ctx context.Context) error + WaitToFinish() error + NotifyDone(err error) +} + +type baseTaskV2 struct { + ctx context.Context + core *Core + done chan error + ts Timestamp + id UniqueID +} + +func (b *baseTaskV2) SetCtx(ctx context.Context) { + b.ctx = ctx +} + +func (b *baseTaskV2) GetCtx() context.Context { + return b.ctx +} + +func (b *baseTaskV2) SetTs(ts Timestamp) { + b.ts = ts +} + +func (b *baseTaskV2) GetTs() Timestamp { + return b.ts +} + +func (b *baseTaskV2) SetID(id UniqueID) { + b.id = id +} + +func (b *baseTaskV2) GetID() UniqueID { + return b.id +} + +func (b *baseTaskV2) Prepare(ctx context.Context) error { + return nil +} + +func (b *baseTaskV2) Execute(ctx context.Context) error { + return nil +} + +func (b *baseTaskV2) WaitToFinish() error { + return <-b.done +} + +func (b *baseTaskV2) NotifyDone(err error) { + b.done <- err +} diff --git a/internal/rootcoord/timestamp_bench_test.go b/internal/rootcoord/timestamp_bench_test.go new file mode 100644 index 0000000000..706eaac7db --- /dev/null +++ b/internal/rootcoord/timestamp_bench_test.go @@ -0,0 +1,72 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/milvus-io/milvus/internal/proto/rootcoordpb" + "github.com/stretchr/testify/assert" + + "github.com/milvus-io/milvus/internal/log" + "go.uber.org/zap" + + "github.com/milvus-io/milvus/internal/util/funcutil" + + "github.com/milvus-io/milvus/internal/util/etcd" + + "github.com/milvus-io/milvus/internal/tso" + "github.com/milvus-io/milvus/internal/util/tsoutil" + clientv3 "go.etcd.io/etcd/client/v3" +) + +func getTestEtcdCli() *clientv3.Client { + Params.InitOnce() + cli, err := etcd.GetEtcdClient(&Params.EtcdCfg) + if err != nil { + panic(err) + } + return cli +} + +func cleanTestEtcdEnv(cli *clientv3.Client, rootPath string) { + ctx := context.Background() + if _, err := cli.Delete(ctx, rootPath, clientv3.WithPrefix()); err != nil { + panic(err) + } + log.Debug("remove root path on etcd", zap.String("rootPath", rootPath)) +} + +func newBenchTSOAllocator(etcdCli *clientv3.Client, rootPath, subPath, key string) *tso.GlobalTSOAllocator { + tsoKV := tsoutil.NewTSOKVBase(etcdCli, rootPath, subPath) + tsoAllocator := tso.NewGlobalTSOAllocator(key, tsoKV) + if err := tsoAllocator.Initialize(); err != nil { + panic(err) + } + return tsoAllocator +} + +func Benchmark_RootCoord_AllocTimestamp(b *testing.B) { + rootPath := funcutil.GenRandomStr() + subPath := funcutil.GenRandomStr() + key := funcutil.GenRandomStr() + log.Info("benchmark 
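task_v2.go above is the backbone of the refactor: every DDL request becomes a taskV2, baseTaskV2 carries the shared bookkeeping (ctx, ts, id, done channel), and concrete tasks only override Prepare and Execute. The sketch below shows a custom task plus the WaitToFinish/NotifyDone handshake a dispatcher is expected to perform; demoTask and runDemoTask are illustrative, the real dispatch lives in scheduler.go and is not shown in this hunk.

    type demoTask struct {
        baseTaskV2
        executed bool
    }

    func (t *demoTask) Execute(ctx context.Context) error {
        t.executed = true
        return nil
    }

    func runDemoTask(ctx context.Context, c *Core) error {
        t := &demoTask{baseTaskV2: baseTaskV2{ctx: ctx, core: c, done: make(chan error, 1)}}
        // A scheduler would assign ts/id here, then run Prepare and Execute
        // asynchronously and report the result through NotifyDone.
        go func() {
            if err := t.Prepare(t.GetCtx()); err != nil {
                t.NotifyDone(err)
                return
            }
            t.NotifyDone(t.Execute(t.GetCtx()))
        }()
        // The caller blocks on the buffered done channel.
        return t.WaitToFinish()
    }
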
for allocating ts", zap.String("rootPath", rootPath), zap.String("subPath", subPath), zap.String("key", key)) + + ctx := context.Background() + cli := getTestEtcdCli() + tsoAllocator := newBenchTSOAllocator(cli, rootPath, subPath, key) + c := newTestCore(withHealthyCode(), + withTsoAllocator(tsoAllocator)) + + defer cleanTestEtcdEnv(cli, rootPath) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + req := rootcoordpb.AllocTimestampRequest{ + Count: 1, + } + _, err := c.AllocTimestamp(ctx, &req) + assert.Nil(b, err) + + } + b.StopTimer() +} diff --git a/internal/rootcoord/timestamp_test.go b/internal/rootcoord/timestamp_test.go deleted file mode 100644 index 5516ede61e..0000000000 --- a/internal/rootcoord/timestamp_test.go +++ /dev/null @@ -1,137 +0,0 @@ -// Licensed to the LF AI & Data foundation under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rootcoord - -import ( - "context" - "fmt" - "math/rand" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/milvus-io/milvus/internal/proto/commonpb" - "github.com/milvus-io/milvus/internal/proto/datapb" - "github.com/milvus-io/milvus/internal/proto/indexpb" - "github.com/milvus-io/milvus/internal/proto/milvuspb" - "github.com/milvus-io/milvus/internal/proto/querypb" - "github.com/milvus-io/milvus/internal/proto/rootcoordpb" - "github.com/milvus-io/milvus/internal/types" - "github.com/milvus-io/milvus/internal/util/dependency" - "github.com/milvus-io/milvus/internal/util/sessionutil" -) - -type tbd struct { - types.DataCoord -} - -func (*tbd) GetInsertBinlogPaths(context.Context, *datapb.GetInsertBinlogPathsRequest) (*datapb.GetInsertBinlogPathsResponse, error) { - return nil, nil -} - -func (*tbd) GetSegmentInfo(context.Context, *datapb.GetSegmentInfoRequest) (*datapb.GetSegmentInfoResponse, error) { - return nil, nil -} - -func (*tbd) GetSegmentInfoChannel(context.Context) (*milvuspb.StringResponse, error) { - return &milvuspb.StringResponse{ - Status: &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_Success, - }, - Value: fmt.Sprintf("tbd-%d", rand.Int()), - }, nil -} - -type tbq struct { - types.QueryCoord -} - -func (*tbq) ReleaseCollection(context.Context, *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) { - return nil, nil -} - -type tbi struct { - types.IndexCoord -} - -func (*tbi) CreateIndex(context.Context, *indexpb.CreateIndexRequest) (*commonpb.Status, error) { - return nil, nil -} - -func (*tbi) DropIndex(context.Context, *indexpb.DropIndexRequest) (*commonpb.Status, error) { - return nil, nil -} - -func BenchmarkAllocTimestamp(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - factory := dependency.NewDefaultFactory(true) - Params.Init() - core, err := NewCore(ctx, factory) - - assert.Nil(b, err) - - randVal := rand.Int() - - 
Params.CommonCfg.RootCoordTimeTick = fmt.Sprintf("master-time-tick-%d", randVal) - Params.CommonCfg.RootCoordStatistics = fmt.Sprintf("master-statistics-%d", randVal) - Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.MetaRootPath) - Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/%s", randVal, Params.EtcdCfg.KvRootPath) - Params.CommonCfg.RootCoordSubName = fmt.Sprintf("subname-%d", randVal) - - err = core.SetDataCoord(ctx, &tbd{}) - assert.Nil(b, err) - - err = core.SetIndexCoord(&tbi{}) - assert.Nil(b, err) - - err = core.SetQueryCoord(&tbq{}) - assert.Nil(b, err) - - err = core.Register() - assert.Nil(b, err) - - pnm := &proxyMock{ - collArray: make([]string, 0, 16), - mutex: sync.Mutex{}, - } - core.NewProxyClient = func(*sessionutil.Session) (types.Proxy, error) { - return pnm, nil - } - - err = core.Init() - assert.Nil(b, err) - - err = core.Start() - assert.Nil(b, err) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - req := rootcoordpb.AllocTimestampRequest{ - Base: &commonpb.MsgBase{ - MsgID: int64(i), - }, - Count: 1, - } - _, err := core.AllocTimestamp(ctx, &req) - assert.Nil(b, err) - - } - b.StopTimer() -} diff --git a/internal/rootcoord/timeticksync.go b/internal/rootcoord/timeticksync.go index 9fd206095b..4f94d758fe 100644 --- a/internal/rootcoord/timeticksync.go +++ b/internal/rootcoord/timeticksync.go @@ -19,7 +19,6 @@ package rootcoord import ( "context" "fmt" - "math" "sync" "time" @@ -43,6 +42,7 @@ var ( ttCheckerWarnMsg = fmt.Sprintf("RootCoord haven't synchronized the time tick for %f minutes", timeTickSyncTtInterval.Minutes()) ) +// TODO: better to accept ctx for timetickSync-related method, which can trace the ddl. type timetickSync struct { ctx context.Context sourceID typeutil.UniqueID @@ -52,11 +52,6 @@ type timetickSync struct { lock sync.Mutex sess2ChanTsMap map[typeutil.UniqueID]*chanTsMsg sendChan chan map[typeutil.UniqueID]*chanTsMsg - - // record ddl timetick info - ddlLock sync.RWMutex - ddlMinTs typeutil.Timestamp - ddlTsSet map[typeutil.Timestamp]struct{} } type chanTsMsg struct { @@ -103,10 +98,6 @@ func newTimeTickSync(ctx context.Context, sourceID int64, factory msgstream.Fact lock: sync.Mutex{}, sess2ChanTsMap: make(map[typeutil.UniqueID]*chanTsMsg), sendChan: make(chan map[typeutil.UniqueID]*chanTsMsg, 16), - - ddlLock: sync.RWMutex{}, - ddlMinTs: typeutil.Timestamp(math.MaxUint64), - ddlTsSet: make(map[typeutil.Timestamp]struct{}), } } @@ -148,52 +139,6 @@ func (t *timetickSync) sendToChannel() { t.sendChan <- ptt } -// AddDmlTimeTick add ts into ddlTimetickInfos[sourceID], -// can be used to tell if DDL operation is in process. -func (t *timetickSync) addDdlTimeTick(ts typeutil.Timestamp, reason string) { - t.ddlLock.Lock() - defer t.ddlLock.Unlock() - - if ts < t.ddlMinTs { - t.ddlMinTs = ts - } - t.ddlTsSet[ts] = struct{}{} - - log.Debug("add ddl timetick", zap.Uint64("minTs", t.ddlMinTs), zap.Uint64("ts", ts), - zap.Int("len(ddlTsSet)", len(t.ddlTsSet)), zap.String("reason", reason)) -} - -// RemoveDdlTimeTick is invoked in UpdateTimeTick. -// It clears the ts generated by AddDdlTimeTick, indicates DDL operation finished. 
-func (t *timetickSync) removeDdlTimeTick(ts typeutil.Timestamp, reason string) { - t.ddlLock.Lock() - defer t.ddlLock.Unlock() - - delete(t.ddlTsSet, ts) - log.Debug("remove ddl timetick", zap.Uint64("ts", ts), zap.Int("len(ddlTsSet)", len(t.ddlTsSet)), - zap.String("reason", reason)) - if len(t.ddlTsSet) == 0 { - t.ddlMinTs = typeutil.Timestamp(math.MaxUint64) - } else if t.ddlMinTs == ts { - // re-calculate minTs - minTs := typeutil.Timestamp(math.MaxUint64) - for tt := range t.ddlTsSet { - if tt < minTs { - minTs = tt - } - } - t.ddlMinTs = minTs - log.Debug("update ddl minTs", zap.Any("minTs", minTs)) - } -} - -func (t *timetickSync) getDdlMinTimeTick() typeutil.Timestamp { - t.ddlLock.Lock() - defer t.ddlLock.Unlock() - - return t.ddlMinTs -} - // UpdateTimeTick check msg validation and send it to local channel func (t *timetickSync) updateTimeTick(in *internalpb.ChannelTimeTickMsg, reason string) error { t.lock.Lock() @@ -210,16 +155,6 @@ func (t *timetickSync) updateTimeTick(in *internalpb.ChannelTimeTickMsg, reason return fmt.Errorf("skip ChannelTimeTickMsg from un-recognized session %d", in.Base.SourceID) } - // if ddl operation not finished, skip current ts update - ddlMinTs := t.getDdlMinTimeTick() - if in.DefaultTimestamp > ddlMinTs { - log.Info("ddl not finished", zap.Int64("source id", in.Base.SourceID), - zap.Uint64("curr ts", in.DefaultTimestamp), - zap.Uint64("ddlMinTs", ddlMinTs), - zap.String("reason", reason)) - return nil - } - if in.Base.SourceID == t.sourceID { if prev != nil && in.DefaultTimestamp <= prev.defaultTs { log.Warn("timestamp go back", zap.Int64("source id", in.Base.SourceID), @@ -385,11 +320,13 @@ func (t *timetickSync) listDmlChannels() []string { // AddDmlChannels add dml channels func (t *timetickSync) addDmlChannels(names ...string) { t.dmlChannels.addChannels(names...) + log.Info("add dml channels", zap.Strings("channels", names)) } // RemoveDmlChannels remove dml channels func (t *timetickSync) removeDmlChannels(names ...string) { t.dmlChannels.removeChannels(names...) 
+ log.Info("remove dml channels", zap.Strings("channels", names)) } // BroadcastDmlChannels broadcasts msg pack into dml channels diff --git a/internal/rootcoord/timeticksync_test.go b/internal/rootcoord/timeticksync_test.go index 5c0ad0e76a..85c4fa7db5 100644 --- a/internal/rootcoord/timeticksync_test.go +++ b/internal/rootcoord/timeticksync_test.go @@ -61,15 +61,6 @@ func TestTimetickSync(t *testing.T) { ttSync.sendToChannel() }) - wg.Add(1) - t.Run("RemoveDdlTimeTick", func(t *testing.T) { - defer wg.Done() - ttSync.addDdlTimeTick(uint64(1), "1") - ttSync.addDdlTimeTick(uint64(2), "2") - ttSync.removeDdlTimeTick(uint64(1), "1") - assert.Equal(t, ttSync.ddlMinTs, uint64(2)) - }) - wg.Add(1) t.Run("UpdateTimeTick", func(t *testing.T) { defer wg.Done() @@ -93,11 +84,9 @@ func TestTimetickSync(t *testing.T) { cttMsg := newChanTsMsg(msg, 1) ttSync.sess2ChanTsMap[msg.Base.SourceID] = cttMsg - ttSync.ddlMinTs = uint64(100) err = ttSync.updateTimeTick(msg, "1") assert.Nil(t, err) - ttSync.ddlMinTs = uint64(300) ttSync.sourceID = int64(1) err = ttSync.updateTimeTick(msg, "1") assert.Nil(t, err) diff --git a/internal/rootcoord/undo.go b/internal/rootcoord/undo.go new file mode 100644 index 0000000000..0550ccb71e --- /dev/null +++ b/internal/rootcoord/undo.go @@ -0,0 +1,59 @@ +package rootcoord + +import ( + "context" + "fmt" + "time" + + "github.com/milvus-io/milvus/internal/log" + "go.uber.org/zap" +) + +type baseUndoTask struct { + todoStep []Step // steps to execute + undoStep []Step // steps to undo +} + +func newBaseUndoTask() *baseUndoTask { + return &baseUndoTask{ + todoStep: make([]Step, 0), + undoStep: make([]Step, 0), + } +} + +func (b *baseUndoTask) AddStep(todoStep, undoStep Step) { + b.todoStep = append(b.todoStep, todoStep) + b.undoStep = append(b.undoStep, undoStep) +} + +func (b *baseUndoTask) undoFromLastFinished(lastFinished int) { + // You cannot just use the ctx of task, since it will be canceled after response is returned. + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + for i := lastFinished; i >= 0; i-- { + undo := b.undoStep[i] + if err := undo.Execute(ctx); err != nil { + // You depend on the collection meta to do other gc. + // TODO: add ddl logger after other service can be idempotent enough, then you can do separate steps + // independently. 
+ log.Error("failed to execute step, garbage may be generated", zap.Error(err)) + return + } + } +} + +func (b *baseUndoTask) Execute(ctx context.Context) error { + if len(b.todoStep) != len(b.undoStep) { + return fmt.Errorf("todo step and undo step length not equal") + } + for i := 0; i < len(b.todoStep); i++ { + todoStep := b.todoStep[i] + err := todoStep.Execute(ctx) + if err != nil { + go b.undoFromLastFinished(i - 1) + log.Warn("failed to execute step, trying to undo", zap.Error(err)) + return err + } + } + return nil +} diff --git a/internal/rootcoord/undo_test.go b/internal/rootcoord/undo_test.go new file mode 100644 index 0000000000..235a5f321b --- /dev/null +++ b/internal/rootcoord/undo_test.go @@ -0,0 +1,93 @@ +package rootcoord + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_baseUndoTask_Execute(t *testing.T) { + t.Run("should not happen", func(t *testing.T) { + undoTask := newBaseUndoTask() + undoTask.todoStep = append(undoTask.todoStep, newMockNormalStep()) + err := undoTask.Execute(context.Background()) + assert.Error(t, err) + }) + + t.Run("normal case, no undo step will be called", func(t *testing.T) { + undoTask := newBaseUndoTask() + n := 10 + todoSteps, undoSteps := make([]Step, 0, n), make([]Step, 0, n) + for i := 0; i < n; i++ { + normalTodoStep := newMockNormalStep() + normalUndoStep := newMockNormalStep() + todoSteps = append(todoSteps, normalTodoStep) + undoSteps = append(undoSteps, normalUndoStep) + } + for i := 0; i < n; i++ { + undoTask.AddStep(todoSteps[i], undoSteps[i]) + } + err := undoTask.Execute(context.Background()) + assert.NoError(t, err) + // make sure no undo steps will be called. + for _, step := range undoSteps { + assert.False(t, step.(*mockNormalStep).called) + } + }) + + t.Run("partial error, undo from last finished", func(t *testing.T) { + undoTask := newBaseUndoTask() + todoSteps := []Step{ + newMockNormalStep(), + newMockFailStep(), + newMockNormalStep(), + } + undoSteps := []Step{ + newMockNormalStep(), + newMockNormalStep(), + newMockNormalStep(), + } + l := len(todoSteps) + for i := 0; i < l; i++ { + undoTask.AddStep(todoSteps[i], undoSteps[i]) + } + err := undoTask.Execute(context.Background()) + assert.Error(t, err) + assert.True(t, todoSteps[0].(*mockNormalStep).called) + assert.True(t, todoSteps[1].(*mockFailStep).called) + assert.False(t, todoSteps[2].(*mockNormalStep).called) + + <-undoSteps[0].(*mockNormalStep).calledChan + assert.True(t, undoSteps[0].(*mockNormalStep).called) + assert.False(t, undoSteps[1].(*mockNormalStep).called) + assert.False(t, undoSteps[2].(*mockNormalStep).called) + }) + + t.Run("partial error, undo meet error also", func(t *testing.T) { + undoTask := newBaseUndoTask() + todoSteps := []Step{ + newMockNormalStep(), + newMockNormalStep(), + newMockFailStep(), + } + undoSteps := []Step{ + newMockNormalStep(), + newMockFailStep(), + newMockNormalStep(), + } + l := len(todoSteps) + for i := 0; i < l; i++ { + undoTask.AddStep(todoSteps[i], undoSteps[i]) + } + err := undoTask.Execute(context.Background()) + assert.Error(t, err) + assert.True(t, todoSteps[0].(*mockNormalStep).called) + assert.True(t, todoSteps[1].(*mockNormalStep).called) + assert.True(t, todoSteps[2].(*mockFailStep).called) + assert.False(t, undoSteps[0].(*mockNormalStep).called) + <-undoSteps[1].(*mockFailStep).calledChan + assert.True(t, undoSteps[1].(*mockFailStep).called) + assert.False(t, undoSteps[2].(*mockNormalStep).called) + }) +} diff --git a/internal/rootcoord/util.go 
b/internal/rootcoord/util.go index 6bb0ca0e15..2bca33abfb 100644 --- a/internal/rootcoord/util.go +++ b/internal/rootcoord/util.go @@ -27,7 +27,6 @@ import ( "github.com/milvus-io/milvus/internal/metastore/model" - "github.com/golang/protobuf/proto" "github.com/milvus-io/milvus/internal/mq/msgstream" "github.com/milvus-io/milvus/internal/proto/commonpb" "github.com/milvus-io/milvus/internal/util/typeutil" @@ -66,23 +65,6 @@ func GetFieldSchemaByID(coll *model.Collection, fieldID typeutil.UniqueID) (*mod return nil, fmt.Errorf("field id = %d not found", fieldID) } -// EncodeDdOperation serialize DdOperation into string -func EncodeDdOperation(m proto.Message, ddType string) (string, error) { - mByte, err := proto.Marshal(m) - if err != nil { - return "", err - } - ddOp := DdOperation{ - Body: mByte, - Type: ddType, - } - ddOpByte, err := json.Marshal(ddOp) - if err != nil { - return "", err - } - return string(ddOpByte), nil -} - // EncodeMsgPositions serialize []*MsgPosition into string func EncodeMsgPositions(msgPositions []*msgstream.MsgPosition) (string, error) { if len(msgPositions) == 0 { @@ -118,3 +100,24 @@ func Int64TupleMapToSlice(s map[int]common.Int64Tuple) []common.Int64Tuple { } return ret } + +func CheckMsgType(got, expect commonpb.MsgType) error { + if got != expect { + return fmt.Errorf("invalid msg type, expect %s, but got %s", expect, got) + } + return nil +} + +func failStatus(code commonpb.ErrorCode, reason string) *commonpb.Status { + return &commonpb.Status{ + ErrorCode: code, + Reason: reason, + } +} + +func succStatus() *commonpb.Status { + return &commonpb.Status{ + ErrorCode: commonpb.ErrorCode_Success, + Reason: "", + } +} diff --git a/internal/tso/mock_global_allocator.go b/internal/tso/mock_global_allocator.go new file mode 100644 index 0000000000..d142f384a3 --- /dev/null +++ b/internal/tso/mock_global_allocator.go @@ -0,0 +1,43 @@ +package tso + +import ( + "time" +) + +type MockAllocator struct { + Allocator + InitializeF func() error + UpdateTSOF func() error + SetTSOF func(tso uint64) error + GenerateTSOF func(count uint32) (uint64, error) + ResetF func() + GetLastSavedTimeF func() time.Time +} + +func (m MockAllocator) Initialize() error { + return m.InitializeF() +} + +func (m MockAllocator) UpdateTSO() error { + return m.UpdateTSOF() +} + +func (m MockAllocator) SetTSO(tso uint64) error { + return m.SetTSOF(tso) +} + +func (m MockAllocator) GenerateTSO(count uint32) (uint64, error) { + return m.GenerateTSOF(count) +} + +func (m MockAllocator) Reset() { + m.ResetF() +} + +func (m MockAllocator) GetLastSavedTime() time.Time { + return m.GetLastSavedTimeF() +} + +func NewMockAllocator() *MockAllocator { + return &MockAllocator{} +} diff --git a/internal/types/types.go b/internal/types/types.go index fe229bf166..81da389fcb 100644 --- a/internal/types/types.go +++ b/internal/types/types.go @@ -629,20 +629,6 @@ type RootCoord interface { // error is always nil ShowSegments(ctx context.Context, req *milvuspb.ShowSegmentsRequest) (*milvuspb.ShowSegmentsResponse, error) - //DescribeSegments(ctx context.Context, in *rootcoordpb.DescribeSegmentsRequest) (*rootcoordpb.DescribeSegmentsResponse, error) - - // ReleaseDQLMessageStream notifies RootCoord to release and close the search message stream of specific collection. - // - // ctx is the request to control request deadline and cancellation. - // request contains the request params, which are database id(not used) and collection id. 
- // - // The `ErrorCode` of `Status` is `Success` if drop index successfully; - // otherwise, the `ErrorCode` of `Status` will be `Error`, and the `Reason` of `Status` will record the fail cause. - // error is always nil - // - // RootCoord just forwards this request to Proxy client - ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) - // InvalidateCollectionMetaCache notifies RootCoord to clear the meta cache of specific collection in Proxies. // If `CollectionID` is specified in request, all the collection meta cache with the specified collectionID will be // invalidated, if only the `CollectionName` is specified in request, only the collection meta cache with the @@ -807,22 +793,6 @@ type Proxy interface { UpdateCredentialCache(ctx context.Context, request *proxypb.UpdateCredCacheRequest) (*commonpb.Status, error) - // ReleaseDQLMessageStream notifies Proxy to release and close the search message stream of specific collection. - // - // ReleaseDQLMessageStream should be called when the specific collection was released. - // - // ctx is the request to control request deadline and cancellation. - // request contains the request params, which are database id(not used now) and collection id. - // - // ReleaseDQLMessageStream should always succeed even though the specific collection doesn't exist in Proxy. - // So the code of response `Status` should be always `Success`. - // - // error is returned only when some communication issue occurs. - ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) - - SendSearchResult(ctx context.Context, req *internalpb.SearchResults) (*commonpb.Status, error) - SendRetrieveResult(ctx context.Context, req *internalpb.RetrieveResults) (*commonpb.Status, error) - RefreshPolicyInfoCache(ctx context.Context, req *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) } diff --git a/internal/util/mock/grpc_proxy_client.go b/internal/util/mock/grpc_proxy_client.go index a0e547dc86..d404948fc9 100644 --- a/internal/util/mock/grpc_proxy_client.go +++ b/internal/util/mock/grpc_proxy_client.go @@ -53,18 +53,6 @@ func (m *GrpcProxyClient) GetDdChannel(ctx context.Context, in *internalpb.GetDd return &milvuspb.StringResponse{}, m.Err } -func (m *GrpcProxyClient) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { - return &commonpb.Status{}, m.Err -} - -func (m *GrpcProxyClient) SendSearchResult(ctx context.Context, in *internalpb.SearchResults, opts ...grpc.CallOption) (*commonpb.Status, error) { - return &commonpb.Status{}, m.Err -} - -func (m *GrpcProxyClient) SendRetrieveResult(ctx context.Context, in *internalpb.RetrieveResults, opts ...grpc.CallOption) (*commonpb.Status, error) { - return &commonpb.Status{}, m.Err -} - func (m *GrpcProxyClient) InvalidateCredentialCache(ctx context.Context, in *proxypb.InvalidateCredCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { return &commonpb.Status{}, m.Err } diff --git a/internal/util/mock/grpc_rootcoord_client.go b/internal/util/mock/grpc_rootcoord_client.go index 7384bbf04e..3468bc428a 100644 --- a/internal/util/mock/grpc_rootcoord_client.go +++ b/internal/util/mock/grpc_rootcoord_client.go @@ -165,10 +165,6 @@ func (m *GrpcRootCoordClient) UpdateChannelTimeTick(ctx context.Context, in *int return &commonpb.Status{}, m.Err } -func (m *GrpcRootCoordClient) ReleaseDQLMessageStream(ctx 
context.Context, in *proxypb.ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { - return &commonpb.Status{}, m.Err -} - func (m *GrpcRootCoordClient) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { return &commonpb.Status{}, m.Err } diff --git a/scripts/sql/meta.sql b/scripts/sql/meta.sql index 365f25beda..e1222ae3a9 100644 --- a/scripts/sql/meta.sql +++ b/scripts/sql/meta.sql @@ -20,6 +20,7 @@ CREATE TABLE if not exists milvus_meta.collections ( shards_num INT, start_position TEXT, consistency_level INT, + status INT NOT NULL, ts BIGINT UNSIGNED DEFAULT 0, is_deleted BOOL DEFAULT FALSE, created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -90,6 +91,7 @@ CREATE TABLE if not exists milvus_meta.`partitions` ( partition_name VARCHAR(256), partition_created_timestamp bigint unsigned, collection_id BIGINT NOT NULL, + status INT NOT NULL, ts BIGINT UNSIGNED DEFAULT 0, is_deleted BOOL DEFAULT FALSE, created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, diff --git a/tests/python_client/testcases/test_alias.py b/tests/python_client/testcases/test_alias.py index aae687e4c9..15b3df5674 100644 --- a/tests/python_client/testcases/test_alias.py +++ b/tests/python_client/testcases/test_alias.py @@ -333,7 +333,7 @@ class TestAliasOperation(TestcaseBase): method: 1.create collection with alias 2.call drop_collection function with alias as param - expected: collection is dropped + expected: Got error: collection cannot be dropped via alias. """ self._connect() c_name = cf.gen_unique_str("collection") @@ -467,9 +467,12 @@ class TestAliasOperationInvalid(TestcaseBase): alias_not_exist_name = cf.gen_unique_str(prefix) error = {ct.err_code: 1, ct.err_msg: "Drop alias failed: alias does not exist"} - self.utility_wrap.drop_alias(alias_not_exist_name, - check_task=CheckTasks.err_res, - check_items=error) + # self.utility_wrap.drop_alias(alias_not_exist_name, + # check_task=CheckTasks.err_res, + # check_items=error) + # @longjiquan: dropping alias should be idempotent. + self.utility_wrap.drop_alias(alias_not_exist_name) + # # collection_w.drop_alias(alias_not_exist_name, # check_task=CheckTasks.err_res, @@ -496,10 +499,14 @@ class TestAliasOperationInvalid(TestcaseBase): # collection_w.create_alias(alias_name) # collection_w.drop_alias(alias_name) - error = {ct.err_code: 1, ct.err_msg: "Drop alias failed: alias does not exist"} - self.utility_wrap.drop_alias(alias_name, - check_task=CheckTasks.err_res, - check_items=error) + # @longjiquan: dropping alias should be idempotent. 
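+ # i.e. dropping an alias that does not (or no longer) exist is now a no-op that returns success rather than raising an error.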
+ self.utility_wrap.drop_alias(alias_name) + + # error = {ct.err_code: 1, ct.err_msg: "Drop alias failed: alias does not exist"} + # self.utility_wrap.drop_alias(alias_name, + # check_task=CheckTasks.err_res, + # check_items=error) + # collection_w.drop_alias(alias_name, # check_task=CheckTasks.err_res, # check_items=error) diff --git a/tests/python_client/testcases/test_collection.py b/tests/python_client/testcases/test_collection.py index a401f55e7e..5f1a8e08bf 100644 --- a/tests/python_client/testcases/test_collection.py +++ b/tests/python_client/testcases/test_collection.py @@ -1741,8 +1741,10 @@ class TestDropCollection(TestcaseBase): c_name = cf.gen_unique_str() self.init_collection_wrap(name=c_name) c_name_2 = cf.gen_unique_str() - error = {ct.err_code: 0, ct.err_msg: 'DescribeCollection failed: can\'t find collection: %s' % c_name_2} - self.utility_wrap.drop_collection(c_name_2, check_task=CheckTasks.err_res, check_items=error) + # error = {ct.err_code: 0, ct.err_msg: 'DescribeCollection failed: can\'t find collection: %s' % c_name_2} + # self.utility_wrap.drop_collection(c_name_2, check_task=CheckTasks.err_res, check_items=error) + # @longjiquan: dropping collection should be idempotent. + self.utility_wrap.drop_collection(c_name_2) @pytest.mark.tags(CaseLabel.L1) def test_create_drop_collection_multithread(self): @@ -2157,7 +2159,7 @@ class TestLoadCollection(TestcaseBase): collection_wr.load() collection_wr.drop() error = {ct.err_code: 0, - ct.err_msg: "DescribeCollection failed: can't find collection: %s" % c_name} + ct.err_msg: "can't find collection"} collection_wr.release(check_task=CheckTasks.err_res, check_items=error) @pytest.mark.tags(CaseLabel.L0) @@ -2773,7 +2775,7 @@ class TestLoadPartition(TestcaseBase): "is_empty": True, "num_entities": 0} ) collection_w.drop() - error = {ct.err_code: 0, ct.err_msg: "HasPartition failed: can\'t find collection: %s" % name} + error = {ct.err_code: 0, ct.err_msg: "can\'t find collection"} partition_w.load(check_task=CheckTasks.err_res, check_items=error) partition_w.release(check_task=CheckTasks.err_res, check_items=error) diff --git a/tests/python_client/testcases/test_utility.py b/tests/python_client/testcases/test_utility.py index 9d69fe51f5..3cc43d86f0 100644 --- a/tests/python_client/testcases/test_utility.py +++ b/tests/python_client/testcases/test_utility.py @@ -297,8 +297,12 @@ class TestUtilityParams(TestcaseBase): """ self._connect() c_name = cf.gen_unique_str(prefix) - error = {ct.err_code: 1, ct.err_msg: f"DescribeCollection failed: can't find collection: {c_name}"} - self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error) + + # error = {ct.err_code: 1, ct.err_msg: f"DescribeCollection failed: can't find collection: {c_name}"} + # self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error) + + # @longjiquan: dropping collection should be idempotent. 
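+ # i.e. dropping a collection that was never created now succeeds instead of failing with a "can't find collection" error.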
+ self.utility_wrap.drop_collection(c_name) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_left_vector_invalid_type(self, get_invalid_vector_dict): @@ -977,8 +981,11 @@ class TestUtilityBase(TestcaseBase): assert self.utility_wrap.has_collection(c_name)[0] collection_w.drop() assert not self.utility_wrap.has_collection(c_name)[0] - error = {ct.err_code: 1, ct.err_msg: {"describe collection failed: can't find collection:"}} - self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error) + + # error = {ct.err_code: 1, ct.err_msg: {"describe collection failed: can't find collection:"}} + # self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error) + # @longjiquan: dropping collection should be idempotent. + self.utility_wrap.drop_collection(c_name) @pytest.mark.tags(CaseLabel.L2) def test_drop_collection_create_repeatedly(self):