Support to replicate the mq message (#27240)

Signed-off-by: SimFG <bang.fu@zilliz.com>
SimFG 2023-10-20 14:26:09 +08:00 committed by GitHub
parent 5247ea3fd1
commit 9b0ecbdca7
62 changed files with 3439 additions and 469 deletions
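Across the 62 files, the core schema change shown below is in the generated code for common.proto: MsgBase's field 6 changes from a map<string, string> properties entry to a singular ReplicateEntity submessage holding a single bool isReplicate; the config gains a replicate-msg channel name and a common.ttMsgEnabled switch; and the milvus-proto dependency is bumped to match. A minimal sketch of the new field from the generated C++ API (a hedged illustration; accessor names follow protoc conventions rather than appearing verbatim in this diff):

#include "common.pb.h"

int main() {
  milvus::proto::common::MsgBase base;
  base.set_msg_type(milvus::proto::common::MsgType::Insert);
  base.set_msgid(1);
  // Field `replicate` -> mutable_replicate(); `isReplicate` -> set_isreplicate().
  base.mutable_replicate()->set_isreplicate(true);  // mark this message as replicated
  return base.replicate().isreplicate() ? 0 : 1;
}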

configs/milvus.yaml

@@ -464,6 +464,7 @@ common:
     rootCoordTimeTick: rootcoord-timetick
     rootCoordStatistics: rootcoord-statistics
     rootCoordDml: rootcoord-dml
+    replicateMsg: replicate-msg
     rootCoordDelta: rootcoord-delta
     search: search
     searchResult: searchResult
@@ -528,6 +529,7 @@ common:
     threshold:
       info: 500 # minimum milliseconds for printing durations in info level
       warn: 1000 # minimum milliseconds for printing durations in warn level
+  ttMsgEnabled: true # Whether the instance disables sending ts (time tick) messages
 # QuotaConfig, configurations of Milvus quota and limits.
 # By default, we enable:
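One intended use of the new flag (a sketch under assumptions; the helper below is ours, not part of this commit): a consumer that must avoid re-replicating traffic can test the new submessage on each message base.

#include "common.pb.h"

// Hypothetical helper, not from the PR; accessor names assumed per protoc conventions.
bool IsReplicatedMsg(const milvus::proto::common::MsgBase& base) {
  return base.has_replicate() && base.replicate().isreplicate();
}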

go.mod

@@ -24,7 +24,8 @@ require (
     github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
     github.com/klauspost/compress v1.16.5
     github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d
-    github.com/milvus-io/milvus-proto/go-api/v2 v2.3.2-0.20231017024957-5e5a27fd4875
+    github.com/milvus-io/milvus-proto/go-api/v2 v2.3.2-0.20231019101159-a0a6f5e7eff8
+    github.com/milvus-io/milvus/pkg v0.0.1
     github.com/minio/minio-go/v7 v7.0.56
     github.com/prometheus/client_golang v1.14.0
     github.com/prometheus/client_model v0.3.0
@@ -58,8 +59,6 @@ require (
     stathat.com/c/consistent v1.0.0
 )
-require github.com/milvus-io/milvus/pkg v0.0.0-20230607023836-1593278f9d9c
 require (
     cloud.google.com/go/compute v1.19.0 // indirect
     cloud.google.com/go/compute/metadata v0.2.3 // indirect

go.sum

@@ -582,8 +582,8 @@ github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/le
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b h1:TfeY0NxYxZzUfIfYe5qYDBzt4ZYRqzUjTR6CvUzjat8=
 github.com/milvus-io/gorocksdb v0.0.0-20220624081344-8c5f4212846b/go.mod h1:iwW+9cWfIzzDseEBCCeDSN5SD16Tidvy8cwQ7ZY8Qj4=
-github.com/milvus-io/milvus-proto/go-api/v2 v2.3.2-0.20231017024957-5e5a27fd4875 h1:7OPJn0sOeueXNnreWup0GR7ZlXEURpcKklzplXM9kDg=
-github.com/milvus-io/milvus-proto/go-api/v2 v2.3.2-0.20231017024957-5e5a27fd4875/go.mod h1:1OIl0v5PQeNxIJhCvY+K55CBUOYDZevw9g9380u1Wek=
+github.com/milvus-io/milvus-proto/go-api/v2 v2.3.2-0.20231019101159-a0a6f5e7eff8 h1:GoGErEOhdWjwSfQilXso3eINqb11yEBDLtoBMNdlve0=
+github.com/milvus-io/milvus-proto/go-api/v2 v2.3.2-0.20231019101159-a0a6f5e7eff8/go.mod h1:1OIl0v5PQeNxIJhCvY+K55CBUOYDZevw9g9380u1Wek=
 github.com/milvus-io/pulsar-client-go v0.6.10 h1:eqpJjU+/QX0iIhEo3nhOqMNXL+TyInAs1IAHZCrCM/A=
 github.com/milvus-io/pulsar-client-go v0.6.10/go.mod h1:lQqCkgwDF8YFYjKA+zOheTk1tev2B+bKj5j7+nm8M1w=
 github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=

common.pb.cc

@@ -121,20 +121,9 @@ struct AddressDefaultTypeInternal {
   };
 };
 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 AddressDefaultTypeInternal _Address_default_instance_;
-PROTOBUF_CONSTEXPR MsgBase_PropertiesEntry_DoNotUse::MsgBase_PropertiesEntry_DoNotUse(
-    ::_pbi::ConstantInitialized) {}
-struct MsgBase_PropertiesEntry_DoNotUseDefaultTypeInternal {
-  PROTOBUF_CONSTEXPR MsgBase_PropertiesEntry_DoNotUseDefaultTypeInternal()
-      : _instance(::_pbi::ConstantInitialized{}) {}
-  ~MsgBase_PropertiesEntry_DoNotUseDefaultTypeInternal() {}
-  union {
-    MsgBase_PropertiesEntry_DoNotUse _instance;
-  };
-};
-PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MsgBase_PropertiesEntry_DoNotUseDefaultTypeInternal _MsgBase_PropertiesEntry_DoNotUse_default_instance_;
 PROTOBUF_CONSTEXPR MsgBase::MsgBase(
     ::_pbi::ConstantInitialized): _impl_{
-    /*decltype(_impl_.properties_)*/{::_pbi::ConstantInitialized()}
+    /*decltype(_impl_.replicate_)*/nullptr
   , /*decltype(_impl_.msgid_)*/int64_t{0}
   , /*decltype(_impl_.timestamp_)*/uint64_t{0u}
   , /*decltype(_impl_.sourceid_)*/int64_t{0}
@@ -150,6 +139,19 @@ struct MsgBaseDefaultTypeInternal {
   };
 };
 PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MsgBaseDefaultTypeInternal _MsgBase_default_instance_;
+PROTOBUF_CONSTEXPR ReplicateEntity::ReplicateEntity(
+    ::_pbi::ConstantInitialized): _impl_{
+    /*decltype(_impl_.isreplicate_)*/false
+  , /*decltype(_impl_._cached_size_)*/{}} {}
+struct ReplicateEntityDefaultTypeInternal {
+  PROTOBUF_CONSTEXPR ReplicateEntityDefaultTypeInternal()
+      : _instance(::_pbi::ConstantInitialized{}) {}
+  ~ReplicateEntityDefaultTypeInternal() {}
+  union {
+    ReplicateEntity _instance;
+  };
+};
+PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ReplicateEntityDefaultTypeInternal _ReplicateEntity_default_instance_;
 PROTOBUF_CONSTEXPR MsgHeader::MsgHeader(
     ::_pbi::ConstantInitialized): _impl_{
     /*decltype(_impl_.base_)*/nullptr
@@ -329,16 +331,6 @@ const uint32_t TableStruct_common_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(p
   ~0u,  // no _inlined_string_donated_
   PROTOBUF_FIELD_OFFSET(::milvus::proto::common::Address, _impl_.ip_),
   PROTOBUF_FIELD_OFFSET(::milvus::proto::common::Address, _impl_.port_),
-  PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgBase_PropertiesEntry_DoNotUse, _has_bits_),
-  PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgBase_PropertiesEntry_DoNotUse, _internal_metadata_),
-  ~0u,  // no _extensions_
-  ~0u,  // no _oneof_case_
-  ~0u,  // no _weak_field_map_
-  ~0u,  // no _inlined_string_donated_
-  PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgBase_PropertiesEntry_DoNotUse, key_),
-  PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgBase_PropertiesEntry_DoNotUse, value_),
-  0,
-  1,
   ~0u,  // no _has_bits_
   PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgBase, _internal_metadata_),
   ~0u,  // no _extensions_
@@ -350,7 +342,14 @@ const uint32_t TableStruct_common_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(p
   PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgBase, _impl_.timestamp_),
   PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgBase, _impl_.sourceid_),
   PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgBase, _impl_.targetid_),
-  PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgBase, _impl_.properties_),
+  PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgBase, _impl_.replicate_),
+  ~0u,  // no _has_bits_
+  PROTOBUF_FIELD_OFFSET(::milvus::proto::common::ReplicateEntity, _internal_metadata_),
+  ~0u,  // no _extensions_
+  ~0u,  // no _oneof_case_
+  ~0u,  // no _weak_field_map_
+  ~0u,  // no _inlined_string_donated_
+  PROTOBUF_FIELD_OFFSET(::milvus::proto::common::ReplicateEntity, _impl_.isreplicate_),
   ~0u,  // no _has_bits_
   PROTOBUF_FIELD_OFFSET(::milvus::proto::common::MsgHeader, _internal_metadata_),
   ~0u,  // no _extensions_
@@ -437,16 +436,16 @@ static const ::_pbi::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protode
   { 32, -1, -1, sizeof(::milvus::proto::common::PlaceholderValue)},
   { 41, -1, -1, sizeof(::milvus::proto::common::PlaceholderGroup)},
   { 48, -1, -1, sizeof(::milvus::proto::common::Address)},
-  { 56, 64, -1, sizeof(::milvus::proto::common::MsgBase_PropertiesEntry_DoNotUse)},
-  { 66, -1, -1, sizeof(::milvus::proto::common::MsgBase)},
-  { 78, -1, -1, sizeof(::milvus::proto::common::MsgHeader)},
-  { 85, -1, -1, sizeof(::milvus::proto::common::DMLMsgHeader)},
-  { 93, -1, -1, sizeof(::milvus::proto::common::PrivilegeExt)},
-  { 103, -1, -1, sizeof(::milvus::proto::common::SegmentStats)},
-  { 111, 119, -1, sizeof(::milvus::proto::common::ClientInfo_ReservedEntry_DoNotUse)},
-  { 121, -1, -1, sizeof(::milvus::proto::common::ClientInfo)},
-  { 133, 141, -1, sizeof(::milvus::proto::common::ServerInfo_ReservedEntry_DoNotUse)},
-  { 143, -1, -1, sizeof(::milvus::proto::common::ServerInfo)},
+  { 56, -1, -1, sizeof(::milvus::proto::common::MsgBase)},
+  { 68, -1, -1, sizeof(::milvus::proto::common::ReplicateEntity)},
+  { 75, -1, -1, sizeof(::milvus::proto::common::MsgHeader)},
+  { 82, -1, -1, sizeof(::milvus::proto::common::DMLMsgHeader)},
+  { 90, -1, -1, sizeof(::milvus::proto::common::PrivilegeExt)},
+  { 100, -1, -1, sizeof(::milvus::proto::common::SegmentStats)},
+  { 108, 116, -1, sizeof(::milvus::proto::common::ClientInfo_ReservedEntry_DoNotUse)},
+  { 118, -1, -1, sizeof(::milvus::proto::common::ClientInfo)},
+  { 130, 138, -1, sizeof(::milvus::proto::common::ServerInfo_ReservedEntry_DoNotUse)},
+  { 140, -1, -1, sizeof(::milvus::proto::common::ServerInfo)},
 };
static const ::_pb::Message* const file_default_instances[] = { static const ::_pb::Message* const file_default_instances[] = {
@@ -457,8 +456,8 @@ static const ::_pb::Message* const file_default_instances[] = {
   &::milvus::proto::common::_PlaceholderValue_default_instance_._instance,
   &::milvus::proto::common::_PlaceholderGroup_default_instance_._instance,
   &::milvus::proto::common::_Address_default_instance_._instance,
-  &::milvus::proto::common::_MsgBase_PropertiesEntry_DoNotUse_default_instance_._instance,
   &::milvus::proto::common::_MsgBase_default_instance_._instance,
+  &::milvus::proto::common::_ReplicateEntity_default_instance_._instance,
   &::milvus::proto::common::_MsgHeader_default_instance_._instance,
   &::milvus::proto::common::_DMLMsgHeader_default_instance_._instance,
   &::milvus::proto::common::_PrivilegeExt_default_instance_._instance,
@@ -482,182 +481,182 @@ const char descriptor_table_protodef_common_2eproto[] PROTOBUF_SECTION_VARIABLE(
 "alues\030\003 \003(\014\"O\n\020PlaceholderGroup\022;\n\014place"
 "holders\030\001 \003(\0132%.milvus.proto.common.Plac"
 "eholderValue\"#\n\007Address\022\n\n\002ip\030\001 \001(\t\022\014\n\004p"
-"ort\030\002 \001(\003\"\364\001\n\007MsgBase\022.\n\010msg_type\030\001 \001(\0162"
+"ort\030\002 \001(\003\"\270\001\n\007MsgBase\022.\n\010msg_type\030\001 \001(\0162"
 "\034.milvus.proto.common.MsgType\022\r\n\005msgID\030\002"
 " \001(\003\022\021\n\ttimestamp\030\003 \001(\004\022\020\n\010sourceID\030\004 \001("
-"\003\022\020\n\010targetID\030\005 \001(\003\022@\n\nproperties\030\006 \003(\0132"
-",.milvus.proto.common.MsgBase.Properties"
-"Entry\0321\n\017PropertiesEntry\022\013\n\003key\030\001 \001(\t\022\r\n"
-"\005value\030\002 \001(\t:\0028\001\"7\n\tMsgHeader\022*\n\004base\030\001 "
-"\001(\0132\034.milvus.proto.common.MsgBase\"M\n\014DML"
+"\003\022\020\n\010targetID\030\005 \001(\003\0227\n\treplicate\030\006 \001(\0132$"
+".milvus.proto.common.ReplicateEntity\"&\n\017"
+"ReplicateEntity\022\023\n\013isReplicate\030\001 \001(\010\"7\n\t"
 "MsgHeader\022*\n\004base\030\001 \001(\0132\034.milvus.proto.c"
 "ommon.MsgBase\"M\n\014DMLMsgHeader\022*\n\004base\030\001 "
 "\001(\0132\034.milvus.proto.common.MsgBase\022\021\n\tsha"
 "rdName\030\002 \001(\t\"\273\001\n\014PrivilegeExt\0224\n\013object_"
 "type\030\001 \001(\0162\037.milvus.proto.common.ObjectT"
 "ype\022>\n\020object_privilege\030\002 \001(\0162$.milvus.p"
 "roto.common.ObjectPrivilege\022\031\n\021object_na"
 "me_index\030\003 \001(\005\022\032\n\022object_name_indexs\030\004 \001"
 "(\005\"2\n\014SegmentStats\022\021\n\tSegmentID\030\001 \001(\003\022\017\n"
 "\007NumRows\030\002 \001(\003\"\325\001\n\nClientInfo\022\020\n\010sdk_typ"
 "e\030\001 \001(\t\022\023\n\013sdk_version\030\002 \001(\t\022\022\n\nlocal_ti"
 "me\030\003 \001(\t\022\014\n\004user\030\004 \001(\t\022\014\n\004host\030\005 \001(\t\022\?\n\010"
 "reserved\030\006 \003(\0132-.milvus.proto.common.Cli"
 "entInfo.ReservedEntry\032/\n\rReservedEntry\022\013"
 "\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\343\001\n\nServe"
 "rInfo\022\022\n\nbuild_tags\030\001 \001(\t\022\022\n\nbuild_time\030"
 "\002 \001(\t\022\022\n\ngit_commit\030\003 \001(\t\022\022\n\ngo_version\030"
 "\004 \001(\t\022\023\n\013deploy_mode\030\005 \001(\t\022\?\n\010reserved\030\006"
 " \003(\0132-.milvus.proto.common.ServerInfo.Re"
 "servedEntry\032/\n\rReservedEntry\022\013\n\003key\030\001 \001("
 "\t\022\r\n\005value\030\002 \001(\t:\0028\001*\303\n\n\tErrorCode\022\013\n\007Su"
 "ccess\020\000\022\023\n\017UnexpectedError\020\001\022\021\n\rConnectF"
 "ailed\020\002\022\024\n\020PermissionDenied\020\003\022\027\n\023Collect"
 "ionNotExists\020\004\022\023\n\017IllegalArgument\020\005\022\024\n\020I"
 "llegalDimension\020\007\022\024\n\020IllegalIndexType\020\010\022"
 "\031\n\025IllegalCollectionName\020\t\022\017\n\013IllegalTOP"
 "K\020\n\022\024\n\020IllegalRowRecord\020\013\022\023\n\017IllegalVect"
 "orID\020\014\022\027\n\023IllegalSearchResult\020\r\022\020\n\014FileN"
 "otFound\020\016\022\016\n\nMetaFailed\020\017\022\017\n\013CacheFailed"
 "\020\020\022\026\n\022CannotCreateFolder\020\021\022\024\n\020CannotCrea"
 "teFile\020\022\022\026\n\022CannotDeleteFolder\020\023\022\024\n\020Cann"
 "otDeleteFile\020\024\022\023\n\017BuildIndexError\020\025\022\020\n\014I"
 "llegalNLIST\020\026\022\025\n\021IllegalMetricType\020\027\022\017\n\013"
 "OutOfMemory\020\030\022\021\n\rIndexNotExist\020\031\022\023\n\017Empt"
 "yCollection\020\032\022\033\n\027UpdateImportTaskFailure"
 "\020\033\022\032\n\026CollectionNameNotFound\020\034\022\033\n\027Create"
 "CredentialFailure\020\035\022\033\n\027UpdateCredentialF"
 "ailure\020\036\022\033\n\027DeleteCredentialFailure\020\037\022\030\n"
 "\024GetCredentialFailure\020 \022\030\n\024ListCredUsers"
 "Failure\020!\022\022\n\016GetUserFailure\020\"\022\025\n\021CreateR"
 "oleFailure\020#\022\023\n\017DropRoleFailure\020$\022\032\n\026Ope"
 "rateUserRoleFailure\020%\022\025\n\021SelectRoleFailu"
 "re\020&\022\025\n\021SelectUserFailure\020\'\022\031\n\025SelectRes"
 "ourceFailure\020(\022\033\n\027OperatePrivilegeFailur"
 "e\020)\022\026\n\022SelectGrantFailure\020*\022!\n\035RefreshPo"
 "licyInfoCacheFailure\020+\022\025\n\021ListPolicyFail"
 "ure\020,\022\022\n\016NotShardLeader\020-\022\026\n\022NoReplicaAv"
 "ailable\020.\022\023\n\017SegmentNotFound\020/\022\r\n\tForceD"
 "eny\0200\022\r\n\tRateLimit\0201\022\022\n\016NodeIDNotMatch\0202"
 "\022\024\n\020UpsertAutoIDTrue\0203\022\034\n\030InsufficientMe"
 "moryToLoad\0204\022\030\n\024MemoryQuotaExhausted\0205\022\026"
 "\n\022DiskQuotaExhausted\0206\022\025\n\021TimeTickLongDe"
 "lay\0207\022\021\n\rNotReadyServe\0208\022\033\n\027NotReadyCoor"
 "dActivating\0209\022\017\n\013DataCoordNA\020d\022\022\n\rDDRequ"
 "estRace\020\350\007*c\n\nIndexState\022\022\n\016IndexStateNo"
 "ne\020\000\022\014\n\010Unissued\020\001\022\016\n\nInProgress\020\002\022\014\n\010Fi"
 "nished\020\003\022\n\n\006Failed\020\004\022\t\n\005Retry\020\005*\202\001\n\014Segm"
 "entState\022\024\n\020SegmentStateNone\020\000\022\014\n\010NotExi"
 "st\020\001\022\013\n\007Growing\020\002\022\n\n\006Sealed\020\003\022\013\n\007Flushed"
 "\020\004\022\014\n\010Flushing\020\005\022\013\n\007Dropped\020\006\022\r\n\tImporti"
 "ng\020\007*i\n\017PlaceholderType\022\010\n\004None\020\000\022\020\n\014Bin"
 "aryVector\020d\022\017\n\013FloatVector\020e\022\021\n\rFloat16V"
 "ector\020f\022\t\n\005Int64\020\005\022\013\n\007VarChar\020\025*\264\020\n\007MsgT"
 "ype\022\r\n\tUndefined\020\000\022\024\n\020CreateCollection\020d"
 "\022\022\n\016DropCollection\020e\022\021\n\rHasCollection\020f\022"
 "\026\n\022DescribeCollection\020g\022\023\n\017ShowCollectio"
 "ns\020h\022\024\n\020GetSystemConfigs\020i\022\022\n\016LoadCollec"
 "tion\020j\022\025\n\021ReleaseCollection\020k\022\017\n\013CreateA"
 "lias\020l\022\r\n\tDropAlias\020m\022\016\n\nAlterAlias\020n\022\023\n"
 "\017AlterCollection\020o\022\024\n\020RenameCollection\020p"
 "\022\021\n\rDescribeAlias\020q\022\017\n\013ListAliases\020r\022\024\n\017"
 "CreatePartition\020\310\001\022\022\n\rDropPartition\020\311\001\022\021"
 "\n\014HasPartition\020\312\001\022\026\n\021DescribePartition\020\313"
 "\001\022\023\n\016ShowPartitions\020\314\001\022\023\n\016LoadPartitions"
 "\020\315\001\022\026\n\021ReleasePartitions\020\316\001\022\021\n\014ShowSegme"
 "nts\020\372\001\022\024\n\017DescribeSegment\020\373\001\022\021\n\014LoadSegm"
 "ents\020\374\001\022\024\n\017ReleaseSegments\020\375\001\022\024\n\017Handoff"
 "Segments\020\376\001\022\030\n\023LoadBalanceSegments\020\377\001\022\025\n"
 "\020DescribeSegments\020\200\002\022\034\n\027FederListIndexed"
 "Segment\020\201\002\022\"\n\035FederDescribeSegmentIndexD"
 "ata\020\202\002\022\020\n\013CreateIndex\020\254\002\022\022\n\rDescribeInde"
 "x\020\255\002\022\016\n\tDropIndex\020\256\002\022\027\n\022GetIndexStatisti"
 "cs\020\257\002\022\013\n\006Insert\020\220\003\022\013\n\006Delete\020\221\003\022\n\n\005Flush"
 "\020\222\003\022\027\n\022ResendSegmentStats\020\223\003\022\013\n\006Upsert\020\224"
 "\003\022\013\n\006Search\020\364\003\022\021\n\014SearchResult\020\365\003\022\022\n\rGet"
 "IndexState\020\366\003\022\032\n\025GetIndexBuildProgress\020\367"
 "\003\022\034\n\027GetCollectionStatistics\020\370\003\022\033\n\026GetPa"
 "rtitionStatistics\020\371\003\022\r\n\010Retrieve\020\372\003\022\023\n\016R"
 "etrieveResult\020\373\003\022\024\n\017WatchDmChannels\020\374\003\022\025"
 "\n\020RemoveDmChannels\020\375\003\022\027\n\022WatchQueryChann"
 "els\020\376\003\022\030\n\023RemoveQueryChannels\020\377\003\022\035\n\030Seal"
 "edSegmentsChangeInfo\020\200\004\022\027\n\022WatchDeltaCha"
 "nnels\020\201\004\022\024\n\017GetShardLeaders\020\202\004\022\020\n\013GetRep"
 "licas\020\203\004\022\023\n\016UnsubDmChannel\020\204\004\022\024\n\017GetDist"
 "ribution\020\205\004\022\025\n\020SyncDistribution\020\206\004\022\020\n\013Se"
 "gmentInfo\020\330\004\022\017\n\nSystemInfo\020\331\004\022\024\n\017GetReco"
 "veryInfo\020\332\004\022\024\n\017GetSegmentState\020\333\004\022\r\n\010Tim"
 "eTick\020\260\t\022\023\n\016QueryNodeStats\020\261\t\022\016\n\tLoadInd"
 "ex\020\262\t\022\016\n\tRequestID\020\263\t\022\017\n\nRequestTSO\020\264\t\022\024"
 "\n\017AllocateSegment\020\265\t\022\026\n\021SegmentStatistic"
 "s\020\266\t\022\025\n\020SegmentFlushDone\020\267\t\022\017\n\nDataNodeT"
 "t\020\270\t\022\014\n\007Connect\020\271\t\022\024\n\017ListClientInfos\020\272\t"
 "\022\023\n\016AllocTimestamp\020\273\t\022\025\n\020CreateCredentia"
 "l\020\334\013\022\022\n\rGetCredential\020\335\013\022\025\n\020DeleteCreden"
 "tial\020\336\013\022\025\n\020UpdateCredential\020\337\013\022\026\n\021ListCr"
 "edUsernames\020\340\013\022\017\n\nCreateRole\020\300\014\022\r\n\010DropR"
 "ole\020\301\014\022\024\n\017OperateUserRole\020\302\014\022\017\n\nSelectRo"
 "le\020\303\014\022\017\n\nSelectUser\020\304\014\022\023\n\016SelectResource"
 "\020\305\014\022\025\n\020OperatePrivilege\020\306\014\022\020\n\013SelectGran"
 "t\020\307\014\022\033\n\026RefreshPolicyInfoCache\020\310\014\022\017\n\nLis"
 "tPolicy\020\311\014\022\030\n\023CreateResourceGroup\020\244\r\022\026\n\021"
 "DropResourceGroup\020\245\r\022\027\n\022ListResourceGrou"
 "ps\020\246\r\022\032\n\025DescribeResourceGroup\020\247\r\022\021\n\014Tra"
 "nsferNode\020\250\r\022\024\n\017TransferReplica\020\251\r\022\023\n\016Cr"
 "eateDatabase\020\211\016\022\021\n\014DropDatabase\020\212\016\022\022\n\rLi"
 "stDatabases\020\213\016*\"\n\007DslType\022\007\n\003Dsl\020\000\022\016\n\nBo"
 "olExprV1\020\001*B\n\017CompactionState\022\021\n\rUndefie"
 "dState\020\000\022\r\n\tExecuting\020\001\022\r\n\tCompleted\020\002*X"
 "\n\020ConsistencyLevel\022\n\n\006Strong\020\000\022\013\n\007Sessio"
 "n\020\001\022\013\n\007Bounded\020\002\022\016\n\nEventually\020\003\022\016\n\nCust"
 "omized\020\004*\236\001\n\013ImportState\022\021\n\rImportPendin"
 "g\020\000\022\020\n\014ImportFailed\020\001\022\021\n\rImportStarted\020\002"
 "\022\023\n\017ImportPersisted\020\005\022\021\n\rImportFlushed\020\010"
 "\022\023\n\017ImportCompleted\020\006\022\032\n\026ImportFailedAnd"
 "Cleaned\020\007*2\n\nObjectType\022\016\n\nCollection\020\000\022"
 "\n\n\006Global\020\001\022\010\n\004User\020\002*\241\010\n\017ObjectPrivileg"
 "e\022\020\n\014PrivilegeAll\020\000\022\035\n\031PrivilegeCreateCo"
 "llection\020\001\022\033\n\027PrivilegeDropCollection\020\002\022"
 "\037\n\033PrivilegeDescribeCollection\020\003\022\034\n\030Priv"
 "ilegeShowCollections\020\004\022\021\n\rPrivilegeLoad\020"
 "\005\022\024\n\020PrivilegeRelease\020\006\022\027\n\023PrivilegeComp"
 "action\020\007\022\023\n\017PrivilegeInsert\020\010\022\023\n\017Privile"
 "geDelete\020\t\022\032\n\026PrivilegeGetStatistics\020\n\022\030"
 "\n\024PrivilegeCreateIndex\020\013\022\030\n\024PrivilegeInd"
 "exDetail\020\014\022\026\n\022PrivilegeDropIndex\020\r\022\023\n\017Pr"
 "ivilegeSearch\020\016\022\022\n\016PrivilegeFlush\020\017\022\022\n\016P"
 "rivilegeQuery\020\020\022\030\n\024PrivilegeLoadBalance\020"
 "\021\022\023\n\017PrivilegeImport\020\022\022\034\n\030PrivilegeCreat"
 "eOwnership\020\023\022\027\n\023PrivilegeUpdateUser\020\024\022\032\n"
 "\026PrivilegeDropOwnership\020\025\022\034\n\030PrivilegeSe"
 "lectOwnership\020\026\022\034\n\030PrivilegeManageOwners"
 "hip\020\027\022\027\n\023PrivilegeSelectUser\020\030\022\023\n\017Privil"
 "egeUpsert\020\031\022 \n\034PrivilegeCreateResourceGr"
 "oup\020\032\022\036\n\032PrivilegeDropResourceGroup\020\033\022\"\n"
 "\036PrivilegeDescribeResourceGroup\020\034\022\037\n\033Pri"
 "vilegeListResourceGroups\020\035\022\031\n\025PrivilegeT"
 "ransferNode\020\036\022\034\n\030PrivilegeTransferReplic"
 "a\020\037\022\037\n\033PrivilegeGetLoadingProgress\020 \022\031\n\025"
 "PrivilegeGetLoadState\020!\022\035\n\031PrivilegeRena"
 "meCollection\020\"\022\033\n\027PrivilegeCreateDatabas"
 "e\020#\022\031\n\025PrivilegeDropDatabase\020$\022\032\n\026Privil"
 "egeListDatabases\020%\022\025\n\021PrivilegeFlushAll\020"
 "&*S\n\tStateCode\022\020\n\014Initializing\020\000\022\013\n\007Heal"
 "thy\020\001\022\014\n\010Abnormal\020\002\022\013\n\007StandBy\020\003\022\014\n\010Stop"
 "ping\020\004*c\n\tLoadState\022\025\n\021LoadStateNotExist"
 "\020\000\022\024\n\020LoadStateNotLoad\020\001\022\024\n\020LoadStateLoa"
 "ding\020\002\022\023\n\017LoadStateLoaded\020\003:^\n\021privilege"
 "_ext_obj\022\037.google.protobuf.MessageOption"
 "s\030\351\007 \001(\0132!.milvus.proto.common.Privilege"
 "ExtBm\n\016io.milvus.grpcB\013CommonProtoP\001Z4gi"
 "thub.com/milvus-io/milvus-proto/go-api/v"
 "2/commonpb\240\001\001\252\002\022Milv"
 "us.Client.Grpcb\006prot"
 "o3"
 ;
 static const ::_pbi::DescriptorTable* const descriptor_table_common_2eproto_deps[1] = {
   &::descriptor_table_google_2fprotobuf_2fdescriptor_2eproto,
 };
 static ::_pbi::once_flag descriptor_table_common_2eproto_once;
 const ::_pbi::DescriptorTable descriptor_table_common_2eproto = {
-    false, false, 7222, descriptor_table_protodef_common_2eproto,
+    false, false, 7202, descriptor_table_protodef_common_2eproto,
     "common.proto",
     &descriptor_table_common_2eproto_once, descriptor_table_common_2eproto_deps, 1, 17,
     schemas, file_default_instances, TableStruct_common_2eproto::offsets,
@@ -2726,38 +2725,26 @@ void Address::InternalSwap(Address* other) {
 // ===================================================================
-MsgBase_PropertiesEntry_DoNotUse::MsgBase_PropertiesEntry_DoNotUse() {}
-MsgBase_PropertiesEntry_DoNotUse::MsgBase_PropertiesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
-    : SuperType(arena) {}
-void MsgBase_PropertiesEntry_DoNotUse::MergeFrom(const MsgBase_PropertiesEntry_DoNotUse& other) {
-  MergeFromInternal(other);
-}
-::PROTOBUF_NAMESPACE_ID::Metadata MsgBase_PropertiesEntry_DoNotUse::GetMetadata() const {
-  return ::_pbi::AssignDescriptors(
-      &descriptor_table_common_2eproto_getter, &descriptor_table_common_2eproto_once,
-      file_level_metadata_common_2eproto[7]);
-}
-// ===================================================================
 class MsgBase::_Internal {
  public:
+  static const ::milvus::proto::common::ReplicateEntity& replicate(const MsgBase* msg);
 };
+const ::milvus::proto::common::ReplicateEntity&
+MsgBase::_Internal::replicate(const MsgBase* msg) {
+  return *msg->_impl_.replicate_;
+}
 MsgBase::MsgBase(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                  bool is_message_owned)
   : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) {
   SharedCtor(arena, is_message_owned);
-  if (arena != nullptr && !is_message_owned) {
-    arena->OwnCustomDestructor(this, &MsgBase::ArenaDtor);
-  }
   // @@protoc_insertion_point(arena_constructor:milvus.proto.common.MsgBase)
 }
 MsgBase::MsgBase(const MsgBase& from)
   : ::PROTOBUF_NAMESPACE_ID::Message() {
   MsgBase* const _this = this; (void)_this;
   new (&_impl_) Impl_{
-      /*decltype(_impl_.properties_)*/{}
+      decltype(_impl_.replicate_){nullptr}
     , decltype(_impl_.msgid_){}
     , decltype(_impl_.timestamp_){}
     , decltype(_impl_.sourceid_){}
@@ -2766,7 +2753,9 @@ MsgBase::MsgBase(const MsgBase& from)
     , /*decltype(_impl_._cached_size_)*/{}};
   _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
-  _this->_impl_.properties_.MergeFrom(from._impl_.properties_);
+  if (from._internal_has_replicate()) {
+    _this->_impl_.replicate_ = new ::milvus::proto::common::ReplicateEntity(*from._impl_.replicate_);
+  }
   ::memcpy(&_impl_.msgid_, &from._impl_.msgid_,
     static_cast<size_t>(reinterpret_cast<char*>(&_impl_.msg_type_) -
     reinterpret_cast<char*>(&_impl_.msgid_)) + sizeof(_impl_.msg_type_));
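The rewritten copy constructor deep-copies the replicate submessage only when it is present. A small check of that behavior (a sketch; public accessor names are assumed per protoc conventions):

#include <cassert>
#include "common.pb.h"

int main() {
  milvus::proto::common::MsgBase a;
  a.mutable_replicate()->set_isreplicate(true);
  milvus::proto::common::MsgBase b(a);  // exercises the copy constructor above
  assert(b.has_replicate());
  assert(&b.replicate() != &a.replicate());  // a distinct, deep-copied object
  return 0;
}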
@@ -2778,7 +2767,7 @@ inline void MsgBase::SharedCtor(
   (void)arena;
   (void)is_message_owned;
   new (&_impl_) Impl_{
-      /*decltype(_impl_.properties_)*/{::_pbi::ArenaInitialized(), arena}
+      decltype(_impl_.replicate_){nullptr}
     , decltype(_impl_.msgid_){int64_t{0}}
     , decltype(_impl_.timestamp_){uint64_t{0u}}
     , decltype(_impl_.sourceid_){int64_t{0}}
@@ -2792,7 +2781,6 @@ MsgBase::~MsgBase() {
   // @@protoc_insertion_point(destructor:milvus.proto.common.MsgBase)
   if (auto *arena = _internal_metadata_.DeleteReturnArena<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>()) {
     (void)arena;
-    ArenaDtor(this);
     return;
   }
   SharedDtor();
@@ -2800,14 +2788,9 @@ MsgBase::~MsgBase() {
 inline void MsgBase::SharedDtor() {
   GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
-  _impl_.properties_.Destruct();
-  _impl_.properties_.~MapField();
+  if (this != internal_default_instance()) delete _impl_.replicate_;
 }
-void MsgBase::ArenaDtor(void* object) {
-  MsgBase* _this = reinterpret_cast< MsgBase* >(object);
-  _this->_impl_.properties_.Destruct();
-}
 void MsgBase::SetCachedSize(int size) const {
   _impl_._cached_size_.Set(size);
 }
@@ -2818,7 +2801,10 @@ void MsgBase::Clear() {
   // Prevent compiler warnings about cached_has_bits being unused
   (void) cached_has_bits;
-  _impl_.properties_.Clear();
+  if (GetArenaForAllocation() == nullptr && _impl_.replicate_ != nullptr) {
+    delete _impl_.replicate_;
+  }
+  _impl_.replicate_ = nullptr;
   ::memset(&_impl_.msgid_, 0, static_cast<size_t>(
       reinterpret_cast<char*>(&_impl_.msg_type_) -
       reinterpret_cast<char*>(&_impl_.msgid_)) + sizeof(_impl_.msg_type_));
@@ -2872,16 +2858,11 @@ const char* MsgBase::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx)
       } else
         goto handle_unusual;
       continue;
-      // map<string, string> properties = 6;
+      // .milvus.proto.common.ReplicateEntity replicate = 6;
       case 6:
         if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 50)) {
-          ptr -= 1;
-          do {
-            ptr += 1;
-            ptr = ctx->ParseMessage(&_impl_.properties_, ptr);
-            CHK_(ptr);
-            if (!ctx->DataAvailable(ptr)) break;
-          } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<50>(ptr));
+          ptr = ctx->ParseMessage(_internal_mutable_replicate(), ptr);
+          CHK_(ptr);
         } else
           goto handle_unusual;
         continue;
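The tag constants in the parser follow directly from the protobuf wire format: tag = (field_number << 3) | wire_type, where wire type 2 is length-delimited (a submessage) and wire type 0 is a varint (the bool in ReplicateEntity's parser further down). A compile-time check of the two tags that appear in this diff:

// Standalone sketch; MakeTag is our name, not a protobuf API.
constexpr unsigned MakeTag(unsigned field_number, unsigned wire_type) {
  return (field_number << 3) | wire_type;
}
static_assert(MakeTag(6, 2) == 50, "replicate: field 6, length-delimited");
static_assert(MakeTag(1, 0) == 8, "isReplicate: field 1, varint");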
@@ -2945,34 +2926,11 @@ uint8_t* MsgBase::_InternalSerialize(
     target = ::_pbi::WireFormatLite::WriteInt64ToArray(5, this->_internal_targetid(), target);
   }
-  // map<string, string> properties = 6;
-  if (!this->_internal_properties().empty()) {
-    using MapType = ::_pb::Map<std::string, std::string>;
-    using WireHelper = MsgBase_PropertiesEntry_DoNotUse::Funcs;
-    const auto& map_field = this->_internal_properties();
-    auto check_utf8 = [](const MapType::value_type& entry) {
-      (void)entry;
-      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
-        entry.first.data(), static_cast<int>(entry.first.length()),
-        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
-        "milvus.proto.common.MsgBase.PropertiesEntry.key");
-      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
-        entry.second.data(), static_cast<int>(entry.second.length()),
-        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
-        "milvus.proto.common.MsgBase.PropertiesEntry.value");
-    };
-    if (stream->IsSerializationDeterministic() && map_field.size() > 1) {
-      for (const auto& entry : ::_pbi::MapSorterPtr<MapType>(map_field)) {
-        target = WireHelper::InternalSerialize(6, entry.first, entry.second, target, stream);
-        check_utf8(entry);
-      }
-    } else {
-      for (const auto& entry : map_field) {
-        target = WireHelper::InternalSerialize(6, entry.first, entry.second, target, stream);
-        check_utf8(entry);
-      }
-    }
+  // .milvus.proto.common.ReplicateEntity replicate = 6;
+  if (this->_internal_has_replicate()) {
+    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
+      InternalWriteMessage(6, _Internal::replicate(this),
+        _Internal::replicate(this).GetCachedSize(), target, stream);
   }
   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
@@ -2991,13 +2949,11 @@ size_t MsgBase::ByteSizeLong() const {
   // Prevent compiler warnings about cached_has_bits being unused
   (void) cached_has_bits;
-  // map<string, string> properties = 6;
-  total_size += 1 *
-      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_properties_size());
-  for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
-      it = this->_internal_properties().begin();
-      it != this->_internal_properties().end(); ++it) {
-    total_size += MsgBase_PropertiesEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
+  // .milvus.proto.common.ReplicateEntity replicate = 6;
+  if (this->_internal_has_replicate()) {
+    total_size += 1 +
+      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
+        *_impl_.replicate_);
   }
   // int64 msgID = 2;
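The `1 + MessageSize(...)` above is one tag byte plus a length-prefixed payload. A hedged sanity check of the resulting sizes (assuming the conventional public accessors exist):

#include <cassert>
#include "common.pb.h"

int main() {
  milvus::proto::common::ReplicateEntity entity;
  entity.set_isreplicate(true);
  assert(entity.ByteSizeLong() == 2);  // tag 0x08 (1 byte) + bool varint (1 byte)

  milvus::proto::common::MsgBase base;
  *base.mutable_replicate() = entity;
  assert(base.ByteSizeLong() == 4);    // tag 0x32 + length varint (2) + 2-byte payload
  return 0;
}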
@@ -3044,7 +3000,10 @@ void MsgBase::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOB
   uint32_t cached_has_bits = 0;
   (void) cached_has_bits;
-  _this->_impl_.properties_.MergeFrom(from._impl_.properties_);
+  if (from._internal_has_replicate()) {
+    _this->_internal_mutable_replicate()->::milvus::proto::common::ReplicateEntity::MergeFrom(
+        from._internal_replicate());
+  }
   if (from._internal_msgid() != 0) {
     _this->_internal_set_msgid(from._internal_msgid());
   }
@@ -3077,16 +3036,193 @@ bool MsgBase::IsInitialized() const {
 void MsgBase::InternalSwap(MsgBase* other) {
   using std::swap;
   _internal_metadata_.InternalSwap(&other->_internal_metadata_);
-  _impl_.properties_.InternalSwap(&other->_impl_.properties_);
   ::PROTOBUF_NAMESPACE_ID::internal::memswap<
       PROTOBUF_FIELD_OFFSET(MsgBase, _impl_.msg_type_)
       + sizeof(MsgBase::_impl_.msg_type_)
-      - PROTOBUF_FIELD_OFFSET(MsgBase, _impl_.msgid_)>(
-          reinterpret_cast<char*>(&_impl_.msgid_),
-          reinterpret_cast<char*>(&other->_impl_.msgid_));
+      - PROTOBUF_FIELD_OFFSET(MsgBase, _impl_.replicate_)>(
+          reinterpret_cast<char*>(&_impl_.replicate_),
+          reinterpret_cast<char*>(&other->_impl_.replicate_));
 }
 ::PROTOBUF_NAMESPACE_ID::Metadata MsgBase::GetMetadata() const {
+  return ::_pbi::AssignDescriptors(
+      &descriptor_table_common_2eproto_getter, &descriptor_table_common_2eproto_once,
+      file_level_metadata_common_2eproto[7]);
+}
+// ===================================================================
class ReplicateEntity::_Internal {
public:
};
ReplicateEntity::ReplicateEntity(::PROTOBUF_NAMESPACE_ID::Arena* arena,
bool is_message_owned)
: ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) {
SharedCtor(arena, is_message_owned);
// @@protoc_insertion_point(arena_constructor:milvus.proto.common.ReplicateEntity)
}
ReplicateEntity::ReplicateEntity(const ReplicateEntity& from)
: ::PROTOBUF_NAMESPACE_ID::Message() {
ReplicateEntity* const _this = this; (void)_this;
new (&_impl_) Impl_{
decltype(_impl_.isreplicate_){}
, /*decltype(_impl_._cached_size_)*/{}};
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
_this->_impl_.isreplicate_ = from._impl_.isreplicate_;
// @@protoc_insertion_point(copy_constructor:milvus.proto.common.ReplicateEntity)
}
inline void ReplicateEntity::SharedCtor(
::_pb::Arena* arena, bool is_message_owned) {
(void)arena;
(void)is_message_owned;
new (&_impl_) Impl_{
decltype(_impl_.isreplicate_){false}
, /*decltype(_impl_._cached_size_)*/{}
};
}
ReplicateEntity::~ReplicateEntity() {
// @@protoc_insertion_point(destructor:milvus.proto.common.ReplicateEntity)
if (auto *arena = _internal_metadata_.DeleteReturnArena<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>()) {
(void)arena;
return;
}
SharedDtor();
}
inline void ReplicateEntity::SharedDtor() {
GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
}
void ReplicateEntity::SetCachedSize(int size) const {
_impl_._cached_size_.Set(size);
}
void ReplicateEntity::Clear() {
// @@protoc_insertion_point(message_clear_start:milvus.proto.common.ReplicateEntity)
uint32_t cached_has_bits = 0;
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
_impl_.isreplicate_ = false;
_internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}
const char* ReplicateEntity::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
while (!ctx->Done(&ptr)) {
uint32_t tag;
ptr = ::_pbi::ReadTag(ptr, &tag);
switch (tag >> 3) {
// bool isReplicate = 1;
case 1:
if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 8)) {
_impl_.isreplicate_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
CHK_(ptr);
} else
goto handle_unusual;
continue;
default:
goto handle_unusual;
} // switch
handle_unusual:
if ((tag == 0) || ((tag & 7) == 4)) {
CHK_(ptr);
ctx->SetLastTag(tag);
goto message_done;
}
ptr = UnknownFieldParse(
tag,
_internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
ptr, ctx);
CHK_(ptr != nullptr);
} // while
message_done:
return ptr;
failure:
ptr = nullptr;
goto message_done;
#undef CHK_
}
uint8_t* ReplicateEntity::_InternalSerialize(
uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
// @@protoc_insertion_point(serialize_to_array_start:milvus.proto.common.ReplicateEntity)
uint32_t cached_has_bits = 0;
(void) cached_has_bits;
// bool isReplicate = 1;
if (this->_internal_isreplicate() != 0) {
target = stream->EnsureSpace(target);
target = ::_pbi::WireFormatLite::WriteBoolToArray(1, this->_internal_isreplicate(), target);
}
if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
target = ::_pbi::WireFormat::InternalSerializeUnknownFieldsToArray(
_internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
}
// @@protoc_insertion_point(serialize_to_array_end:milvus.proto.common.ReplicateEntity)
return target;
}
size_t ReplicateEntity::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:milvus.proto.common.ReplicateEntity)
size_t total_size = 0;
uint32_t cached_has_bits = 0;
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
// bool isReplicate = 1;
if (this->_internal_isreplicate() != 0) {
total_size += 1 + 1;
}
return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_);
}
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData ReplicateEntity::_class_data_ = {
::PROTOBUF_NAMESPACE_ID::Message::CopyWithSourceCheck,
ReplicateEntity::MergeImpl
};
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*ReplicateEntity::GetClassData() const { return &_class_data_; }
void ReplicateEntity::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg) {
auto* const _this = static_cast<ReplicateEntity*>(&to_msg);
auto& from = static_cast<const ReplicateEntity&>(from_msg);
// @@protoc_insertion_point(class_specific_merge_from_start:milvus.proto.common.ReplicateEntity)
GOOGLE_DCHECK_NE(&from, _this);
uint32_t cached_has_bits = 0;
(void) cached_has_bits;
if (from._internal_isreplicate() != 0) {
_this->_internal_set_isreplicate(from._internal_isreplicate());
}
_this->_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}
void ReplicateEntity::CopyFrom(const ReplicateEntity& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:milvus.proto.common.ReplicateEntity)
if (&from == this) return;
Clear();
MergeFrom(from);
}
bool ReplicateEntity::IsInitialized() const {
return true;
}
void ReplicateEntity::InternalSwap(ReplicateEntity* other) {
using std::swap;
_internal_metadata_.InternalSwap(&other->_internal_metadata_);
swap(_impl_.isreplicate_, other->_impl_.isreplicate_);
}
::PROTOBUF_NAMESPACE_ID::Metadata ReplicateEntity::GetMetadata() const {
  return ::_pbi::AssignDescriptors(
      &descriptor_table_common_2eproto_getter, &descriptor_table_common_2eproto_once,
      file_level_metadata_common_2eproto[8]);
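Taken together, the parse, serialize, and merge plumbing above round-trips the new field through the wire format. A self-contained sketch (accessor names assumed per protoc conventions):

#include <cassert>
#include <string>
#include "common.pb.h"

int main() {
  milvus::proto::common::MsgBase in;
  in.set_msgid(7);
  in.mutable_replicate()->set_isreplicate(true);

  std::string wire;
  assert(in.SerializeToString(&wire));

  milvus::proto::common::MsgBase out;
  assert(out.ParseFromString(wire));
  assert(out.msgid() == 7);
  assert(out.replicate().isreplicate());
  return 0;
}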
@@ -5012,14 +5148,14 @@ template<> PROTOBUF_NOINLINE ::milvus::proto::common::Address*
 Arena::CreateMaybeMessage< ::milvus::proto::common::Address >(Arena* arena) {
   return Arena::CreateMessageInternal< ::milvus::proto::common::Address >(arena);
 }
-template<> PROTOBUF_NOINLINE ::milvus::proto::common::MsgBase_PropertiesEntry_DoNotUse*
-Arena::CreateMaybeMessage< ::milvus::proto::common::MsgBase_PropertiesEntry_DoNotUse >(Arena* arena) {
-  return Arena::CreateMessageInternal< ::milvus::proto::common::MsgBase_PropertiesEntry_DoNotUse >(arena);
-}
 template<> PROTOBUF_NOINLINE ::milvus::proto::common::MsgBase*
 Arena::CreateMaybeMessage< ::milvus::proto::common::MsgBase >(Arena* arena) {
   return Arena::CreateMessageInternal< ::milvus::proto::common::MsgBase >(arena);
 }
+template<> PROTOBUF_NOINLINE ::milvus::proto::common::ReplicateEntity*
+Arena::CreateMaybeMessage< ::milvus::proto::common::ReplicateEntity >(Arena* arena) {
+  return Arena::CreateMessageInternal< ::milvus::proto::common::ReplicateEntity >(arena);
+}
 template<> PROTOBUF_NOINLINE ::milvus::proto::common::MsgHeader*
 Arena::CreateMaybeMessage< ::milvus::proto::common::MsgHeader >(Arena* arena) {
   return Arena::CreateMessageInternal< ::milvus::proto::common::MsgHeader >(arena);

common.pb.h

@ -76,9 +76,6 @@ extern KeyValuePairDefaultTypeInternal _KeyValuePair_default_instance_;
class MsgBase;
struct MsgBaseDefaultTypeInternal;
extern MsgBaseDefaultTypeInternal _MsgBase_default_instance_;
class MsgBase_PropertiesEntry_DoNotUse;
struct MsgBase_PropertiesEntry_DoNotUseDefaultTypeInternal;
extern MsgBase_PropertiesEntry_DoNotUseDefaultTypeInternal _MsgBase_PropertiesEntry_DoNotUse_default_instance_;
class MsgHeader;
struct MsgHeaderDefaultTypeInternal;
extern MsgHeaderDefaultTypeInternal _MsgHeader_default_instance_;
@ -91,6 +88,9 @@ extern PlaceholderValueDefaultTypeInternal _PlaceholderValue_default_instance_;
class PrivilegeExt;
struct PrivilegeExtDefaultTypeInternal;
extern PrivilegeExtDefaultTypeInternal _PrivilegeExt_default_instance_;
class ReplicateEntity;
struct ReplicateEntityDefaultTypeInternal;
extern ReplicateEntityDefaultTypeInternal _ReplicateEntity_default_instance_;
class SegmentStats;
struct SegmentStatsDefaultTypeInternal;
extern SegmentStatsDefaultTypeInternal _SegmentStats_default_instance_;
@ -115,11 +115,11 @@ template<> ::milvus::proto::common::DMLMsgHeader* Arena::CreateMaybeMessage<::mi
template<> ::milvus::proto::common::KeyDataPair* Arena::CreateMaybeMessage<::milvus::proto::common::KeyDataPair>(Arena*);
template<> ::milvus::proto::common::KeyValuePair* Arena::CreateMaybeMessage<::milvus::proto::common::KeyValuePair>(Arena*);
template<> ::milvus::proto::common::MsgBase* Arena::CreateMaybeMessage<::milvus::proto::common::MsgBase>(Arena*);
template<> ::milvus::proto::common::MsgBase_PropertiesEntry_DoNotUse* Arena::CreateMaybeMessage<::milvus::proto::common::MsgBase_PropertiesEntry_DoNotUse>(Arena*);
template<> ::milvus::proto::common::MsgHeader* Arena::CreateMaybeMessage<::milvus::proto::common::MsgHeader>(Arena*);
template<> ::milvus::proto::common::PlaceholderGroup* Arena::CreateMaybeMessage<::milvus::proto::common::PlaceholderGroup>(Arena*);
template<> ::milvus::proto::common::PlaceholderValue* Arena::CreateMaybeMessage<::milvus::proto::common::PlaceholderValue>(Arena*);
template<> ::milvus::proto::common::PrivilegeExt* Arena::CreateMaybeMessage<::milvus::proto::common::PrivilegeExt>(Arena*);
template<> ::milvus::proto::common::ReplicateEntity* Arena::CreateMaybeMessage<::milvus::proto::common::ReplicateEntity>(Arena*);
template<> ::milvus::proto::common::SegmentStats* Arena::CreateMaybeMessage<::milvus::proto::common::SegmentStats>(Arena*);
template<> ::milvus::proto::common::ServerInfo* Arena::CreateMaybeMessage<::milvus::proto::common::ServerInfo>(Arena*);
template<> ::milvus::proto::common::ServerInfo_ReservedEntry_DoNotUse* Arena::CreateMaybeMessage<::milvus::proto::common::ServerInfo_ReservedEntry_DoNotUse>(Arena*);
@ -1854,34 +1854,6 @@
};
// -------------------------------------------------------------------
class MsgBase_PropertiesEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry<MsgBase_PropertiesEntry_DoNotUse,
std::string, std::string,
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> {
public:
typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry<MsgBase_PropertiesEntry_DoNotUse,
std::string, std::string,
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> SuperType;
MsgBase_PropertiesEntry_DoNotUse();
explicit PROTOBUF_CONSTEXPR MsgBase_PropertiesEntry_DoNotUse(
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
explicit MsgBase_PropertiesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena);
void MergeFrom(const MsgBase_PropertiesEntry_DoNotUse& other);
static const MsgBase_PropertiesEntry_DoNotUse* internal_default_instance() { return reinterpret_cast<const MsgBase_PropertiesEntry_DoNotUse*>(&_MsgBase_PropertiesEntry_DoNotUse_default_instance_); }
static bool ValidateKey(std::string* s) {
return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "milvus.proto.common.MsgBase.PropertiesEntry.key");
}
static bool ValidateValue(std::string* s) {
return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast<int>(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "milvus.proto.common.MsgBase.PropertiesEntry.value");
}
using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
friend struct ::TableStruct_common_2eproto;
};
// -------------------------------------------------------------------
class MsgBase final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.common.MsgBase) */ {
 public:
@ -1930,7 +1902,7 @@ class MsgBase final :
        &_MsgBase_default_instance_);
  }
  static constexpr int kIndexInFileMessages =
    7;
  friend void swap(MsgBase& a, MsgBase& b) {
    a.Swap(&b);
@ -1991,8 +1963,6 @@ class MsgBase final :
 protected:
  explicit MsgBase(::PROTOBUF_NAMESPACE_ID::Arena* arena,
                   bool is_message_owned = false);
private:
static void ArenaDtor(void* object);
 public:
  static const ClassData _class_data_;
@ -2002,33 +1972,33 @@ class MsgBase final :
  // nested types ----------------------------------------------------
  // accessors -------------------------------------------------------
  enum : int {
    kReplicateFieldNumber = 6,
    kMsgIDFieldNumber = 2,
    kTimestampFieldNumber = 3,
    kSourceIDFieldNumber = 4,
    kTargetIDFieldNumber = 5,
    kMsgTypeFieldNumber = 1,
  };
  // .milvus.proto.common.ReplicateEntity replicate = 6;
  bool has_replicate() const;
 private:
  bool _internal_has_replicate() const;
 public:
  void clear_replicate();
  const ::milvus::proto::common::ReplicateEntity& replicate() const;
  PROTOBUF_NODISCARD ::milvus::proto::common::ReplicateEntity* release_replicate();
  ::milvus::proto::common::ReplicateEntity* mutable_replicate();
  void set_allocated_replicate(::milvus::proto::common::ReplicateEntity* replicate);
 private:
  const ::milvus::proto::common::ReplicateEntity& _internal_replicate() const;
  ::milvus::proto::common::ReplicateEntity* _internal_mutable_replicate();
 public:
  void unsafe_arena_set_allocated_replicate(
      ::milvus::proto::common::ReplicateEntity* replicate);
  ::milvus::proto::common::ReplicateEntity* unsafe_arena_release_replicate();
  // int64 msgID = 2;
  void clear_msgid();
@ -2083,11 +2053,7 @@ class MsgBase final :
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  struct Impl_ {
    ::milvus::proto::common::ReplicateEntity* replicate_;
    int64_t msgid_;
    uint64_t timestamp_;
    int64_t sourceid_;
@ -2100,6 +2066,154 @@
  };
// -------------------------------------------------------------------
class ReplicateEntity final :
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.common.ReplicateEntity) */ {
public:
inline ReplicateEntity() : ReplicateEntity(nullptr) {}
~ReplicateEntity() override;
explicit PROTOBUF_CONSTEXPR ReplicateEntity(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
ReplicateEntity(const ReplicateEntity& from);
ReplicateEntity(ReplicateEntity&& from) noexcept
: ReplicateEntity() {
*this = ::std::move(from);
}
inline ReplicateEntity& operator=(const ReplicateEntity& from) {
CopyFrom(from);
return *this;
}
inline ReplicateEntity& operator=(ReplicateEntity&& from) noexcept {
if (this == &from) return *this;
if (GetOwningArena() == from.GetOwningArena()
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
&& GetOwningArena() != nullptr
#endif // !PROTOBUF_FORCE_COPY_IN_MOVE
) {
InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
return GetDescriptor();
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
return default_instance().GetMetadata().descriptor;
}
static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
return default_instance().GetMetadata().reflection;
}
static const ReplicateEntity& default_instance() {
return *internal_default_instance();
}
static inline const ReplicateEntity* internal_default_instance() {
return reinterpret_cast<const ReplicateEntity*>(
&_ReplicateEntity_default_instance_);
}
static constexpr int kIndexInFileMessages =
8;
friend void swap(ReplicateEntity& a, ReplicateEntity& b) {
a.Swap(&b);
}
inline void Swap(ReplicateEntity* other) {
if (other == this) return;
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
if (GetOwningArena() != nullptr &&
GetOwningArena() == other->GetOwningArena()) {
#else // PROTOBUF_FORCE_COPY_IN_SWAP
if (GetOwningArena() == other->GetOwningArena()) {
#endif // !PROTOBUF_FORCE_COPY_IN_SWAP
InternalSwap(other);
} else {
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
}
}
void UnsafeArenaSwap(ReplicateEntity* other) {
if (other == this) return;
GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
InternalSwap(other);
}
// implements Message ----------------------------------------------
ReplicateEntity* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
return CreateMaybeMessage<ReplicateEntity>(arena);
}
using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
void CopyFrom(const ReplicateEntity& from);
using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
void MergeFrom( const ReplicateEntity& from) {
ReplicateEntity::MergeImpl(*this, from);
}
private:
static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg);
public:
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
bool IsInitialized() const final;
size_t ByteSizeLong() const final;
const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
uint8_t* _InternalSerialize(
uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
private:
void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
void SharedDtor();
void SetCachedSize(int size) const final;
void InternalSwap(ReplicateEntity* other);
private:
friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
return "milvus.proto.common.ReplicateEntity";
}
protected:
explicit ReplicateEntity(::PROTOBUF_NAMESPACE_ID::Arena* arena,
bool is_message_owned = false);
public:
static const ClassData _class_data_;
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final;
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
enum : int {
kIsReplicateFieldNumber = 1,
};
// bool isReplicate = 1;
void clear_isreplicate();
bool isreplicate() const;
void set_isreplicate(bool value);
private:
bool _internal_isreplicate() const;
void _internal_set_isreplicate(bool value);
public:
// @@protoc_insertion_point(class_scope:milvus.proto.common.ReplicateEntity)
private:
class _Internal;
template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
typedef void InternalArenaConstructable_;
typedef void DestructorSkippable_;
struct Impl_ {
bool isreplicate_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
};
union { Impl_ _impl_; };
friend struct ::TableStruct_common_2eproto;
};
// -------------------------------------------------------------------
class MsgHeader final :
    public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.common.MsgHeader) */ {
 public:
@ -3946,8 +4060,6 @@ inline void Address::set_port(int64_t value) {
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// MsgBase
// .milvus.proto.common.MsgType msg_type = 1;
@ -4050,33 +4162,118 @@ inline void MsgBase::set_targetid(int64_t value) {
  // @@protoc_insertion_point(field_set:milvus.proto.common.MsgBase.targetID)
}
// .milvus.proto.common.ReplicateEntity replicate = 6;
inline bool MsgBase::_internal_has_replicate() const {
  return this != internal_default_instance() && _impl_.replicate_ != nullptr;
}
inline bool MsgBase::has_replicate() const {
  return _internal_has_replicate();
}
inline void MsgBase::clear_replicate() {
  if (GetArenaForAllocation() == nullptr && _impl_.replicate_ != nullptr) {
    delete _impl_.replicate_;
  }
  _impl_.replicate_ = nullptr;
}
inline const ::milvus::proto::common::ReplicateEntity& MsgBase::_internal_replicate() const {
  const ::milvus::proto::common::ReplicateEntity* p = _impl_.replicate_;
  return p != nullptr ? *p : reinterpret_cast<const ::milvus::proto::common::ReplicateEntity&>(
      ::milvus::proto::common::_ReplicateEntity_default_instance_);
}
inline const ::milvus::proto::common::ReplicateEntity& MsgBase::replicate() const {
  // @@protoc_insertion_point(field_get:milvus.proto.common.MsgBase.replicate)
  return _internal_replicate();
}
inline void MsgBase::unsafe_arena_set_allocated_replicate(
    ::milvus::proto::common::ReplicateEntity* replicate) {
  if (GetArenaForAllocation() == nullptr) {
    delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.replicate_);
  }
  _impl_.replicate_ = replicate;
  if (replicate) {
  } else {
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:milvus.proto.common.MsgBase.replicate)
}
inline ::milvus::proto::common::ReplicateEntity* MsgBase::release_replicate() {
  ::milvus::proto::common::ReplicateEntity* temp = _impl_.replicate_;
  _impl_.replicate_ = nullptr;
#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE
auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp);
temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
if (GetArenaForAllocation() == nullptr) { delete old; }
#else // PROTOBUF_FORCE_COPY_IN_RELEASE
if (GetArenaForAllocation() != nullptr) {
temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
}
#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE
return temp;
}
inline ::milvus::proto::common::ReplicateEntity* MsgBase::unsafe_arena_release_replicate() {
// @@protoc_insertion_point(field_release:milvus.proto.common.MsgBase.replicate)
::milvus::proto::common::ReplicateEntity* temp = _impl_.replicate_;
_impl_.replicate_ = nullptr;
return temp;
}
inline ::milvus::proto::common::ReplicateEntity* MsgBase::_internal_mutable_replicate() {
if (_impl_.replicate_ == nullptr) {
auto* p = CreateMaybeMessage<::milvus::proto::common::ReplicateEntity>(GetArenaForAllocation());
_impl_.replicate_ = p;
}
return _impl_.replicate_;
}
inline ::milvus::proto::common::ReplicateEntity* MsgBase::mutable_replicate() {
::milvus::proto::common::ReplicateEntity* _msg = _internal_mutable_replicate();
// @@protoc_insertion_point(field_mutable:milvus.proto.common.MsgBase.replicate)
return _msg;
}
inline void MsgBase::set_allocated_replicate(::milvus::proto::common::ReplicateEntity* replicate) {
::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation();
if (message_arena == nullptr) {
delete _impl_.replicate_;
}
if (replicate) {
::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena =
::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(replicate);
if (message_arena != submessage_arena) {
replicate = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
message_arena, replicate, submessage_arena);
}
} else {
}
_impl_.replicate_ = replicate;
// @@protoc_insertion_point(field_set_allocated:milvus.proto.common.MsgBase.replicate)
}
// -------------------------------------------------------------------
// ReplicateEntity
// bool isReplicate = 1;
inline void ReplicateEntity::clear_isreplicate() {
_impl_.isreplicate_ = false;
}
inline bool ReplicateEntity::_internal_isreplicate() const {
return _impl_.isreplicate_;
}
inline bool ReplicateEntity::isreplicate() const {
// @@protoc_insertion_point(field_get:milvus.proto.common.ReplicateEntity.isReplicate)
return _internal_isreplicate();
}
inline void ReplicateEntity::_internal_set_isreplicate(bool value) {
_impl_.isreplicate_ = value;
}
inline void ReplicateEntity::set_isreplicate(bool value) {
_internal_set_isreplicate(value);
// @@protoc_insertion_point(field_set:milvus.proto.common.ReplicateEntity.isReplicate)
}
// -------------------------------------------------------------------
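
Taken together, the generated changes above replace MsgBase's former properties map (field 6) with a singular ReplicateEntity submessage. For orientation, a minimal Go sketch of tagging a message as replicated through the same proto; it assumes the pinned milvus-proto go-api version from this commit and is illustrative, not part of the diff:

    package main

    import (
    	"fmt"

    	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
    )

    func main() {
    	// Field 6 of MsgBase is now a ReplicateEntity submessage instead of
    	// the old map<string, string> properties.
    	base := &commonpb.MsgBase{
    		MsgType:   commonpb.MsgType_Insert,
    		MsgID:     1,
    		Timestamp: 100,
    		SourceID:  1,
    		Replicate: &commonpb.ReplicateEntity{IsReplicate: true},
    	}
    	fmt.Println(base.GetReplicate().GetIsReplicate()) // true
    }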

View File

@ -1063,7 +1063,7 @@ func (s *Server) stopServerLoop() {
	s.serverLoopWg.Wait()
}

// func (s *Server) validateAllocRequest(collID UniqueID, partID UniqueID, channelName string) error {
// if !s.meta.HasCollection(collID) {
// return fmt.Errorf("can not find collection %d", collID)
// }
@ -1076,7 +1076,7 @@ func (s *Server) stopServerLoop() {
// }
// }
// return fmt.Errorf("can not find channel %s", channelName)
// }

// loadCollectionFromRootCoord communicates with RootCoord and asks for collection information.
// collection information will be added to server meta info.

View File

@ -103,6 +103,9 @@ func (mtm *mockTtMsgStream) CheckTopicValid(channel string) error {
	return nil
}
func (mtm *mockTtMsgStream) EnableProduce(can bool) {
}
func TestNewDmInputNode(t *testing.T) {
	client := msgdispatcher.NewClient(&mockMsgStreamFactory{}, typeutil.DataNodeRole, paramtable.GetNodeID())
	_, err := newDmInputNode(context.Background(), client, new(msgpb.MsgPosition), &nodeConfig{

View File

@ -464,6 +464,7 @@ func (ibNode *insertBufferNode) Sync(fgMsg *flowGraphMsg, seg2Upload []UniqueID,
	).WithRateGroup("ibNode.sync", 1, 60)
	// check if segment is syncing
	segment := ibNode.channel.getSegment(task.segmentID)
	if !task.dropped && !task.flushed && segment.isSyncing() {
		log.RatedInfo(10, "segment is syncing, skip it")
		continue
@ -739,7 +740,8 @@ func newInsertBufferNode(
	wTt.AsProducer([]string{Params.CommonCfg.DataCoordTimeTick.GetValue()})
	metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
	log.Info("datanode AsProducer", zap.String("TimeTickChannelName", Params.CommonCfg.DataCoordTimeTick.GetValue()))
	wTtMsgStream := wTt
wTtMsgStream.EnableProduce(true)
	mt := newMergedTimeTickerSender(func(ts Timestamp, segmentIDs []int64) error {
		stats := make([]*commonpb.SegmentStats, 0, len(segmentIDs))

View File

@ -63,7 +63,7 @@ type flushTaskRunner struct {
	segmentID UniqueID
	insertLogs map[UniqueID]*datapb.Binlog
	statsLogs map[UniqueID]*datapb.Binlog
	deltaLogs []*datapb.Binlog // []*DelDataBuf
	pos *msgpb.MsgPosition
	flushed bool
	dropped bool
@ -156,7 +156,7 @@ func (t *flushTaskRunner) runFlushInsert(task flushInsertTask,
func (t *flushTaskRunner) runFlushDel(task flushDeleteTask, deltaLogs *DelDataBuf, opts ...retry.Option) {
	t.deleteOnce.Do(func() {
		if deltaLogs == nil {
			t.deltaLogs = nil // []*DelDataBuf{}
		} else {
			t.deltaLogs = []*datapb.Binlog{
				{

View File

@ -1109,3 +1109,7 @@ func (s *Server) ListDatabases(ctx context.Context, request *milvuspb.ListDataba
func (s *Server) AllocTimestamp(ctx context.Context, req *milvuspb.AllocTimestampRequest) (*milvuspb.AllocTimestampResponse, error) {
	return s.proxy.AllocTimestamp(ctx, req)
}
func (s *Server) ReplicateMessage(ctx context.Context, req *milvuspb.ReplicateMessageRequest) (*milvuspb.ReplicateMessageResponse, error) {
return s.proxy.ReplicateMessage(ctx, req)
}
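
The grpc proxy layer simply forwards the new RPC to the proxy component. A minimal client-side sketch of invoking it, assuming the pinned milvus-proto go-api version that adds ReplicateMessage; the address and channel name are placeholders:

    package main

    import (
    	"context"
    	"fmt"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"

    	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
    )

    func main() {
    	// The target proxy must be a backup instance (common.ttMsgEnabled=false),
    	// otherwise the call is denied.
    	conn, err := grpc.Dial("localhost:19530",
    		grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		panic(err)
    	}
    	defer conn.Close()

    	client := milvuspb.NewMilvusServiceClient(conn)
    	resp, err := client.ReplicateMessage(context.Background(),
    		&milvuspb.ReplicateMessageRequest{ChannelName: "by-dev-replicate-msg"})
    	if err != nil {
    		panic(err)
    	}
    	// For the replicate-msg channel the proxy returns the channel's latest
    	// message ID, base64-encoded, as the position.
    	fmt.Println(resp.GetPosition())
    }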

View File

@ -508,7 +508,11 @@ func (m *MockProxy) AllocTimestamp(ctx context.Context, req *milvuspb.AllocTimes
return nil, nil return nil, nil
} }
func (m *MockProxy) ReplicateMessage(ctx context.Context, req *milvuspb.ReplicateMessageRequest) (*milvuspb.ReplicateMessageResponse, error) {
	return nil, nil
}

// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
type WaitOption struct {
	Duration time.Duration `json:"duration"`

View File

@ -4445,6 +4445,61 @@ func (_c *MockProxy_RenameCollection_Call) RunAndReturn(run func(context.Context
	return _c
}
// ReplicateMessage provides a mock function with given fields: ctx, req
func (_m *MockProxy) ReplicateMessage(ctx context.Context, req *milvuspb.ReplicateMessageRequest) (*milvuspb.ReplicateMessageResponse, error) {
ret := _m.Called(ctx, req)
var r0 *milvuspb.ReplicateMessageResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ReplicateMessageRequest) (*milvuspb.ReplicateMessageResponse, error)); ok {
return rf(ctx, req)
}
if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.ReplicateMessageRequest) *milvuspb.ReplicateMessageResponse); ok {
r0 = rf(ctx, req)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*milvuspb.ReplicateMessageResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *milvuspb.ReplicateMessageRequest) error); ok {
r1 = rf(ctx, req)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockProxy_ReplicateMessage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReplicateMessage'
type MockProxy_ReplicateMessage_Call struct {
*mock.Call
}
// ReplicateMessage is a helper method to define mock.On call
// - ctx context.Context
// - req *milvuspb.ReplicateMessageRequest
func (_e *MockProxy_Expecter) ReplicateMessage(ctx interface{}, req interface{}) *MockProxy_ReplicateMessage_Call {
return &MockProxy_ReplicateMessage_Call{Call: _e.mock.On("ReplicateMessage", ctx, req)}
}
func (_c *MockProxy_ReplicateMessage_Call) Run(run func(ctx context.Context, req *milvuspb.ReplicateMessageRequest)) *MockProxy_ReplicateMessage_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*milvuspb.ReplicateMessageRequest))
})
return _c
}
func (_c *MockProxy_ReplicateMessage_Call) Return(_a0 *milvuspb.ReplicateMessageResponse, _a1 error) *MockProxy_ReplicateMessage_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockProxy_ReplicateMessage_Call) RunAndReturn(run func(context.Context, *milvuspb.ReplicateMessageRequest) (*milvuspb.ReplicateMessageResponse, error)) *MockProxy_ReplicateMessage_Call {
_c.Call.Return(run)
return _c
}
// Search provides a mock function with given fields: _a0, _a1
func (_m *MockProxy) Search(_a0 context.Context, _a1 *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
	ret := _m.Called(_a0, _a1)

View File

@ -237,6 +237,7 @@ func (BinaryExpr_BinaryOp) EnumDescriptor() ([]byte, []int) {
type GenericValue struct {
	// Types that are valid to be assigned to Val:
//
	//	*GenericValue_BoolVal
	//	*GenericValue_Int64Val
	//	*GenericValue_FloatVal
@ -1297,6 +1298,7 @@ var xxx_messageInfo_AlwaysTrueExpr proto.InternalMessageInfo
type Expr struct {
	// Types that are valid to be assigned to Expr:
//
	//	*Expr_TermExpr
	//	*Expr_UnaryExpr
	//	*Expr_BinaryExpr
@ -1668,6 +1670,7 @@ func (m *QueryPlanNode) GetLimit() int64 {
type PlanNode struct {
	// Types that are valid to be assigned to Node:
//
	//	*PlanNode_VectorAnns
	//	*PlanNode_Predicates
	//	*PlanNode_Query

View File

@ -29,8 +29,9 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type InvalidateCollMetaCacheRequest struct {
	// MsgType:
	//
	//	DropCollection -> {meta cache, dml channels}
	//	Other          -> {meta cache}
	Base           *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
	DbName         string            `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"`
	CollectionName string            `protobuf:"bytes,3,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`

View File

@ -793,28 +793,28 @@ type RootCoordClient interface {
	GetComponentStates(ctx context.Context, in *milvuspb.GetComponentStatesRequest, opts ...grpc.CallOption) (*milvuspb.ComponentStates, error)
	GetTimeTickChannel(ctx context.Context, in *internalpb.GetTimeTickChannelRequest, opts ...grpc.CallOption) (*milvuspb.StringResponse, error)
	GetStatisticsChannel(ctx context.Context, in *internalpb.GetStatisticsChannelRequest, opts ...grpc.CallOption) (*milvuspb.StringResponse, error)
	// *
	// @brief This method is used to create collection
	//
	// @param CreateCollectionRequest, use to provide collection information to be created.
	//
	// @return Status
	CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
	// *
	// @brief This method is used to delete collection.
	//
	// @param DropCollectionRequest, collection name is going to be deleted.
	//
	// @return Status
	DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
	// *
	// @brief This method is used to test collection existence.
	//
	// @param HasCollectionRequest, collection name is going to be tested.
	//
	// @return BoolResponse
	HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest, opts ...grpc.CallOption) (*milvuspb.BoolResponse, error)
	// *
	// @brief This method is used to get collection schema.
	//
	// @param DescribeCollectionRequest, target collection name.
@ -825,28 +825,28 @@ type RootCoordClient interface {
	CreateAlias(ctx context.Context, in *milvuspb.CreateAliasRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
	DropAlias(ctx context.Context, in *milvuspb.DropAliasRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
	AlterAlias(ctx context.Context, in *milvuspb.AlterAliasRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
	// *
	// @brief This method is used to list all collections.
	//
	// @return StringListResponse, collection name list
	ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowCollectionsResponse, error)
	AlterCollection(ctx context.Context, in *milvuspb.AlterCollectionRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
	// *
	// @brief This method is used to create partition
	//
	// @return Status
	CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
	// *
	// @brief This method is used to drop partition
	//
	// @return Status
	DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
	// *
	// @brief This method is used to test partition existence.
	//
	// @return BoolResponse
	HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest, opts ...grpc.CallOption) (*milvuspb.BoolResponse, error)
	// *
	// @brief This method is used to show partition information
	//
	// @param ShowPartitionRequest, target collection name.
@ -854,7 +854,7 @@ type RootCoordClient interface {
	// @return StringListResponse
	ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowPartitionsResponse, error)
	ShowPartitionsInternal(ctx context.Context, in *milvuspb.ShowPartitionsRequest, opts ...grpc.CallOption) (*milvuspb.ShowPartitionsResponse, error)
	// rpc DescribeSegment(milvus.DescribeSegmentRequest) returns (milvus.DescribeSegmentResponse) {}
	ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequest, opts ...grpc.CallOption) (*milvuspb.ShowSegmentsResponse, error)
	AllocTimestamp(ctx context.Context, in *AllocTimestampRequest, opts ...grpc.CallOption) (*AllocTimestampResponse, error)
	AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc.CallOption) (*AllocIDResponse, error)
@ -1327,28 +1327,28 @@ type RootCoordServer interface {
	GetComponentStates(context.Context, *milvuspb.GetComponentStatesRequest) (*milvuspb.ComponentStates, error)
	GetTimeTickChannel(context.Context, *internalpb.GetTimeTickChannelRequest) (*milvuspb.StringResponse, error)
	GetStatisticsChannel(context.Context, *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error)
	// *
	// @brief This method is used to create collection
	//
	// @param CreateCollectionRequest, use to provide collection information to be created.
	//
	// @return Status
	CreateCollection(context.Context, *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
	// *
	// @brief This method is used to delete collection.
	//
	// @param DropCollectionRequest, collection name is going to be deleted.
	//
	// @return Status
	DropCollection(context.Context, *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
	// *
	// @brief This method is used to test collection existence.
	//
	// @param HasCollectionRequest, collection name is going to be tested.
	//
	// @return BoolResponse
	HasCollection(context.Context, *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
	// *
	// @brief This method is used to get collection schema.
	//
	// @param DescribeCollectionRequest, target collection name.
@ -1359,28 +1359,28 @@ type RootCoordServer interface {
	CreateAlias(context.Context, *milvuspb.CreateAliasRequest) (*commonpb.Status, error)
	DropAlias(context.Context, *milvuspb.DropAliasRequest) (*commonpb.Status, error)
	AlterAlias(context.Context, *milvuspb.AlterAliasRequest) (*commonpb.Status, error)
	// *
	// @brief This method is used to list all collections.
	//
	// @return StringListResponse, collection name list
	ShowCollections(context.Context, *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error)
	AlterCollection(context.Context, *milvuspb.AlterCollectionRequest) (*commonpb.Status, error)
	// *
	// @brief This method is used to create partition
	//
	// @return Status
	CreatePartition(context.Context, *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
	// *
	// @brief This method is used to drop partition
	//
	// @return Status
	DropPartition(context.Context, *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
	// *
	// @brief This method is used to test partition existence.
	//
	// @return BoolResponse
	HasPartition(context.Context, *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
	// *
	// @brief This method is used to show partition information
	//
	// @param ShowPartitionRequest, target collection name.
@ -1388,7 +1388,7 @@ type RootCoordServer interface {
	// @return StringListResponse
	ShowPartitions(context.Context, *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error)
	ShowPartitionsInternal(context.Context, *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error)
	// rpc DescribeSegment(milvus.DescribeSegmentRequest) returns (milvus.DescribeSegmentResponse) {}
	ShowSegments(context.Context, *milvuspb.ShowSegmentsRequest) (*milvuspb.ShowSegmentsResponse, error)
	AllocTimestamp(context.Context, *AllocTimestampRequest) (*AllocTimestampResponse, error)
	AllocID(context.Context, *AllocIDRequest) (*AllocIDResponse, error)

View File

@ -18,6 +18,7 @@ package proxy
import (
	"context"
"encoding/base64"
"fmt" "fmt"
"os" "os"
"strconv" "strconv"
@ -156,6 +157,7 @@ func (node *Proxy) CreateDatabase(ctx context.Context, request *milvuspb.CreateD
		Condition: NewTaskCondition(ctx),
		CreateDatabaseRequest: request,
		rootCoord: node.rootCoord,
replicateMsgStream: node.replicateMsgStream,
	}
	log := log.With(
@ -216,6 +218,7 @@ func (node *Proxy) DropDatabase(ctx context.Context, request *milvuspb.DropDatab
		Condition: NewTaskCondition(ctx),
		DropDatabaseRequest: request,
		rootCoord: node.rootCoord,
replicateMsgStream: node.replicateMsgStream,
	}
	log := log.With(
@ -580,6 +583,7 @@ func (node *Proxy) LoadCollection(ctx context.Context, request *milvuspb.LoadCol
		LoadCollectionRequest: request,
		queryCoord: node.queryCoord,
		datacoord: node.dataCoord,
replicateMsgStream: node.replicateMsgStream,
	}
	log := log.Ctx(ctx).With(
@ -652,7 +656,7 @@ func (node *Proxy) ReleaseCollection(ctx context.Context, request *milvuspb.Rele
		Condition: NewTaskCondition(ctx),
		ReleaseCollectionRequest: request,
		queryCoord: node.queryCoord,
		replicateMsgStream: node.replicateMsgStream,
	}
	log := log.Ctx(ctx).With(
@ -1746,11 +1750,12 @@ func (node *Proxy) CreateIndex(ctx context.Context, request *milvuspb.CreateInde
	defer sp.End()
	cit := &createIndexTask{
		ctx: ctx,
		Condition: NewTaskCondition(ctx),
		req: request,
		rootCoord: node.rootCoord,
		datacoord: node.dataCoord,
		replicateMsgStream: node.replicateMsgStream,
	}
	method := "CreateIndex"
@ -1964,11 +1969,12 @@ func (node *Proxy) DropIndex(ctx context.Context, request *milvuspb.DropIndexReq
	defer sp.End()
	dit := &dropIndexTask{
		ctx: ctx,
		Condition: NewTaskCondition(ctx),
		DropIndexRequest: request,
		dataCoord: node.dataCoord,
		queryCoord: node.queryCoord,
		replicateMsgStream: node.replicateMsgStream,
	}
	method := "DropIndex"
@ -2710,10 +2716,11 @@ func (node *Proxy) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*
	defer sp.End()
	ft := &flushTask{
		ctx: ctx,
		Condition: NewTaskCondition(ctx),
		FlushRequest: request,
		dataCoord: node.dataCoord,
		replicateMsgStream: node.replicateMsgStream,
	}
	method := "Flush"
@ -5023,6 +5030,104 @@ func (node *Proxy) Connect(ctx context.Context, request *milvuspb.ConnectRequest
	}, nil
}
func (node *Proxy) ReplicateMessage(ctx context.Context, req *milvuspb.ReplicateMessageRequest) (*milvuspb.ReplicateMessageResponse, error) {
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
return &milvuspb.ReplicateMessageResponse{Status: merr.Status(err)}, nil
}
if paramtable.Get().CommonCfg.TTMsgEnabled.GetAsBool() {
return &milvuspb.ReplicateMessageResponse{
Status: merr.Status(merr.ErrDenyReplicateMessage),
}, nil
}
var err error
ctxLog := log.Ctx(ctx)
if req.GetChannelName() == "" {
ctxLog.Warn("channel name is empty")
return &milvuspb.ReplicateMessageResponse{
Status: merr.Status(merr.WrapErrParameterInvalidMsg("invalid channel name for the replicate message request")),
}, nil
}
// get the latest position of the replicate msg channel
replicateMsgChannel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
if req.GetChannelName() == replicateMsgChannel {
msgID, err := msgstream.GetChannelLatestMsgID(ctx, node.factory, replicateMsgChannel)
if err != nil {
ctxLog.Warn("failed to get the latest message id of the replicate msg channel", zap.Error(err))
return &milvuspb.ReplicateMessageResponse{Status: merr.Status(err)}, nil
}
position := base64.StdEncoding.EncodeToString(msgID)
return &milvuspb.ReplicateMessageResponse{Status: merr.Status(nil), Position: position}, nil
}
msgPack := &msgstream.MsgPack{
BeginTs: req.BeginTs,
EndTs: req.EndTs,
Msgs: make([]msgstream.TsMsg, 0),
StartPositions: req.StartPositions,
EndPositions: req.EndPositions,
}
// getTsMsgFromConsumerMsg
for i, msgBytes := range req.Msgs {
header := commonpb.MsgHeader{}
err = proto.Unmarshal(msgBytes, &header)
if err != nil {
ctxLog.Warn("failed to unmarshal msg header", zap.Int("index", i), zap.Error(err))
return &milvuspb.ReplicateMessageResponse{Status: merr.Status(err)}, nil
}
if header.GetBase() == nil {
ctxLog.Warn("msg header base is nil", zap.Int("index", i))
return &milvuspb.ReplicateMessageResponse{Status: merr.Status(merr.ErrInvalidMsgBytes)}, nil
}
tsMsg, err := node.replicateStreamManager.GetMsgDispatcher().Unmarshal(msgBytes, header.GetBase().GetMsgType())
if err != nil {
ctxLog.Warn("failed to unmarshal msg", zap.Int("index", i), zap.Error(err))
return &milvuspb.ReplicateMessageResponse{Status: merr.Status(merr.ErrInvalidMsgBytes)}, nil
}
switch realMsg := tsMsg.(type) {
case *msgstream.InsertMsg:
assignedSegmentInfos, err := node.segAssigner.GetSegmentID(realMsg.GetCollectionID(), realMsg.GetPartitionID(),
realMsg.GetShardName(), uint32(realMsg.NumRows), req.EndTs)
if err != nil {
ctxLog.Warn("failed to get segment id", zap.Error(err))
return &milvuspb.ReplicateMessageResponse{Status: merr.Status(err)}, nil
}
if len(assignedSegmentInfos) == 0 {
ctxLog.Warn("no segment id assigned")
return &milvuspb.ReplicateMessageResponse{Status: merr.Status(merr.ErrNoAssignSegmentID)}, nil
}
for assignSegmentID := range assignedSegmentInfos {
realMsg.SegmentID = assignSegmentID
break
}
}
msgPack.Msgs = append(msgPack.Msgs, tsMsg)
}
msgStream, err := node.replicateStreamManager.GetReplicateMsgStream(ctx, req.ChannelName)
if err != nil {
ctxLog.Warn("failed to get msg stream from the replicate stream manager", zap.Error(err))
return &milvuspb.ReplicateMessageResponse{
Status: merr.Status(err),
}, nil
}
messageIDsMap, err := msgStream.Broadcast(msgPack)
if err != nil {
ctxLog.Warn("failed to produce msg", zap.Error(err))
return &milvuspb.ReplicateMessageResponse{Status: merr.Status(err)}, nil
}
var position string
if len(messageIDsMap[req.GetChannelName()]) == 0 {
ctxLog.Warn("no message id returned")
} else {
messageIDs := messageIDsMap[req.GetChannelName()]
position = base64.StdEncoding.EncodeToString(messageIDs[len(messageIDs)-1].Serialize())
}
return &milvuspb.ReplicateMessageResponse{Status: merr.Status(nil), Position: position}, nil
}
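
The handler above expects req.Msgs to be raw serialized TsMsg bytes whose header identifies the message type. A rough sketch of how a replication client (for example a CDC tool) could package consumed messages, under the assumption that each TsMsg's Marshal returns a byte slice; buildReplicateRequest and the channel name are illustrative:

    package main

    import (
    	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
    	"github.com/milvus-io/milvus/pkg/mq/msgstream"
    )

    // buildReplicateRequest packages consumed TsMsgs for ReplicateMessage;
    // error handling is trimmed and the channel name is a placeholder.
    func buildReplicateRequest(msgs []msgstream.TsMsg) (*milvuspb.ReplicateMessageRequest, error) {
    	req := &milvuspb.ReplicateMessageRequest{
    		ChannelName: "by-dev-rootcoord-dml_0",
    		BeginTs:     msgs[0].BeginTs(),
    		EndTs:       msgs[len(msgs)-1].EndTs(),
    	}
    	for _, m := range msgs {
    		payload, err := m.Marshal(m) // TsMsg marshals itself; result type depends on the msg
    		if err != nil {
    			return nil, err
    		}
    		if bytes, ok := payload.([]byte); ok {
    			req.Msgs = append(req.Msgs, bytes)
    		}
    	}
    	return req, nil
    }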
func (node *Proxy) ListClientInfos(ctx context.Context, req *proxypb.ListClientInfosRequest) (*proxypb.ListClientInfosResponse, error) {
	if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
		return &proxypb.ListClientInfosResponse{Status: merr.Status(err)}, nil

View File

@ -18,16 +18,20 @@ package proxy
import (
	"context"
"encoding/base64"
"testing" "testing"
"time"
"github.com/cockroachdb/errors" "github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/mock"
"go.uber.org/zap"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
"github.com/milvus-io/milvus/internal/mocks" "github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/internal/proto/datapb" "github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/proxypb" "github.com/milvus-io/milvus/internal/proto/proxypb"
@ -35,8 +39,13 @@ import (
"github.com/milvus-io/milvus/internal/proto/rootcoordpb" "github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/util/dependency" "github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/sessionutil" "github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/mq/msgstream/mqwrapper"
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/merr" "github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable" "github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/resource"
)

func TestProxy_InvalidateCollectionMetaCache_remove_stream(t *testing.T) {
@ -402,6 +411,10 @@ func TestProxy_FlushAll_DbCollection(t *testing.T) {
	node.tsoAllocator = &timestampAllocator{
		tso: newMockTimestampAllocatorInterface(),
	}
rpcRequestChannel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
node.replicateMsgStream, err = node.factory.NewMsgStream(node.ctx)
assert.NoError(t, err)
node.replicateMsgStream.AsProducer([]string{rpcRequestChannel})
	Params.Save(Params.ProxyCfg.MaxTaskNum.Key, "1000")
	node.sched, err = newTaskScheduler(ctx, node.tsoAllocator, node.factory)
@ -441,6 +454,10 @@ func TestProxy_FlushAll(t *testing.T) {
	node.tsoAllocator = &timestampAllocator{
		tso: newMockTimestampAllocatorInterface(),
	}
rpcRequestChannel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
node.replicateMsgStream, err = node.factory.NewMsgStream(node.ctx)
assert.NoError(t, err)
node.replicateMsgStream.AsProducer([]string{rpcRequestChannel})
	Params.Save(Params.ProxyCfg.MaxTaskNum.Key, "1000")
	node.sched, err = newTaskScheduler(ctx, node.tsoAllocator, node.factory)
@ -909,6 +926,11 @@ func TestProxyCreateDatabase(t *testing.T) {
	assert.NoError(t, err)
	defer node.sched.Close()
rpcRequestChannel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
node.replicateMsgStream, err = node.factory.NewMsgStream(node.ctx)
assert.NoError(t, err)
node.replicateMsgStream.AsProducer([]string{rpcRequestChannel})
t.Run("create database fail", func(t *testing.T) { t.Run("create database fail", func(t *testing.T) {
rc := mocks.NewMockRootCoordClient(t) rc := mocks.NewMockRootCoordClient(t)
rc.On("CreateDatabase", mock.Anything, mock.Anything). rc.On("CreateDatabase", mock.Anything, mock.Anything).
@ -963,6 +985,11 @@ func TestProxyDropDatabase(t *testing.T) {
	assert.NoError(t, err)
	defer node.sched.Close()
rpcRequestChannel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
node.replicateMsgStream, err = node.factory.NewMsgStream(node.ctx)
assert.NoError(t, err)
node.replicateMsgStream.AsProducer([]string{rpcRequestChannel})
t.Run("drop database fail", func(t *testing.T) { t.Run("drop database fail", func(t *testing.T) {
rc := mocks.NewMockRootCoordClient(t) rc := mocks.NewMockRootCoordClient(t)
rc.On("DropDatabase", mock.Anything, mock.Anything). rc.On("DropDatabase", mock.Anything, mock.Anything).
@ -1092,3 +1119,261 @@ func TestProxy_AllocTimestamp(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode()) assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
}) })
} }
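The test setups above repeat the same wiring: build a msg stream from the node factory and register it as a producer on the replicate channel before scheduling DDL tasks. A minimal sketch of that pattern as a shared helper (the helper name wireReplicateStream is hypothetical, not part of this change):

func wireReplicateStream(t *testing.T, node *Proxy) {
	// channel name comes from the common config, e.g. "replicate-msg"
	channel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
	stream, err := node.factory.NewMsgStream(node.ctx)
	assert.NoError(t, err)
	stream.AsProducer([]string{channel})
	node.replicateMsgStream = stream
}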
func TestProxy_ReplicateMessage(t *testing.T) {
paramtable.Init()
defer paramtable.Get().Save(paramtable.Get().CommonCfg.TTMsgEnabled.Key, "true")
t.Run("proxy unhealthy", func(t *testing.T) {
node := &Proxy{}
node.UpdateStateCode(commonpb.StateCode_Abnormal)
resp, err := node.ReplicateMessage(context.TODO(), nil)
assert.NoError(t, err)
assert.NotEqual(t, 0, resp.GetStatus().GetCode())
})
t.Run("not backup instance", func(t *testing.T) {
node := &Proxy{}
node.UpdateStateCode(commonpb.StateCode_Healthy)
resp, err := node.ReplicateMessage(context.TODO(), nil)
assert.NoError(t, err)
assert.NotEqual(t, 0, resp.GetStatus().GetCode())
})
t.Run("empty channel name", func(t *testing.T) {
node := &Proxy{}
node.UpdateStateCode(commonpb.StateCode_Healthy)
paramtable.Get().Save(paramtable.Get().CommonCfg.TTMsgEnabled.Key, "false")
resp, err := node.ReplicateMessage(context.TODO(), nil)
assert.NoError(t, err)
assert.NotEqual(t, 0, resp.GetStatus().GetCode())
})
t.Run("fail to get msg stream", func(t *testing.T) {
factory := newMockMsgStreamFactory()
factory.f = func(ctx context.Context) (msgstream.MsgStream, error) {
return nil, errors.New("mock error: get msg stream")
}
resourceManager := resource.NewManager(time.Second, 2*time.Second, nil)
manager := NewReplicateStreamManager(context.Background(), factory, resourceManager)
node := &Proxy{
replicateStreamManager: manager,
}
node.UpdateStateCode(commonpb.StateCode_Healthy)
paramtable.Get().Save(paramtable.Get().CommonCfg.TTMsgEnabled.Key, "false")
resp, err := node.ReplicateMessage(context.TODO(), &milvuspb.ReplicateMessageRequest{ChannelName: "unit_test_replicate_message"})
assert.NoError(t, err)
assert.NotEqual(t, 0, resp.GetStatus().GetCode())
})
t.Run("get latest position", func(t *testing.T) {
paramtable.Get().Save(paramtable.Get().CommonCfg.TTMsgEnabled.Key, "false")
defer paramtable.Get().Save(paramtable.Get().CommonCfg.TTMsgEnabled.Key, "true")
factory := dependency.NewMockFactory(t)
stream := msgstream.NewMockMsgStream(t)
mockMsgID := mqwrapper.NewMockMessageID(t)
factory.EXPECT().NewMsgStream(mock.Anything).Return(stream, nil).Once()
mockMsgID.EXPECT().Serialize().Return([]byte("mock")).Once()
stream.EXPECT().AsConsumer(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
stream.EXPECT().GetLatestMsgID(mock.Anything).Return(mockMsgID, nil).Once()
stream.EXPECT().Close().Return()
node := &Proxy{
factory: factory,
}
node.UpdateStateCode(commonpb.StateCode_Healthy)
resp, err := node.ReplicateMessage(context.TODO(), &milvuspb.ReplicateMessageRequest{
ChannelName: Params.CommonCfg.ReplicateMsgChannel.GetValue(),
})
assert.NoError(t, err)
assert.EqualValues(t, 0, resp.GetStatus().GetCode())
assert.Equal(t, base64.StdEncoding.EncodeToString([]byte("mock")), resp.GetPosition())
factory.EXPECT().NewMsgStream(mock.Anything).Return(nil, errors.New("mock")).Once()
resp, err = node.ReplicateMessage(context.TODO(), &milvuspb.ReplicateMessageRequest{
ChannelName: Params.CommonCfg.ReplicateMsgChannel.GetValue(),
})
assert.NoError(t, err)
assert.NotEqualValues(t, 0, resp.GetStatus().GetCode())
})
t.Run("invalid msg pack", func(t *testing.T) {
node := &Proxy{
replicateStreamManager: NewReplicateStreamManager(context.Background(), nil, nil),
}
node.UpdateStateCode(commonpb.StateCode_Healthy)
paramtable.Get().Save(paramtable.Get().CommonCfg.TTMsgEnabled.Key, "false")
{
resp, err := node.ReplicateMessage(context.TODO(), &milvuspb.ReplicateMessageRequest{
ChannelName: "unit_test_replicate_message",
Msgs: [][]byte{{1, 2, 3}},
})
assert.NoError(t, err)
assert.NotEqual(t, 0, resp.GetStatus().GetCode())
}
{
timeTickResult := msgpb.TimeTickMsg{}
timeTickMsg := &msgstream.TimeTickMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: 1,
EndTimestamp: 10,
HashValues: []uint32{0},
},
TimeTickMsg: timeTickResult,
}
msgBytes, _ := timeTickMsg.Marshal(timeTickMsg)
resp, err := node.ReplicateMessage(context.TODO(), &milvuspb.ReplicateMessageRequest{
ChannelName: "unit_test_replicate_message",
Msgs: [][]byte{msgBytes.([]byte)},
})
assert.NoError(t, err)
log.Info("resp", zap.Any("resp", resp))
assert.NotEqual(t, 0, resp.GetStatus().GetCode())
}
{
timeTickResult := msgpb.TimeTickMsg{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType(-1)),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(10),
commonpbutil.WithSourceID(-1),
),
}
timeTickMsg := &msgstream.TimeTickMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: 1,
EndTimestamp: 10,
HashValues: []uint32{0},
},
TimeTickMsg: timeTickResult,
}
msgBytes, _ := timeTickMsg.Marshal(timeTickMsg)
resp, err := node.ReplicateMessage(context.TODO(), &milvuspb.ReplicateMessageRequest{
ChannelName: "unit_test_replicate_message",
Msgs: [][]byte{msgBytes.([]byte)},
})
assert.NoError(t, err)
log.Info("resp", zap.Any("resp", resp))
assert.NotEqual(t, 0, resp.GetStatus().GetCode())
}
})
t.Run("success", func(t *testing.T) {
paramtable.Init()
factory := newMockMsgStreamFactory()
msgStreamObj := msgstream.NewMockMsgStream(t)
msgStreamObj.EXPECT().SetRepackFunc(mock.Anything).Return()
msgStreamObj.EXPECT().AsProducer(mock.Anything).Return()
msgStreamObj.EXPECT().EnableProduce(mock.Anything).Return()
msgStreamObj.EXPECT().Close().Return()
mockMsgID1 := mqwrapper.NewMockMessageID(t)
mockMsgID2 := mqwrapper.NewMockMessageID(t)
mockMsgID2.EXPECT().Serialize().Return([]byte("mock message id 2"))
broadcastMock := msgStreamObj.EXPECT().Broadcast(mock.Anything).Return(map[string][]mqwrapper.MessageID{
"unit_test_replicate_message": {mockMsgID1, mockMsgID2},
}, nil)
factory.f = func(ctx context.Context) (msgstream.MsgStream, error) {
return msgStreamObj, nil
}
resourceManager := resource.NewManager(time.Second, 2*time.Second, nil)
manager := NewReplicateStreamManager(context.Background(), factory, resourceManager)
ctx := context.Background()
dataCoord := &mockDataCoord{}
dataCoord.expireTime = Timestamp(1000)
segAllocator, err := newSegIDAssigner(ctx, dataCoord, getLastTick1)
assert.NoError(t, err)
segAllocator.Start()
node := &Proxy{
replicateStreamManager: manager,
segAssigner: segAllocator,
}
node.UpdateStateCode(commonpb.StateCode_Healthy)
paramtable.Get().Save(paramtable.Get().CommonCfg.TTMsgEnabled.Key, "false")
insertMsg := &msgstream.InsertMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: 4,
EndTimestamp: 10,
HashValues: []uint32{0},
MsgPosition: &msgstream.MsgPosition{
ChannelName: "unit_test_replicate_message",
MsgID: []byte("mock message id 2"),
},
},
InsertRequest: msgpb.InsertRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Insert,
MsgID: 10001,
Timestamp: 10,
SourceID: -1,
},
ShardName: "unit_test_replicate_message_v1",
DbName: "default",
CollectionName: "foo_collection",
PartitionName: "_default",
DbID: 1,
CollectionID: 11,
PartitionID: 22,
SegmentID: 33,
Timestamps: []uint64{10},
RowIDs: []int64{66},
NumRows: 1,
},
}
msgBytes, _ := insertMsg.Marshal(insertMsg)
replicateRequest := &milvuspb.ReplicateMessageRequest{
ChannelName: "unit_test_replicate_message",
BeginTs: 1,
EndTs: 10,
Msgs: [][]byte{msgBytes.([]byte)},
StartPositions: []*msgpb.MsgPosition{
{ChannelName: "unit_test_replicate_message", MsgID: []byte("mock message id 1")},
},
EndPositions: []*msgpb.MsgPosition{
{ChannelName: "unit_test_replicate_message", MsgID: []byte("mock message id 2")},
},
}
resp, err := node.ReplicateMessage(context.TODO(), replicateRequest)
assert.NoError(t, err)
assert.EqualValues(t, 0, resp.GetStatus().GetCode())
assert.Equal(t, base64.StdEncoding.EncodeToString([]byte("mock message id 2")), resp.GetPosition())
res := resourceManager.Delete(ReplicateMsgStreamTyp, replicateRequest.GetChannelName())
assert.NotNil(t, res)
time.Sleep(2 * time.Second)
{
broadcastMock.Unset()
broadcastMock = msgStreamObj.EXPECT().Broadcast(mock.Anything).Return(nil, errors.New("mock error: broadcast"))
resp, err := node.ReplicateMessage(context.TODO(), replicateRequest)
assert.NoError(t, err)
assert.NotEqualValues(t, 0, resp.GetStatus().GetCode())
resourceManager.Delete(ReplicateMsgStreamTyp, replicateRequest.GetChannelName())
time.Sleep(2 * time.Second)
}
{
broadcastMock.Unset()
broadcastMock = msgStreamObj.EXPECT().Broadcast(mock.Anything).Return(map[string][]mqwrapper.MessageID{
"unit_test_replicate_message": {},
}, nil)
resp, err := node.ReplicateMessage(context.TODO(), replicateRequest)
assert.NoError(t, err)
assert.EqualValues(t, 0, resp.GetStatus().GetCode())
assert.Empty(t, resp.GetPosition())
resourceManager.Delete(ReplicateMsgStreamTyp, replicateRequest.GetChannelName())
time.Sleep(2 * time.Second)
broadcastMock.Unset()
}
})
}
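From a caller's point of view, the happy path exercised above boils down to marshalling a TsMsg and handing the raw bytes to ReplicateMessage on an instance whose ttMsgEnabled is false. A hedged sketch, with proxyClient as a placeholder for any client holding the Proxy API and the request fields taken from the test:

insertMsg := &msgstream.InsertMsg{ /* BaseMsg and InsertRequest as in the test above */ }
msgBytes, _ := insertMsg.Marshal(insertMsg)
resp, err := proxyClient.ReplicateMessage(ctx, &milvuspb.ReplicateMessageRequest{
	ChannelName: "unit_test_replicate_message",
	BeginTs:     1,
	EndTs:       10,
	Msgs:        [][]byte{msgBytes.([]byte)},
})
// on success, resp.GetStatus().GetCode() is 0 and resp.GetPosition() holds
// the base64-encoded message id of the last broadcast message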

View File

@ -10,9 +10,10 @@ import (
type mockMsgStream struct { type mockMsgStream struct {
msgstream.MsgStream msgstream.MsgStream
asProducer func([]string) asProducer func([]string)
setRepack func(repackFunc msgstream.RepackFunc) setRepack func(repackFunc msgstream.RepackFunc)
close func() close func()
enableProduce func(bool)
} }
func (m *mockMsgStream) AsProducer(producers []string) { func (m *mockMsgStream) AsProducer(producers []string) {
@ -33,6 +34,12 @@ func (m *mockMsgStream) Close() {
} }
} }
func (m *mockMsgStream) EnableProduce(enabled bool) {
if m.enableProduce != nil {
m.enableProduce(enabled)
}
}
func newMockMsgStream() *mockMsgStream { func newMockMsgStream() *mockMsgStream {
return &mockMsgStream{} return &mockMsgStream{}
} }

View File

@ -306,6 +306,9 @@ func (ms *simpleMockMsgStream) CheckTopicValid(topic string) error {
return nil return nil
} }
func (ms *simpleMockMsgStream) EnableProduce(enabled bool) {
}
func newSimpleMockMsgStream() *simpleMockMsgStream { func newSimpleMockMsgStream() *simpleMockMsgStream {
return &simpleMockMsgStream{ return &simpleMockMsgStream{
msgChan: make(chan *msgstream.MsgPack, 1024), msgChan: make(chan *msgstream.MsgPack, 1024),

View File

@ -40,11 +40,13 @@ import (
"github.com/milvus-io/milvus/internal/util/sessionutil" "github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/pkg/log" "github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/metrics" "github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/commonpbutil" "github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/logutil" "github.com/milvus-io/milvus/pkg/util/logutil"
"github.com/milvus-io/milvus/pkg/util/metricsinfo" "github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable" "github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/ratelimitutil" "github.com/milvus-io/milvus/pkg/util/ratelimitutil"
"github.com/milvus-io/milvus/pkg/util/resource"
"github.com/milvus-io/milvus/pkg/util/tsoutil" "github.com/milvus-io/milvus/pkg/util/tsoutil"
"github.com/milvus-io/milvus/pkg/util/typeutil" "github.com/milvus-io/milvus/pkg/util/typeutil"
) )
@ -88,6 +90,8 @@ type Proxy struct {
chMgr channelsMgr chMgr channelsMgr
replicateMsgStream msgstream.MsgStream
sched *taskScheduler sched *taskScheduler
chTicker channelsTimeTicker chTicker channelsTimeTicker
@ -111,6 +115,10 @@ type Proxy struct {
// for load balance in replicas // for load balance in replicas
lbPolicy LBPolicy lbPolicy LBPolicy
// resource manager
resourceManager resource.Manager
replicateStreamManager *ReplicateStreamManager
} }
// NewProxy returns a Proxy struct. // NewProxy returns a Proxy struct.
@ -121,14 +129,18 @@ func NewProxy(ctx context.Context, factory dependency.Factory) (*Proxy, error) {
mgr := newShardClientMgr() mgr := newShardClientMgr()
lbPolicy := NewLBPolicyImpl(mgr) lbPolicy := NewLBPolicyImpl(mgr)
lbPolicy.Start(ctx) lbPolicy.Start(ctx)
resourceManager := resource.NewManager(10*time.Second, 20*time.Second, make(map[string]time.Duration))
replicateStreamManager := NewReplicateStreamManager(ctx, factory, resourceManager)
node := &Proxy{ node := &Proxy{
ctx: ctx1, ctx: ctx1,
cancel: cancel, cancel: cancel,
factory: factory, factory: factory,
searchResultCh: make(chan *internalpb.SearchResults, n), searchResultCh: make(chan *internalpb.SearchResults, n),
shardMgr: mgr, shardMgr: mgr,
multiRateLimiter: NewMultiRateLimiter(), multiRateLimiter: NewMultiRateLimiter(),
lbPolicy: lbPolicy, lbPolicy: lbPolicy,
resourceManager: resourceManager,
replicateStreamManager: replicateStreamManager,
} }
node.UpdateStateCode(commonpb.StateCode_Abnormal) node.UpdateStateCode(commonpb.StateCode_Abnormal)
logutil.Logger(ctx).Debug("create a new Proxy instance", zap.Any("state", node.stateCode.Load())) logutil.Logger(ctx).Debug("create a new Proxy instance", zap.Any("state", node.stateCode.Load()))
@ -250,6 +262,17 @@ func (node *Proxy) Init() error {
node.chMgr = chMgr node.chMgr = chMgr
log.Debug("create channels manager done", zap.String("role", typeutil.ProxyRole)) log.Debug("create channels manager done", zap.String("role", typeutil.ProxyRole))
replicateMsgChannel := Params.CommonCfg.ReplicateMsgChannel.GetValue()
node.replicateMsgStream, err = node.factory.NewMsgStream(node.ctx)
if err != nil {
log.Warn("failed to create replicate msg stream",
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", paramtable.GetNodeID()),
zap.Error(err))
return err
}
node.replicateMsgStream.EnableProduce(true)
node.replicateMsgStream.AsProducer([]string{replicateMsgChannel})
node.sched, err = newTaskScheduler(node.ctx, node.tsoAllocator, node.factory) node.sched, err = newTaskScheduler(node.ctx, node.tsoAllocator, node.factory)
if err != nil { if err != nil {
log.Warn("failed to create task scheduler", zap.String("role", typeutil.ProxyRole), zap.Error(err)) log.Warn("failed to create task scheduler", zap.String("role", typeutil.ProxyRole), zap.Error(err))
@ -287,6 +310,9 @@ func (node *Proxy) sendChannelsTimeTickLoop() {
log.Info("send channels time tick loop exit") log.Info("send channels time tick loop exit")
return return
case <-ticker.C: case <-ticker.C:
if !Params.CommonCfg.TTMsgEnabled.GetAsBool() {
continue
}
stats, ts, err := node.chTicker.getMinTsStatistics() stats, ts, err := node.chTicker.getMinTsStatistics()
if err != nil { if err != nil {
log.Warn("sendChannelsTimeTickLoop.getMinTsStatistics", zap.Error(err)) log.Warn("sendChannelsTimeTickLoop.getMinTsStatistics", zap.Error(err))
@ -442,6 +468,10 @@ func (node *Proxy) Stop() error {
node.lbPolicy.Close() node.lbPolicy.Close()
} }
if node.resourceManager != nil {
node.resourceManager.Close()
}
// https://github.com/milvus-io/milvus/issues/12282 // https://github.com/milvus-io/milvus/issues/12282
node.UpdateStateCode(commonpb.StateCode_Abnormal) node.UpdateStateCode(commonpb.StateCode_Abnormal)

View File

@ -81,3 +81,14 @@ func defaultInsertRepackFunc(
} }
return pack, nil return pack, nil
} }
func replicatePackFunc(
tsMsgs []msgstream.TsMsg,
hashKeys [][]int32,
) (map[int32]*msgstream.MsgPack, error) {
return map[int32]*msgstream.MsgPack{
0: {
Msgs: tsMsgs,
},
}, nil
}
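Unlike defaultInsertRepackFunc, replicatePackFunc deliberately ignores the hash keys and collapses every message into bucket 0, so replicated messages keep their original order on the single replicate channel. For example:

packs, _ := replicatePackFunc([]msgstream.TsMsg{msgA, msgB}, nil) // msgA, msgB: any TsMsg values
// packs has exactly one entry, key 0, with Msgs == []msgstream.TsMsg{msgA, msgB}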

View File

@ -0,0 +1,72 @@
package proxy
import (
"context"
"time"
"go.uber.org/zap"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/resource"
)
const (
ReplicateMsgStreamTyp = "replicate_msg_stream"
ReplicateMsgStreamExpireTime = 30 * time.Second
)
type ReplicateStreamManager struct {
ctx context.Context
factory msgstream.Factory
dispatcher msgstream.UnmarshalDispatcher
resourceManager resource.Manager
}
func NewReplicateStreamManager(ctx context.Context, factory msgstream.Factory, resourceManager resource.Manager) *ReplicateStreamManager {
manager := &ReplicateStreamManager{
ctx: ctx,
factory: factory,
dispatcher: (&msgstream.ProtoUDFactory{}).NewUnmarshalDispatcher(),
resourceManager: resourceManager,
}
return manager
}
func (m *ReplicateStreamManager) newMsgStreamResource(channel string) resource.NewResourceFunc {
return func() (resource.Resource, error) {
msgStream, err := m.factory.NewMsgStream(m.ctx)
if err != nil {
log.Ctx(m.ctx).Warn("failed to create msg stream", zap.String("channel", channel), zap.Error(err))
return nil, err
}
msgStream.SetRepackFunc(replicatePackFunc)
msgStream.AsProducer([]string{channel})
msgStream.EnableProduce(true)
res := resource.NewSimpleResource(msgStream, ReplicateMsgStreamTyp, channel, ReplicateMsgStreamExpireTime, func() {
msgStream.Close()
})
return res, nil
}
}
func (m *ReplicateStreamManager) GetReplicateMsgStream(ctx context.Context, channel string) (msgstream.MsgStream, error) {
ctxLog := log.Ctx(ctx).With(zap.String("proxy_channel", channel))
res, err := m.resourceManager.Get(ReplicateMsgStreamTyp, channel, m.newMsgStreamResource(channel))
if err != nil {
ctxLog.Warn("failed to get replicate msg stream", zap.String("channel", channel), zap.Error(err))
return nil, err
}
if obj, ok := res.Get().(msgstream.MsgStream); ok && obj != nil {
return obj, nil
}
ctxLog.Warn("invalid resource object", zap.Any("obj", res.Get()))
return nil, merr.ErrInvalidStreamObj
}
func (m *ReplicateStreamManager) GetMsgDispatcher() msgstream.UnmarshalDispatcher {
return m.dispatcher
}
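Callers get a cached producer per channel; the resource manager closes streams left unused past ReplicateMsgStreamExpireTime and the next Get rebuilds them. A minimal usage sketch (the channel name is illustrative only):

stream, err := manager.GetReplicateMsgStream(ctx, "by-dev-replicate-msg")
if err != nil {
	// e.g. merr.ErrInvalidStreamObj when the cached resource holds a non-stream object
	return err
}
_ = stream.Produce(&msgstream.MsgPack{ /* replicated msgs */ })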

View File

@ -0,0 +1,79 @@
package proxy
import (
"context"
"testing"
"time"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/resource"
)
func TestReplicateManager(t *testing.T) {
factory := newMockMsgStreamFactory()
resourceManager := resource.NewManager(time.Second, 2*time.Second, nil)
manager := NewReplicateStreamManager(context.Background(), factory, resourceManager)
{
factory.f = func(ctx context.Context) (msgstream.MsgStream, error) {
return nil, errors.New("mock msgstream fail")
}
_, err := manager.GetReplicateMsgStream(context.Background(), "test")
assert.Error(t, err)
}
{
mockMsgStream := newMockMsgStream()
i := 0
mockMsgStream.setRepack = func(repackFunc msgstream.RepackFunc) {
i++
}
mockMsgStream.asProducer = func(producers []string) {
i++
}
mockMsgStream.enableProduce = func(b bool) {
i++
}
mockMsgStream.close = func() {
i++
}
factory.f = func(ctx context.Context) (msgstream.MsgStream, error) {
return mockMsgStream, nil
}
_, err := manager.GetReplicateMsgStream(context.Background(), "test")
assert.NoError(t, err)
assert.Equal(t, 3, i)
time.Sleep(time.Second)
_, err = manager.GetReplicateMsgStream(context.Background(), "test")
assert.NoError(t, err)
assert.Equal(t, 3, i)
res := resourceManager.Delete(ReplicateMsgStreamTyp, "test")
assert.NotNil(t, res)
time.Sleep(2 * time.Second)
_, err = manager.GetReplicateMsgStream(context.Background(), "test")
assert.NoError(t, err)
assert.Equal(t, 7, i)
}
{
res := resourceManager.Delete(ReplicateMsgStreamTyp, "test")
assert.NotNil(t, res)
time.Sleep(2 * time.Second)
res, err := resourceManager.Get(ReplicateMsgStreamTyp, "test", func() (resource.Resource, error) {
return resource.NewResource(resource.WithObj("str")), nil
})
assert.NoError(t, err)
assert.Equal(t, "str", res.Get())
_, err = manager.GetReplicateMsgStream(context.Background(), "test")
assert.ErrorIs(t, err, merr.ErrInvalidStreamObj)
}
{
assert.NotNil(t, manager.GetMsgDispatcher())
}
}
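The test above leans on the resource manager's timing: NewManager(time.Second, 2*time.Second, nil) is assumed to garbage-collect roughly every second and to keep a deleted resource alive until its expiration, hence the 2-second sleeps before a fresh stream is rebuilt. The lifecycle, under those assumptions (newRes is any resource.NewResourceFunc):

res, _ := resourceManager.Get(ReplicateMsgStreamTyp, "test", newRes) // created on first use
resourceManager.Delete(ReplicateMsgStreamTyp, "test")                // marked for removal
time.Sleep(2 * time.Second)                                          // wait past gc + expiry
res, _ = resourceManager.Get(ReplicateMsgStreamTyp, "test", newRes)  // recreated from scratch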

View File

@ -156,7 +156,9 @@ func (t *createCollectionTask) SetTs(ts Timestamp) {
} }
func (t *createCollectionTask) OnEnqueue() error { func (t *createCollectionTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
t.Base.MsgType = commonpb.MsgType_CreateCollection t.Base.MsgType = commonpb.MsgType_CreateCollection
t.Base.SourceID = paramtable.GetNodeID() t.Base.SourceID = paramtable.GetNodeID()
return nil return nil
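This OnEnqueue pattern, repeated for each task type below, follows one rule: a request arriving via the replicate path already carries a Base with ReplicateInfo, and overwriting it with commonpbutil.NewMsgBase() would drop that marker. Keeping Base when it is non-nil preserves, for instance:

req := &milvuspb.DropCollectionRequest{
	Base: &commonpb.MsgBase{
		ReplicateInfo: &commonpb.ReplicateInfo{IsReplicate: true, MsgTimestamp: 100},
	},
}
// after OnEnqueue, req.Base and its ReplicateInfo are still intact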
@ -354,7 +356,9 @@ func (t *dropCollectionTask) SetTs(ts Timestamp) {
} }
func (t *dropCollectionTask) OnEnqueue() error { func (t *dropCollectionTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -786,7 +790,9 @@ func (t *alterCollectionTask) SetTs(ts Timestamp) {
} }
func (t *alterCollectionTask) OnEnqueue() error { func (t *alterCollectionTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -848,7 +854,9 @@ func (t *createPartitionTask) SetTs(ts Timestamp) {
} }
func (t *createPartitionTask) OnEnqueue() error { func (t *createPartitionTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -934,7 +942,9 @@ func (t *dropPartitionTask) SetTs(ts Timestamp) {
} }
func (t *dropPartitionTask) OnEnqueue() error { func (t *dropPartitionTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -1248,6 +1258,8 @@ type flushTask struct {
ctx context.Context ctx context.Context
dataCoord types.DataCoordClient dataCoord types.DataCoordClient
result *milvuspb.FlushResponse result *milvuspb.FlushResponse
replicateMsgStream msgstream.MsgStream
} }
func (t *flushTask) TraceCtx() context.Context { func (t *flushTask) TraceCtx() context.Context {
@ -1283,7 +1295,9 @@ func (t *flushTask) SetTs(ts Timestamp) {
} }
func (t *flushTask) OnEnqueue() error { func (t *flushTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -1323,6 +1337,7 @@ func (t *flushTask) Execute(ctx context.Context) error {
coll2SealTimes[collName] = resp.GetTimeOfSeal() coll2SealTimes[collName] = resp.GetTimeOfSeal()
coll2FlushTs[collName] = resp.GetFlushTs() coll2FlushTs[collName] = resp.GetFlushTs()
} }
SendReplicateMessagePack(ctx, t.replicateMsgStream, t.FlushRequest)
t.result = &milvuspb.FlushResponse{ t.result = &milvuspb.FlushResponse{
Status: merr.Success(), Status: merr.Success(),
DbName: t.GetDbName(), DbName: t.GetDbName(),
@ -1346,7 +1361,8 @@ type loadCollectionTask struct {
datacoord types.DataCoordClient datacoord types.DataCoordClient
result *commonpb.Status result *commonpb.Status
collectionID UniqueID collectionID UniqueID
replicateMsgStream msgstream.MsgStream
} }
func (t *loadCollectionTask) TraceCtx() context.Context { func (t *loadCollectionTask) TraceCtx() context.Context {
@ -1382,7 +1398,9 @@ func (t *loadCollectionTask) SetTs(ts Timestamp) {
} }
func (t *loadCollectionTask) OnEnqueue() error { func (t *loadCollectionTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -1472,6 +1490,7 @@ func (t *loadCollectionTask) Execute(ctx context.Context) (err error) {
if err != nil { if err != nil {
return fmt.Errorf("call query coordinator LoadCollection: %s", err) return fmt.Errorf("call query coordinator LoadCollection: %s", err)
} }
SendReplicateMessagePack(ctx, t.replicateMsgStream, t.LoadCollectionRequest)
return nil return nil
} }
@ -1492,9 +1511,9 @@ type releaseCollectionTask struct {
ctx context.Context ctx context.Context
queryCoord types.QueryCoordClient queryCoord types.QueryCoordClient
result *commonpb.Status result *commonpb.Status
chMgr channelsMgr
collectionID UniqueID collectionID UniqueID
replicateMsgStream msgstream.MsgStream
} }
func (t *releaseCollectionTask) TraceCtx() context.Context { func (t *releaseCollectionTask) TraceCtx() context.Context {
@ -1530,7 +1549,9 @@ func (t *releaseCollectionTask) SetTs(ts Timestamp) {
} }
func (t *releaseCollectionTask) OnEnqueue() error { func (t *releaseCollectionTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -1565,7 +1586,10 @@ func (t *releaseCollectionTask) Execute(ctx context.Context) (err error) {
t.result, err = t.queryCoord.ReleaseCollection(ctx, request) t.result, err = t.queryCoord.ReleaseCollection(ctx, request)
globalMetaCache.RemoveCollection(ctx, t.GetDbName(), t.CollectionName) globalMetaCache.RemoveCollection(ctx, t.GetDbName(), t.CollectionName)
if err != nil {
return err
}
SendReplicateMessagePack(ctx, t.replicateMsgStream, t.ReleaseCollectionRequest)
return err return err
} }
@ -1618,7 +1642,9 @@ func (t *loadPartitionsTask) SetTs(ts Timestamp) {
} }
func (t *loadPartitionsTask) OnEnqueue() error { func (t *loadPartitionsTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -1759,7 +1785,9 @@ func (t *releasePartitionsTask) SetTs(ts Timestamp) {
} }
func (t *releasePartitionsTask) OnEnqueue() error { func (t *releasePartitionsTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -1867,7 +1895,9 @@ func (t *CreateAliasTask) SetTs(ts Timestamp) {
// OnEnqueue defines the behavior task enqueued // OnEnqueue defines the behavior task enqueued
func (t *CreateAliasTask) OnEnqueue() error { func (t *CreateAliasTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -1947,7 +1977,9 @@ func (t *DropAliasTask) SetTs(ts Timestamp) {
} }
func (t *DropAliasTask) OnEnqueue() error { func (t *DropAliasTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -2013,7 +2045,9 @@ func (t *AlterAliasTask) SetTs(ts Timestamp) {
} }
func (t *AlterAliasTask) OnEnqueue() error { func (t *AlterAliasTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -2086,7 +2120,9 @@ func (t *CreateResourceGroupTask) SetTs(ts Timestamp) {
} }
func (t *CreateResourceGroupTask) OnEnqueue() error { func (t *CreateResourceGroupTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -2148,7 +2184,9 @@ func (t *DropResourceGroupTask) SetTs(ts Timestamp) {
} }
func (t *DropResourceGroupTask) OnEnqueue() error { func (t *DropResourceGroupTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -2331,7 +2369,9 @@ func (t *TransferNodeTask) SetTs(ts Timestamp) {
} }
func (t *TransferNodeTask) OnEnqueue() error { func (t *TransferNodeTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -2393,7 +2433,9 @@ func (t *TransferReplicaTask) SetTs(ts Timestamp) {
} }
func (t *TransferReplicaTask) OnEnqueue() error { func (t *TransferReplicaTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase() if t.Base == nil {
t.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }

View File

@ -6,6 +6,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/types" "github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/commonpbutil" "github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/paramtable" "github.com/milvus-io/milvus/pkg/util/paramtable"
) )
@ -16,6 +17,8 @@ type createDatabaseTask struct {
ctx context.Context ctx context.Context
rootCoord types.RootCoordClient rootCoord types.RootCoordClient
result *commonpb.Status result *commonpb.Status
replicateMsgStream msgstream.MsgStream
} }
func (cdt *createDatabaseTask) TraceCtx() context.Context { func (cdt *createDatabaseTask) TraceCtx() context.Context {
@ -51,7 +54,9 @@ func (cdt *createDatabaseTask) SetTs(ts Timestamp) {
} }
func (cdt *createDatabaseTask) OnEnqueue() error { func (cdt *createDatabaseTask) OnEnqueue() error {
cdt.Base = commonpbutil.NewMsgBase() if cdt.Base == nil {
cdt.Base = commonpbutil.NewMsgBase()
}
cdt.Base.MsgType = commonpb.MsgType_CreateDatabase cdt.Base.MsgType = commonpb.MsgType_CreateDatabase
cdt.Base.SourceID = paramtable.GetNodeID() cdt.Base.SourceID = paramtable.GetNodeID()
return nil return nil
@ -64,6 +69,9 @@ func (cdt *createDatabaseTask) PreExecute(ctx context.Context) error {
func (cdt *createDatabaseTask) Execute(ctx context.Context) error { func (cdt *createDatabaseTask) Execute(ctx context.Context) error {
var err error var err error
cdt.result, err = cdt.rootCoord.CreateDatabase(ctx, cdt.CreateDatabaseRequest) cdt.result, err = cdt.rootCoord.CreateDatabase(ctx, cdt.CreateDatabaseRequest)
if cdt.result != nil && cdt.result.ErrorCode == commonpb.ErrorCode_Success {
SendReplicateMessagePack(ctx, cdt.replicateMsgStream, cdt.CreateDatabaseRequest)
}
return err return err
} }
@ -77,6 +85,8 @@ type dropDatabaseTask struct {
ctx context.Context ctx context.Context
rootCoord types.RootCoordClient rootCoord types.RootCoordClient
result *commonpb.Status result *commonpb.Status
replicateMsgStream msgstream.MsgStream
} }
func (ddt *dropDatabaseTask) TraceCtx() context.Context { func (ddt *dropDatabaseTask) TraceCtx() context.Context {
@ -112,7 +122,9 @@ func (ddt *dropDatabaseTask) SetTs(ts Timestamp) {
} }
func (ddt *dropDatabaseTask) OnEnqueue() error { func (ddt *dropDatabaseTask) OnEnqueue() error {
ddt.Base = commonpbutil.NewMsgBase() if ddt.Base == nil {
ddt.Base = commonpbutil.NewMsgBase()
}
ddt.Base.MsgType = commonpb.MsgType_DropDatabase ddt.Base.MsgType = commonpb.MsgType_DropDatabase
ddt.Base.SourceID = paramtable.GetNodeID() ddt.Base.SourceID = paramtable.GetNodeID()
return nil return nil
@ -128,6 +140,7 @@ func (ddt *dropDatabaseTask) Execute(ctx context.Context) error {
if ddt.result != nil && ddt.result.ErrorCode == commonpb.ErrorCode_Success { if ddt.result != nil && ddt.result.ErrorCode == commonpb.ErrorCode_Success {
globalMetaCache.RemoveDatabase(ctx, ddt.DbName) globalMetaCache.RemoveDatabase(ctx, ddt.DbName)
SendReplicateMessagePack(ctx, ddt.replicateMsgStream, ddt.DropDatabaseRequest)
} }
return err return err
} }

View File

@ -45,6 +45,7 @@ func TestCreateDatabaseTask(t *testing.T) {
err = task.Execute(ctx) err = task.Execute(ctx)
assert.NoError(t, err) assert.NoError(t, err)
task.Base = nil
err = task.OnEnqueue() err = task.OnEnqueue()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID()) assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
@ -98,6 +99,7 @@ func TestDropDatabaseTask(t *testing.T) {
err = task.Execute(ctx) err = task.Execute(ctx)
assert.NoError(t, err) assert.NoError(t, err)
task.Base = nil
err = task.OnEnqueue() err = task.OnEnqueue()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID()) assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())

View File

@ -31,6 +31,7 @@ import (
"github.com/milvus-io/milvus/internal/types" "github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/pkg/common" "github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log" "github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/commonpbutil" "github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/funcutil" "github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/indexparamcheck" "github.com/milvus-io/milvus/pkg/util/indexparamcheck"
@ -59,6 +60,8 @@ type createIndexTask struct {
datacoord types.DataCoordClient datacoord types.DataCoordClient
result *commonpb.Status result *commonpb.Status
replicateMsgStream msgstream.MsgStream
isAutoIndex bool isAutoIndex bool
newIndexParams []*commonpb.KeyValuePair newIndexParams []*commonpb.KeyValuePair
newTypeParams []*commonpb.KeyValuePair newTypeParams []*commonpb.KeyValuePair
@ -101,7 +104,9 @@ func (cit *createIndexTask) SetTs(ts Timestamp) {
} }
func (cit *createIndexTask) OnEnqueue() error { func (cit *createIndexTask) OnEnqueue() error {
cit.req.Base = commonpbutil.NewMsgBase() if cit.req.Base == nil {
cit.req.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -414,7 +419,8 @@ func (cit *createIndexTask) Execute(ctx context.Context) error {
if cit.result.ErrorCode != commonpb.ErrorCode_Success { if cit.result.ErrorCode != commonpb.ErrorCode_Success {
return errors.New(cit.result.Reason) return errors.New(cit.result.Reason)
} }
return err SendReplicateMessagePack(ctx, cit.replicateMsgStream, cit.req)
return nil
} }
func (cit *createIndexTask) PostExecute(ctx context.Context) error { func (cit *createIndexTask) PostExecute(ctx context.Context) error {
@ -669,6 +675,8 @@ type dropIndexTask struct {
result *commonpb.Status result *commonpb.Status
collectionID UniqueID collectionID UniqueID
replicateMsgStream msgstream.MsgStream
} }
func (dit *dropIndexTask) TraceCtx() context.Context { func (dit *dropIndexTask) TraceCtx() context.Context {
@ -704,7 +712,9 @@ func (dit *dropIndexTask) SetTs(ts Timestamp) {
} }
func (dit *dropIndexTask) OnEnqueue() error { func (dit *dropIndexTask) OnEnqueue() error {
dit.Base = commonpbutil.NewMsgBase() if dit.Base == nil {
dit.Base = commonpbutil.NewMsgBase()
}
return nil return nil
} }
@ -743,6 +753,13 @@ func (dit *dropIndexTask) PreExecute(ctx context.Context) error {
} }
func (dit *dropIndexTask) Execute(ctx context.Context) error { func (dit *dropIndexTask) Execute(ctx context.Context) error {
ctxLog := log.Ctx(ctx)
ctxLog.Info("proxy drop index", zap.Int64("collID", dit.collectionID),
zap.String("field_name", dit.FieldName),
zap.String("index_name", dit.IndexName),
zap.String("db_name", dit.DbName),
)
var err error var err error
dit.result, err = dit.dataCoord.DropIndex(ctx, &indexpb.DropIndexRequest{ dit.result, err = dit.dataCoord.DropIndex(ctx, &indexpb.DropIndexRequest{
CollectionID: dit.collectionID, CollectionID: dit.collectionID,
@ -750,13 +767,18 @@ func (dit *dropIndexTask) Execute(ctx context.Context) error {
IndexName: dit.IndexName, IndexName: dit.IndexName,
DropAll: false, DropAll: false,
}) })
if err != nil {
ctxLog.Warn("drop index failed", zap.Error(err))
return err
}
if dit.result == nil { if dit.result == nil {
return errors.New("drop index resp is nil") return errors.New("drop index resp is nil")
} }
if dit.result.ErrorCode != commonpb.ErrorCode_Success { if dit.result.ErrorCode != commonpb.ErrorCode_Success {
return errors.New(dit.result.Reason) return errors.New(dit.result.Reason)
} }
return err SendReplicateMessagePack(ctx, dit.replicateMsgStream, dit.DropIndexRequest)
return nil
} }
func (dit *dropIndexTask) PostExecute(ctx context.Context) error { func (dit *dropIndexTask) PostExecute(ctx context.Context) error {

View File

@ -1504,3 +1504,76 @@ func checkDynamicFieldData(schema *schemapb.CollectionSchema, insertMsg *msgstre
insertMsg.FieldsData = append(insertMsg.FieldsData, dynamicData) insertMsg.FieldsData = append(insertMsg.FieldsData, dynamicData)
return nil return nil
} }
func SendReplicateMessagePack(ctx context.Context, replicateMsgStream msgstream.MsgStream, request interface{ GetBase() *commonpb.MsgBase }) {
if replicateMsgStream == nil || request == nil {
log.Warn("replicate msg stream or request is nil", zap.Any("request", request))
return
}
msgBase := request.GetBase()
ts := msgBase.GetTimestamp()
if msgBase.GetReplicateInfo().GetIsReplicate() {
ts = msgBase.GetReplicateInfo().GetMsgTimestamp()
}
getBaseMsg := func(ctx context.Context, ts uint64) msgstream.BaseMsg {
return msgstream.BaseMsg{
Ctx: ctx,
HashValues: []uint32{0},
BeginTimestamp: ts,
EndTimestamp: ts,
}
}
var tsMsg msgstream.TsMsg
switch r := request.(type) {
case *milvuspb.CreateDatabaseRequest:
tsMsg = &msgstream.CreateDatabaseMsg{
BaseMsg: getBaseMsg(ctx, ts),
CreateDatabaseRequest: *r,
}
case *milvuspb.DropDatabaseRequest:
tsMsg = &msgstream.DropDatabaseMsg{
BaseMsg: getBaseMsg(ctx, ts),
DropDatabaseRequest: *r,
}
case *milvuspb.FlushRequest:
tsMsg = &msgstream.FlushMsg{
BaseMsg: getBaseMsg(ctx, ts),
FlushRequest: *r,
}
case *milvuspb.LoadCollectionRequest:
tsMsg = &msgstream.LoadCollectionMsg{
BaseMsg: getBaseMsg(ctx, ts),
LoadCollectionRequest: *r,
}
case *milvuspb.ReleaseCollectionRequest:
tsMsg = &msgstream.ReleaseCollectionMsg{
BaseMsg: getBaseMsg(ctx, ts),
ReleaseCollectionRequest: *r,
}
case *milvuspb.CreateIndexRequest:
tsMsg = &msgstream.CreateIndexMsg{
BaseMsg: getBaseMsg(ctx, ts),
CreateIndexRequest: *r,
}
case *milvuspb.DropIndexRequest:
tsMsg = &msgstream.DropIndexMsg{
BaseMsg: getBaseMsg(ctx, ts),
DropIndexRequest: *r,
}
default:
log.Warn("unknown request", zap.Any("request", request))
return
}
msgPack := &msgstream.MsgPack{
BeginTs: ts,
EndTs: ts,
Msgs: []msgstream.TsMsg{tsMsg},
}
msgErr := replicateMsgStream.Produce(msgPack)
// ignore the error when the msg stream fails to produce the msg,
// because the failure can be recovered manually afterwards
if msgErr != nil {
log.Warn("send replicate msg failed", zap.Any("pack", msgPack), zap.Error(msgErr))
}
}
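A task wires this in by passing its replicate stream and the original request once the coordinator call has succeeded, mirroring the flush path above:

// inside a task's Execute, after the coordinator call succeeded
SendReplicateMessagePack(ctx, t.replicateMsgStream, t.FlushRequest)
// a nil stream or request is a no-op; unknown request types are logged and dropped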

View File

@ -2052,3 +2052,38 @@ func Test_validateMaxCapacityPerRow(t *testing.T) {
assert.Error(t, err) assert.Error(t, err)
}) })
} }
func TestSendReplicateMessagePack(t *testing.T) {
ctx := context.Background()
mockStream := msgstream.NewMockMsgStream(t)
t.Run("empty case", func(t *testing.T) {
SendReplicateMessagePack(ctx, nil, nil)
})
t.Run("produce fail", func(t *testing.T) {
mockStream.EXPECT().Produce(mock.Anything).Return(errors.New("produce error")).Once()
SendReplicateMessagePack(ctx, mockStream, &milvuspb.CreateDatabaseRequest{
Base: &commonpb.MsgBase{ReplicateInfo: &commonpb.ReplicateInfo{
IsReplicate: true,
MsgTimestamp: 100,
}},
})
})
t.Run("unknown request", func(t *testing.T) {
SendReplicateMessagePack(ctx, mockStream, &milvuspb.ListDatabasesRequest{})
})
t.Run("normal case", func(t *testing.T) {
mockStream.EXPECT().Produce(mock.Anything).Return(nil)
SendReplicateMessagePack(ctx, mockStream, &milvuspb.CreateDatabaseRequest{})
SendReplicateMessagePack(ctx, mockStream, &milvuspb.DropDatabaseRequest{})
SendReplicateMessagePack(ctx, mockStream, &milvuspb.FlushRequest{})
SendReplicateMessagePack(ctx, mockStream, &milvuspb.LoadCollectionRequest{})
SendReplicateMessagePack(ctx, mockStream, &milvuspb.ReleaseCollectionRequest{})
SendReplicateMessagePack(ctx, mockStream, &milvuspb.CreateIndexRequest{})
SendReplicateMessagePack(ctx, mockStream, &milvuspb.DropIndexRequest{})
})
}

View File

@ -349,8 +349,7 @@ func (t *createCollectionTask) Prepare(ctx context.Context) error {
return t.assignChannels() return t.assignChannels()
} }
func (t *createCollectionTask) genCreateCollectionMsg(ctx context.Context) *ms.MsgPack { func (t *createCollectionTask) genCreateCollectionMsg(ctx context.Context, ts uint64) *ms.MsgPack {
ts := t.GetTs()
collectionID := t.collID collectionID := t.collID
partitionIDs := t.partIDs partitionIDs := t.partIDs
// error won't happen here. // error won't happen here.
@ -382,21 +381,36 @@ func (t *createCollectionTask) genCreateCollectionMsg(ctx context.Context) *ms.M
return &msgPack return &msgPack
} }
func (t *createCollectionTask) addChannelsAndGetStartPositions(ctx context.Context) (map[string][]byte, error) { func (t *createCollectionTask) addChannelsAndGetStartPositions(ctx context.Context, ts uint64) (map[string][]byte, error) {
t.core.chanTimeTick.addDmlChannels(t.channels.physicalChannels...) t.core.chanTimeTick.addDmlChannels(t.channels.physicalChannels...)
msg := t.genCreateCollectionMsg(ctx) msg := t.genCreateCollectionMsg(ctx, ts)
return t.core.chanTimeTick.broadcastMarkDmlChannels(t.channels.physicalChannels, msg) return t.core.chanTimeTick.broadcastMarkDmlChannels(t.channels.physicalChannels, msg)
} }
func (t *createCollectionTask) getCreateTs() (uint64, error) {
replicateInfo := t.Req.GetBase().GetReplicateInfo()
if !replicateInfo.GetIsReplicate() {
return t.GetTs(), nil
}
if replicateInfo.GetMsgTimestamp() == 0 {
log.Warn("the cdc timestamp is not set in the request for the backup instance")
return 0, merr.WrapErrParameterInvalidMsg("the cdc timestamp is not set in the request for the backup instance")
}
return replicateInfo.GetMsgTimestamp(), nil
}
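getCreateTs resolves the effective timestamp in three cases: a non-replicate request uses the locally allocated ts, a replicate request with MsgTimestamp set reuses the source cluster's ts, and a replicate request without it is rejected. For instance:

// replicate request carrying the source cluster's ts
req := &milvuspb.CreateCollectionRequest{
	Base: &commonpb.MsgBase{
		ReplicateInfo: &commonpb.ReplicateInfo{IsReplicate: true, MsgTimestamp: 2000},
	},
}
// getCreateTs returns 2000 instead of the locally allocated ts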
func (t *createCollectionTask) Execute(ctx context.Context) error { func (t *createCollectionTask) Execute(ctx context.Context) error {
collID := t.collID collID := t.collID
partIDs := t.partIDs partIDs := t.partIDs
ts := t.GetTs() ts, err := t.getCreateTs()
if err != nil {
return err
}
vchanNames := t.channels.virtualChannels vchanNames := t.channels.virtualChannels
chanNames := t.channels.physicalChannels chanNames := t.channels.physicalChannels
startPositions, err := t.addChannelsAndGetStartPositions(ctx) startPositions, err := t.addChannelsAndGetStartPositions(ctx, ts)
if err != nil { if err != nil {
// ugly here, since we must get start positions first. // ugly here, since we must get start positions first.
t.core.chanTimeTick.removeDmlChannels(t.channels.physicalChannels...) t.core.chanTimeTick.removeDmlChannels(t.channels.physicalChannels...)
@ -445,7 +459,7 @@ func (t *createCollectionTask) Execute(ctx context.Context) error {
return fmt.Errorf("create duplicate collection with different parameters, collection: %s", t.Req.GetCollectionName()) return fmt.Errorf("create duplicate collection with different parameters, collection: %s", t.Req.GetCollectionName())
} }
// make creating collection idempotent. // make creating collection idempotent.
log.Warn("add duplicate collection", zap.String("collection", t.Req.GetCollectionName()), zap.Uint64("ts", t.GetTs())) log.Warn("add duplicate collection", zap.String("collection", t.Req.GetCollectionName()), zap.Uint64("ts", ts))
return nil return nil
} }
@ -475,6 +489,7 @@ func (t *createCollectionTask) Execute(ctx context.Context) error {
baseStep: baseStep{core: t.core}, baseStep: baseStep{core: t.core},
collectionID: collID, collectionID: collID,
channels: t.channels, channels: t.channels,
isSkip: !Params.CommonCfg.TTMsgEnabled.GetAsBool(),
}) })
undoTask.AddStep(&watchChannelsStep{ undoTask.AddStep(&watchChannelsStep{
baseStep: baseStep{core: t.core}, baseStep: baseStep{core: t.core},

View File

@ -51,6 +51,40 @@ func Test_createCollectionTask_validate(t *testing.T) {
assert.Error(t, err) assert.Error(t, err)
}) })
t.Run("create ts", func(t *testing.T) {
task := createCollectionTask{
Req: nil,
}
{
task.SetTs(1000)
ts, err := task.getCreateTs()
assert.NoError(t, err)
assert.EqualValues(t, 1000, ts)
}
task.Req = &milvuspb.CreateCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_CreateCollection,
ReplicateInfo: &commonpb.ReplicateInfo{
IsReplicate: true,
},
},
}
{
task.SetTs(1000)
_, err := task.getCreateTs()
assert.Error(t, err)
err = task.Execute(context.Background())
assert.Error(t, err)
}
{
task.Req.Base.ReplicateInfo.MsgTimestamp = 2000
ts, err := task.getCreateTs()
assert.NoError(t, err)
assert.EqualValues(t, 2000, ts)
}
})
t.Run("invalid msg type", func(t *testing.T) { t.Run("invalid msg type", func(t *testing.T) {
task := createCollectionTask{ task := createCollectionTask{
Req: &milvuspb.CreateCollectionRequest{ Req: &milvuspb.CreateCollectionRequest{

View File

@ -100,6 +100,7 @@ func (t *dropCollectionTask) Execute(ctx context.Context) error {
redoTask.AddAsyncStep(&deleteCollectionDataStep{ redoTask.AddAsyncStep(&deleteCollectionDataStep{
baseStep: baseStep{core: t.core}, baseStep: baseStep{core: t.core},
coll: collMeta, coll: collMeta,
isSkip: t.Req.GetBase().GetReplicateInfo().GetIsReplicate(),
}) })
redoTask.AddAsyncStep(&removeDmlChannelsStep{ redoTask.AddAsyncStep(&removeDmlChannelsStep{
baseStep: baseStep{core: t.core}, baseStep: baseStep{core: t.core},

View File

@ -91,6 +91,7 @@ func (t *dropPartitionTask) Execute(ctx context.Context) error {
PartitionName: t.Req.GetPartitionName(), PartitionName: t.Req.GetPartitionName(),
CollectionID: t.collMeta.CollectionID, CollectionID: t.collMeta.CollectionID,
}, },
isSkip: t.Req.GetBase().GetReplicateInfo().GetIsReplicate(),
}) })
redoTask.AddAsyncStep(newConfirmGCStep(t.core, t.collMeta.CollectionID, partID)) redoTask.AddAsyncStep(newConfirmGCStep(t.core, t.collMeta.CollectionID, partID))
redoTask.AddAsyncStep(&removePartitionMetaStep{ redoTask.AddAsyncStep(&removePartitionMetaStep{

View File

@ -61,6 +61,7 @@ func (c *bgGarbageCollector) ReDropCollection(collMeta *model.Collection, ts Tim
redo.AddAsyncStep(&deleteCollectionDataStep{ redo.AddAsyncStep(&deleteCollectionDataStep{
baseStep: baseStep{core: c.s}, baseStep: baseStep{core: c.s},
coll: collMeta, coll: collMeta,
isSkip: !Params.CommonCfg.TTMsgEnabled.GetAsBool(),
}) })
redo.AddAsyncStep(&removeDmlChannelsStep{ redo.AddAsyncStep(&removeDmlChannelsStep{
baseStep: baseStep{core: c.s}, baseStep: baseStep{core: c.s},
@ -93,6 +94,7 @@ func (c *bgGarbageCollector) RemoveCreatingCollection(collMeta *model.Collection
virtualChannels: collMeta.VirtualChannelNames, virtualChannels: collMeta.VirtualChannelNames,
physicalChannels: collMeta.PhysicalChannelNames, physicalChannels: collMeta.PhysicalChannelNames,
}, },
isSkip: !Params.CommonCfg.TTMsgEnabled.GetAsBool(),
}) })
redo.AddAsyncStep(&removeDmlChannelsStep{ redo.AddAsyncStep(&removeDmlChannelsStep{
baseStep: baseStep{core: c.s}, baseStep: baseStep{core: c.s},
@ -117,6 +119,7 @@ func (c *bgGarbageCollector) ReDropPartition(dbID int64, pChannels []string, par
baseStep: baseStep{core: c.s}, baseStep: baseStep{core: c.s},
pchans: pChannels, pchans: pChannels,
partition: partition, partition: partition,
isSkip: !Params.CommonCfg.TTMsgEnabled.GetAsBool(),
}) })
redo.AddAsyncStep(&removeDmlChannelsStep{ redo.AddAsyncStep(&removeDmlChannelsStep{
baseStep: baseStep{core: c.s}, baseStep: baseStep{core: c.s},

View File

@ -179,6 +179,9 @@ func (c *Core) sendTimeTick(t Timestamp, reason string) error {
} }
func (c *Core) sendMinDdlTsAsTt() { func (c *Core) sendMinDdlTsAsTt() {
if !paramtable.Get().CommonCfg.TTMsgEnabled.GetAsBool() {
return
}
code := c.GetStateCode() code := c.GetStateCode()
if code != commonpb.StateCode_Healthy { if code != commonpb.StateCode_Healthy {
log.Warn("rootCoord is not healthy, skip send timetick") log.Warn("rootCoord is not healthy, skip send timetick")

View File

@ -1649,6 +1649,11 @@ func TestCore_sendMinDdlTsAsTt(t *testing.T) {
c.UpdateStateCode(commonpb.StateCode_Healthy) c.UpdateStateCode(commonpb.StateCode_Healthy)
c.session.ServerID = TestRootCoordID c.session.ServerID = TestRootCoordID
_ = paramtable.Get().Save(paramtable.Get().CommonCfg.TTMsgEnabled.Key, "false")
c.sendMinDdlTsAsTt() // disable ts msg
_ = paramtable.Get().Save(paramtable.Get().CommonCfg.TTMsgEnabled.Key, "true")
c.sendMinDdlTsAsTt() // no session. c.sendMinDdlTsAsTt() // no session.
ticker.addSession(&sessionutil.Session{SessionRaw: sessionutil.SessionRaw{ServerID: TestRootCoordID}}) ticker.addSession(&sessionutil.Session{SessionRaw: sessionutil.SessionRaw{ServerID: TestRootCoordID}})
c.sendMinDdlTsAsTt() c.sendMinDdlTsAsTt()

View File

@ -125,12 +125,15 @@ type unwatchChannelsStep struct {
baseStep baseStep
collectionID UniqueID collectionID UniqueID
channels collectionChannels channels collectionChannels
isSkip bool
} }
func (s *unwatchChannelsStep) Execute(ctx context.Context) ([]nestedStep, error) { func (s *unwatchChannelsStep) Execute(ctx context.Context) ([]nestedStep, error) {
unwatchByDropMsg := &deleteCollectionDataStep{ unwatchByDropMsg := &deleteCollectionDataStep{
baseStep: baseStep{core: s.core}, baseStep: baseStep{core: s.core},
coll: &model.Collection{CollectionID: s.collectionID, PhysicalChannelNames: s.channels.physicalChannels}, coll: &model.Collection{CollectionID: s.collectionID, PhysicalChannelNames: s.channels.physicalChannels},
isSkip: s.isSkip,
} }
return unwatchByDropMsg.Execute(ctx) return unwatchByDropMsg.Execute(ctx)
} }
@ -183,9 +186,14 @@ func (s *expireCacheStep) Desc() string {
type deleteCollectionDataStep struct { type deleteCollectionDataStep struct {
baseStep baseStep
coll *model.Collection coll *model.Collection
isSkip bool
} }
func (s *deleteCollectionDataStep) Execute(ctx context.Context) ([]nestedStep, error) { func (s *deleteCollectionDataStep) Execute(ctx context.Context) ([]nestedStep, error) {
if s.isSkip {
return nil, nil
}
ddlTs, err := s.core.garbageCollector.GcCollectionData(ctx, s.coll) ddlTs, err := s.core.garbageCollector.GcCollectionData(ctx, s.coll)
if err != nil { if err != nil {
return nil, err return nil, err
@ -239,9 +247,14 @@ type deletePartitionDataStep struct {
baseStep baseStep
pchans []string pchans []string
partition *model.Partition partition *model.Partition
isSkip bool
} }
func (s *deletePartitionDataStep) Execute(ctx context.Context) ([]nestedStep, error) { func (s *deletePartitionDataStep) Execute(ctx context.Context) ([]nestedStep, error) {
if s.isSkip {
return nil, nil
}
_, err := s.core.garbageCollector.GcPartitionData(ctx, s.pchans, s.partition) _, err := s.core.garbageCollector.GcPartitionData(ctx, s.pchans, s.partition)
return nil, err return nil, err
} }

View File

@ -95,3 +95,23 @@ func Test_confirmGCStep_Execute(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
}) })
} }
func TestSkip(t *testing.T) {
{
s := &unwatchChannelsStep{isSkip: true}
_, err := s.Execute(context.Background())
assert.NoError(t, err)
}
{
s := &deleteCollectionDataStep{isSkip: true}
_, err := s.Execute(context.Background())
assert.NoError(t, err)
}
{
s := &deletePartitionDataStep{isSkip: true}
_, err := s.Execute(context.Background())
assert.NoError(t, err)
}
}

View File

@ -636,6 +636,7 @@ func TestSessionProcessActiveStandBy(t *testing.T) {
}) })
wg.Wait() wg.Wait()
s1.LivenessCheck(ctx1, func() { s1.LivenessCheck(ctx1, func() {
log.Debug("Session 1 livenessCheck callback")
flag = true flag = true
close(signal) close(signal)
s1.cancelKeepAlive() s1.cancelKeepAlive()
@ -675,8 +676,10 @@ func TestSessionProcessActiveStandBy(t *testing.T) {
t.FailNow() t.FailNow()
} }
assert.True(t, flag) assert.True(t, flag)
log.Debug("session s1 stop")
wg.Wait() wg.Wait()
log.Debug("session s2 wait done")
assert.False(t, s2.isStandby.Load().(bool)) assert.False(t, s2.isStandby.Load().(bool))
s2.Stop() s2.Stop()
} }

View File

@ -15,3 +15,4 @@ generate-mockery: getdeps
$(INSTALL_PATH)/mockery --name=MsgStream --dir=$(PWD)/mq/msgstream --output=$(PWD)/mq/msgstream --filename=mock_msgstream.go --with-expecter --structname=MockMsgStream --outpkg=msgstream --inpackage $(INSTALL_PATH)/mockery --name=MsgStream --dir=$(PWD)/mq/msgstream --output=$(PWD)/mq/msgstream --filename=mock_msgstream.go --with-expecter --structname=MockMsgStream --outpkg=msgstream --inpackage
$(INSTALL_PATH)/mockery --name=Client --dir=$(PWD)/mq/msgdispatcher --output=$(PWD)/mq/msgsdispatcher --filename=mock_client.go --with-expecter --structname=MockClient --outpkg=msgdispatcher --inpackage $(INSTALL_PATH)/mockery --name=Client --dir=$(PWD)/mq/msgdispatcher --output=$(PWD)/mq/msgsdispatcher --filename=mock_client.go --with-expecter --structname=MockClient --outpkg=msgdispatcher --inpackage
$(INSTALL_PATH)/mockery --name=Logger --dir=$(PWD)/eventlog --output=$(PWD)/eventlog --filename=mock_logger.go --with-expecter --structname=MockLogger --outpkg=eventlog --inpackage $(INSTALL_PATH)/mockery --name=Logger --dir=$(PWD)/eventlog --output=$(PWD)/eventlog --filename=mock_logger.go --with-expecter --structname=MockLogger --outpkg=eventlog --inpackage
$(INSTALL_PATH)/mockery --name=MessageID --dir=$(PWD)/mq/msgstream/mqwrapper --output=$(PWD)/mq/msgstream/mqwrapper --filename=mock_id.go --with-expecter --structname=MockMessageID --outpkg=mqwrapper --inpackage

View File

@ -13,7 +13,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/klauspost/compress v1.16.5 github.com/klauspost/compress v1.16.5
github.com/lingdor/stackerror v0.0.0-20191119040541-976d8885ed76 github.com/lingdor/stackerror v0.0.0-20191119040541-976d8885ed76
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.0 github.com/milvus-io/milvus-proto/go-api/v2 v2.3.2-0.20231008032233-5d64d443769d
github.com/nats-io/nats-server/v2 v2.9.17 github.com/nats-io/nats-server/v2 v2.9.17
github.com/nats-io/nats.go v1.24.0 github.com/nats-io/nats.go v1.24.0
github.com/panjf2000/ants/v2 v2.7.2 github.com/panjf2000/ants/v2 v2.7.2

View File

@ -477,8 +477,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.0 h1:t5CKm7+FXuD2rDLv/H8tpN9iY8F2dZvHF87xWBx8muU= github.com/milvus-io/milvus-proto/go-api/v2 v2.3.2-0.20231008032233-5d64d443769d h1:K8yyzz8BCBm+wirhRgySyB8wN+sw33eB3VsLz6Slu5s=
github.com/milvus-io/milvus-proto/go-api/v2 v2.3.0/go.mod h1:1OIl0v5PQeNxIJhCvY+K55CBUOYDZevw9g9380u1Wek= github.com/milvus-io/milvus-proto/go-api/v2 v2.3.2-0.20231008032233-5d64d443769d/go.mod h1:1OIl0v5PQeNxIJhCvY+K55CBUOYDZevw9g9380u1Wek=
github.com/milvus-io/pulsar-client-go v0.6.10 h1:eqpJjU+/QX0iIhEo3nhOqMNXL+TyInAs1IAHZCrCM/A= github.com/milvus-io/pulsar-client-go v0.6.10 h1:eqpJjU+/QX0iIhEo3nhOqMNXL+TyInAs1IAHZCrCM/A=
github.com/milvus-io/pulsar-client-go v0.6.10/go.mod h1:lQqCkgwDF8YFYjKA+zOheTk1tev2B+bKj5j7+nm8M1w= github.com/milvus-io/pulsar-client-go v0.6.10/go.mod h1:lQqCkgwDF8YFYjKA+zOheTk1tev2B+bKj5j7+nm8M1w=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=

View File

@ -273,6 +273,39 @@ func (_c *MockMsgStream_Close_Call) RunAndReturn(run func()) *MockMsgStream_Clos
return _c return _c
} }
// EnableProduce provides a mock function with given fields: can
func (_m *MockMsgStream) EnableProduce(can bool) {
_m.Called(can)
}
// MockMsgStream_EnableProduce_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnableProduce'
type MockMsgStream_EnableProduce_Call struct {
*mock.Call
}
// EnableProduce is a helper method to define mock.On call
// - can bool
func (_e *MockMsgStream_Expecter) EnableProduce(can interface{}) *MockMsgStream_EnableProduce_Call {
return &MockMsgStream_EnableProduce_Call{Call: _e.mock.On("EnableProduce", can)}
}
func (_c *MockMsgStream_EnableProduce_Call) Run(run func(can bool)) *MockMsgStream_EnableProduce_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(bool))
})
return _c
}
func (_c *MockMsgStream_EnableProduce_Call) Return() *MockMsgStream_EnableProduce_Call {
_c.Call.Return()
return _c
}
func (_c *MockMsgStream_EnableProduce_Call) RunAndReturn(run func(bool)) *MockMsgStream_EnableProduce_Call {
_c.Call.Return(run)
return _c
}
// GetLatestMsgID provides a mock function with given fields: channel // GetLatestMsgID provides a mock function with given fields: channel
func (_m *MockMsgStream) GetLatestMsgID(channel string) (mqwrapper.MessageID, error) { func (_m *MockMsgStream) GetLatestMsgID(channel string) (mqwrapper.MessageID, error) {
ret := _m.Called(channel) ret := _m.Called(channel)

View File

@ -20,6 +20,7 @@ import (
"context" "context"
"fmt" "fmt"
"path/filepath" "path/filepath"
"strconv"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -31,6 +32,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
"github.com/milvus-io/milvus/pkg/config"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/mq/msgstream/mqwrapper"
"github.com/milvus-io/milvus/pkg/util/merr"
@ -50,16 +52,17 @@ type mqMsgStream struct {
consumers map[string]mqwrapper.Consumer
consumerChannels []string
repackFunc RepackFunc
unmarshal UnmarshalDispatcher
receiveBuf chan *MsgPack
closeRWMutex *sync.RWMutex
streamCancel func()
bufSize int64
producerLock *sync.RWMutex
consumerLock *sync.Mutex
closed int32
onceChan sync.Once
enableProduce atomic.Value
}
// NewMqMsgStream is used to generate a new mqMsgStream object
@ -93,6 +96,18 @@ func NewMqMsgStream(ctx context.Context,
closeRWMutex: &sync.RWMutex{},
closed: 0,
}
ctxLog := log.Ctx(ctx)
stream.enableProduce.Store(paramtable.Get().CommonCfg.TTMsgEnabled.GetAsBool())
paramtable.Get().Watch(paramtable.Get().CommonCfg.TTMsgEnabled.Key, config.NewHandler("enable send tt msg", func(event *config.Event) {
value, err := strconv.ParseBool(event.Value)
if err != nil {
ctxLog.Warn("Failed to parse bool value", zap.String("v", event.Value), zap.Error(err))
return
}
stream.enableProduce.Store(value)
ctxLog.Info("Msg Stream state updated", zap.Bool("can_produce", stream.isEnabledProduce()))
}))
ctxLog.Info("Msg Stream state", zap.Bool("can_produce", stream.isEnabledProduce()))
return stream, nil
}
@ -241,7 +256,19 @@ func (ms *mqMsgStream) GetProduceChannels() []string {
return ms.producerChannels
}
func (ms *mqMsgStream) EnableProduce(can bool) {
ms.enableProduce.Store(can)
}
func (ms *mqMsgStream) isEnabledProduce() bool {
return ms.enableProduce.Load().(bool)
}
func (ms *mqMsgStream) Produce(msgPack *MsgPack) error {
if !ms.isEnabledProduce() {
log.Warn("can't produce the msg in the backup instance", zap.Stack("stack"))
return merr.ErrDenyProduceMsg
}
if msgPack == nil || len(msgPack.Msgs) <= 0 {
log.Debug("Warning: Receive empty msgPack")
return nil
@ -307,6 +334,14 @@ func (ms *mqMsgStream) Broadcast(msgPack *MsgPack) (map[string][]MessageID, erro
if msgPack == nil || len(msgPack.Msgs) <= 0 {
return ids, errors.New("empty msgs")
}
// Only allow the create-collection msg to be broadcast in the backup instance
// This may cause ts disorder, but since the collection start position only uses offsets rather than timestamps, it is acceptable for now
isCreateCollectionMsg := len(msgPack.Msgs) == 1 && msgPack.Msgs[0].Type() == commonpb.MsgType_CreateCollection
if !ms.isEnabledProduce() && !isCreateCollectionMsg {
log.Warn("can't broadcast the msg in the backup instance", zap.Stack("stack"))
return ids, merr.ErrDenyProduceMsg
}
for _, v := range msgPack.Msgs {
spanCtx, sp := MsgSpanFromCtx(v.TraceCtx(), v)
@ -357,7 +392,6 @@ func (ms *mqMsgStream) getTsMsgFromConsumerMsg(msg mqwrapper.Message) (TsMsg, er
return nil, fmt.Errorf("failed to unmarshal tsMsg, err %s", err.Error())
}
// set msg info to tsMsg
tsMsg.SetPosition(&MsgPosition{
ChannelName: filepath.Base(msg.Topic()),
MsgID: msg.ID().Serialize(),

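The gate above makes Produce fail fast on a backup instance. A caller-side sketch (illustrative, not part of this diff; whether a caller may simply skip on denial is an assumption):

package msgstream

import (
	"github.com/cockroachdb/errors"

	"github.com/milvus-io/milvus/pkg/util/merr"
)

// produceOrSkip treats ErrDenyProduceMsg as a signal that this instance is a
// backup replica whose data arrives through the replicate channel instead.
func produceOrSkip(ms MsgStream, pack *MsgPack) error {
	err := ms.Produce(pack)
	if errors.Is(err, merr.ErrDenyProduceMsg) {
		return nil // denied on purpose: replication supplies the data
	}
	return err
}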
View File

@ -111,6 +111,13 @@ func TestStream_PulsarMsgStream_Insert(t *testing.T) {
inputStream := getPulsarInputStream(ctx, pulsarAddress, producerChannels)
outputStream := getPulsarOutputStream(ctx, pulsarAddress, consumerChannels, consumerSubName)
{
inputStream.EnableProduce(false)
err := inputStream.Produce(&msgPack)
require.Error(t, err)
}
inputStream.EnableProduce(true)
err := inputStream.Produce(&msgPack)
require.NoErrorf(t, err, fmt.Sprintf("produce error = %v", err))
@ -177,6 +184,13 @@ func TestStream_PulsarMsgStream_BroadCast(t *testing.T) {
inputStream := getPulsarInputStream(ctx, pulsarAddress, producerChannels)
outputStream := getPulsarOutputStream(ctx, pulsarAddress, consumerChannels, consumerSubName)
{
inputStream.EnableProduce(false)
_, err := inputStream.Broadcast(&msgPack)
require.Error(t, err)
}
inputStream.EnableProduce(true)
_, err := inputStream.Broadcast(&msgPack)
require.NoErrorf(t, err, fmt.Sprintf("broadcast error = %v", err))

View File

@ -59,7 +59,7 @@ type Consumer interface {
// Get the message channel; once you call Chan you cannot seek again
Chan() <-chan Message
// Seek to the uniqueID position
// Seek to the uniqueID position, the second bool param indicates whether the message is included in the position
Seek(MessageID, bool) error //nolint:govet
// Ack make sure that msg is received

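A one-function sketch of the clarified Seek contract (illustrative; consumer and pos are placeholders):

package sketch

import "github.com/milvus-io/milvus/pkg/mq/msgstream/mqwrapper"

// resumeAfter passes false as the inclusiveness flag, so consumption resumes
// at the message after pos rather than redelivering pos itself.
func resumeAfter(consumer mqwrapper.Consumer, pos mqwrapper.MessageID) error {
	return consumer.Seek(pos, false)
}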
View File

@ -0,0 +1,220 @@
// Code generated by mockery v2.32.4. DO NOT EDIT.
package mqwrapper
import mock "github.com/stretchr/testify/mock"
// MockMessageID is an autogenerated mock type for the MessageID type
type MockMessageID struct {
mock.Mock
}
type MockMessageID_Expecter struct {
mock *mock.Mock
}
func (_m *MockMessageID) EXPECT() *MockMessageID_Expecter {
return &MockMessageID_Expecter{mock: &_m.Mock}
}
// AtEarliestPosition provides a mock function with given fields:
func (_m *MockMessageID) AtEarliestPosition() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// MockMessageID_AtEarliestPosition_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AtEarliestPosition'
type MockMessageID_AtEarliestPosition_Call struct {
*mock.Call
}
// AtEarliestPosition is a helper method to define mock.On call
func (_e *MockMessageID_Expecter) AtEarliestPosition() *MockMessageID_AtEarliestPosition_Call {
return &MockMessageID_AtEarliestPosition_Call{Call: _e.mock.On("AtEarliestPosition")}
}
func (_c *MockMessageID_AtEarliestPosition_Call) Run(run func()) *MockMessageID_AtEarliestPosition_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockMessageID_AtEarliestPosition_Call) Return(_a0 bool) *MockMessageID_AtEarliestPosition_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockMessageID_AtEarliestPosition_Call) RunAndReturn(run func() bool) *MockMessageID_AtEarliestPosition_Call {
_c.Call.Return(run)
return _c
}
// Equal provides a mock function with given fields: msgID
func (_m *MockMessageID) Equal(msgID []byte) (bool, error) {
ret := _m.Called(msgID)
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func([]byte) (bool, error)); ok {
return rf(msgID)
}
if rf, ok := ret.Get(0).(func([]byte) bool); ok {
r0 = rf(msgID)
} else {
r0 = ret.Get(0).(bool)
}
if rf, ok := ret.Get(1).(func([]byte) error); ok {
r1 = rf(msgID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockMessageID_Equal_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Equal'
type MockMessageID_Equal_Call struct {
*mock.Call
}
// Equal is a helper method to define mock.On call
// - msgID []byte
func (_e *MockMessageID_Expecter) Equal(msgID interface{}) *MockMessageID_Equal_Call {
return &MockMessageID_Equal_Call{Call: _e.mock.On("Equal", msgID)}
}
func (_c *MockMessageID_Equal_Call) Run(run func(msgID []byte)) *MockMessageID_Equal_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].([]byte))
})
return _c
}
func (_c *MockMessageID_Equal_Call) Return(_a0 bool, _a1 error) *MockMessageID_Equal_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockMessageID_Equal_Call) RunAndReturn(run func([]byte) (bool, error)) *MockMessageID_Equal_Call {
_c.Call.Return(run)
return _c
}
// LessOrEqualThan provides a mock function with given fields: msgID
func (_m *MockMessageID) LessOrEqualThan(msgID []byte) (bool, error) {
ret := _m.Called(msgID)
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func([]byte) (bool, error)); ok {
return rf(msgID)
}
if rf, ok := ret.Get(0).(func([]byte) bool); ok {
r0 = rf(msgID)
} else {
r0 = ret.Get(0).(bool)
}
if rf, ok := ret.Get(1).(func([]byte) error); ok {
r1 = rf(msgID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockMessageID_LessOrEqualThan_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LessOrEqualThan'
type MockMessageID_LessOrEqualThan_Call struct {
*mock.Call
}
// LessOrEqualThan is a helper method to define mock.On call
// - msgID []byte
func (_e *MockMessageID_Expecter) LessOrEqualThan(msgID interface{}) *MockMessageID_LessOrEqualThan_Call {
return &MockMessageID_LessOrEqualThan_Call{Call: _e.mock.On("LessOrEqualThan", msgID)}
}
func (_c *MockMessageID_LessOrEqualThan_Call) Run(run func(msgID []byte)) *MockMessageID_LessOrEqualThan_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].([]byte))
})
return _c
}
func (_c *MockMessageID_LessOrEqualThan_Call) Return(_a0 bool, _a1 error) *MockMessageID_LessOrEqualThan_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockMessageID_LessOrEqualThan_Call) RunAndReturn(run func([]byte) (bool, error)) *MockMessageID_LessOrEqualThan_Call {
_c.Call.Return(run)
return _c
}
// Serialize provides a mock function with given fields:
func (_m *MockMessageID) Serialize() []byte {
ret := _m.Called()
var r0 []byte
if rf, ok := ret.Get(0).(func() []byte); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
return r0
}
// MockMessageID_Serialize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Serialize'
type MockMessageID_Serialize_Call struct {
*mock.Call
}
// Serialize is a helper method to define mock.On call
func (_e *MockMessageID_Expecter) Serialize() *MockMessageID_Serialize_Call {
return &MockMessageID_Serialize_Call{Call: _e.mock.On("Serialize")}
}
func (_c *MockMessageID_Serialize_Call) Run(run func()) *MockMessageID_Serialize_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockMessageID_Serialize_Call) Return(_a0 []byte) *MockMessageID_Serialize_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockMessageID_Serialize_Call) RunAndReturn(run func() []byte) *MockMessageID_Serialize_Call {
_c.Call.Return(run)
return _c
}
// NewMockMessageID creates a new instance of MockMessageID. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockMessageID(t interface {
mock.TestingT
Cleanup(func())
}) *MockMessageID {
mock := &MockMessageID{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -0,0 +1,190 @@
/*
* Licensed to the LF AI & Data foundation under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package msgstream
import (
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
)
// LoadCollectionMsg is a message pack that contains load collection request
type LoadCollectionMsg struct {
BaseMsg
milvuspb.LoadCollectionRequest
}
// interface implementation validation
var _ TsMsg = &LoadCollectionMsg{}
func (l *LoadCollectionMsg) ID() UniqueID {
return l.Base.MsgID
}
func (l *LoadCollectionMsg) SetID(id UniqueID) {
l.Base.MsgID = id
}
func (l *LoadCollectionMsg) Type() MsgType {
return l.Base.MsgType
}
func (l *LoadCollectionMsg) SourceID() int64 {
return l.Base.SourceID
}
func (l *LoadCollectionMsg) Marshal(input TsMsg) (MarshalType, error) {
loadCollectionMsg := input.(*LoadCollectionMsg)
loadCollectionRequest := &loadCollectionMsg.LoadCollectionRequest
mb, err := proto.Marshal(loadCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (l *LoadCollectionMsg) Unmarshal(input MarshalType) (TsMsg, error) {
loadCollectionRequest := milvuspb.LoadCollectionRequest{}
in, err := convertToByteArray(input)
if err != nil {
return nil, err
}
err = proto.Unmarshal(in, &loadCollectionRequest)
if err != nil {
return nil, err
}
loadCollectionMsg := &LoadCollectionMsg{LoadCollectionRequest: loadCollectionRequest}
loadCollectionMsg.BeginTimestamp = loadCollectionMsg.GetBase().GetTimestamp()
loadCollectionMsg.EndTimestamp = loadCollectionMsg.GetBase().GetTimestamp()
return loadCollectionMsg, nil
}
func (l *LoadCollectionMsg) Size() int {
return proto.Size(&l.LoadCollectionRequest)
}
// ReleaseCollectionMsg is a message pack that contains release collection request
type ReleaseCollectionMsg struct {
BaseMsg
milvuspb.ReleaseCollectionRequest
}
var _ TsMsg = &ReleaseCollectionMsg{}
func (r *ReleaseCollectionMsg) ID() UniqueID {
return r.Base.MsgID
}
func (r *ReleaseCollectionMsg) SetID(id UniqueID) {
r.Base.MsgID = id
}
func (r *ReleaseCollectionMsg) Type() MsgType {
return r.Base.MsgType
}
func (r *ReleaseCollectionMsg) SourceID() int64 {
return r.Base.SourceID
}
func (r *ReleaseCollectionMsg) Marshal(input TsMsg) (MarshalType, error) {
releaseCollectionMsg := input.(*ReleaseCollectionMsg)
releaseCollectionRequest := &releaseCollectionMsg.ReleaseCollectionRequest
mb, err := proto.Marshal(releaseCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (r *ReleaseCollectionMsg) Unmarshal(input MarshalType) (TsMsg, error) {
releaseCollectionRequest := milvuspb.ReleaseCollectionRequest{}
in, err := convertToByteArray(input)
if err != nil {
return nil, err
}
err = proto.Unmarshal(in, &releaseCollectionRequest)
if err != nil {
return nil, err
}
releaseCollectionMsg := &ReleaseCollectionMsg{ReleaseCollectionRequest: releaseCollectionRequest}
releaseCollectionMsg.BeginTimestamp = releaseCollectionMsg.GetBase().GetTimestamp()
releaseCollectionMsg.EndTimestamp = releaseCollectionMsg.GetBase().GetTimestamp()
return releaseCollectionMsg, nil
}
func (r *ReleaseCollectionMsg) Size() int {
return proto.Size(&r.ReleaseCollectionRequest)
}
type FlushMsg struct {
BaseMsg
milvuspb.FlushRequest
}
var _ TsMsg = &FlushMsg{}
func (f *FlushMsg) ID() UniqueID {
return f.Base.MsgID
}
func (f *FlushMsg) SetID(id UniqueID) {
f.Base.MsgID = id
}
func (f *FlushMsg) Type() MsgType {
return f.Base.MsgType
}
func (f *FlushMsg) SourceID() int64 {
return f.Base.SourceID
}
func (f *FlushMsg) Marshal(input TsMsg) (MarshalType, error) {
flushMsg := input.(*FlushMsg)
flushRequest := &flushMsg.FlushRequest
mb, err := proto.Marshal(flushRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (f *FlushMsg) Unmarshal(input MarshalType) (TsMsg, error) {
flushRequest := milvuspb.FlushRequest{}
in, err := convertToByteArray(input)
if err != nil {
return nil, err
}
err = proto.Unmarshal(in, &flushRequest)
if err != nil {
return nil, err
}
flushMsg := &FlushMsg{FlushRequest: flushRequest}
flushMsg.BeginTimestamp = flushMsg.GetBase().GetTimestamp()
flushMsg.EndTimestamp = flushMsg.GetBase().GetTimestamp()
return flushMsg, nil
}
func (f *FlushMsg) Size() int {
return proto.Size(&f.FlushRequest)
}

View File

@ -0,0 +1,139 @@
/*
* Licensed to the LF AI & Data foundation under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package msgstream
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
)
func TestFlushMsg(t *testing.T) {
var msg TsMsg = &FlushMsg{
FlushRequest: milvuspb.FlushRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Flush,
MsgID: 100,
Timestamp: 1000,
SourceID: 10000,
TargetID: 100000,
ReplicateInfo: nil,
},
DbName: "unit_db",
CollectionNames: []string{"col1", "col2"},
},
}
assert.EqualValues(t, 100, msg.ID())
msg.SetID(200)
assert.EqualValues(t, 200, msg.ID())
assert.Equal(t, commonpb.MsgType_Flush, msg.Type())
assert.EqualValues(t, 10000, msg.SourceID())
msgBytes, err := msg.Marshal(msg)
assert.NoError(t, err)
var newMsg TsMsg = &FlushMsg{}
_, err = newMsg.Unmarshal("1")
assert.Error(t, err)
newMsg, err = newMsg.Unmarshal(msgBytes)
assert.NoError(t, err)
assert.EqualValues(t, 200, newMsg.ID())
assert.EqualValues(t, 1000, newMsg.BeginTs())
assert.EqualValues(t, 1000, newMsg.EndTs())
assert.True(t, msg.Size() > 0)
}
func TestLoadCollection(t *testing.T) {
var msg TsMsg = &LoadCollectionMsg{
LoadCollectionRequest: milvuspb.LoadCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
MsgID: 100,
Timestamp: 1000,
SourceID: 10000,
TargetID: 100000,
ReplicateInfo: nil,
},
DbName: "unit_db",
CollectionName: "col1",
},
}
assert.EqualValues(t, 100, msg.ID())
msg.SetID(200)
assert.EqualValues(t, 200, msg.ID())
assert.Equal(t, commonpb.MsgType_LoadCollection, msg.Type())
assert.EqualValues(t, 10000, msg.SourceID())
msgBytes, err := msg.Marshal(msg)
assert.NoError(t, err)
var newMsg TsMsg = &LoadCollectionMsg{}
_, err = newMsg.Unmarshal("1")
assert.Error(t, err)
newMsg, err = newMsg.Unmarshal(msgBytes)
assert.NoError(t, err)
assert.EqualValues(t, 200, newMsg.ID())
assert.EqualValues(t, 1000, newMsg.BeginTs())
assert.EqualValues(t, 1000, newMsg.EndTs())
assert.True(t, msg.Size() > 0)
}
func TestReleaseCollection(t *testing.T) {
var msg TsMsg = &ReleaseCollectionMsg{
ReleaseCollectionRequest: milvuspb.ReleaseCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_ReleaseCollection,
MsgID: 100,
Timestamp: 1000,
SourceID: 10000,
TargetID: 100000,
ReplicateInfo: nil,
},
DbName: "unit_db",
CollectionName: "col1",
},
}
assert.EqualValues(t, 100, msg.ID())
msg.SetID(200)
assert.EqualValues(t, 200, msg.ID())
assert.Equal(t, commonpb.MsgType_ReleaseCollection, msg.Type())
assert.EqualValues(t, 10000, msg.SourceID())
msgBytes, err := msg.Marshal(msg)
assert.NoError(t, err)
var newMsg TsMsg = &ReleaseCollectionMsg{}
_, err = newMsg.Unmarshal("1")
assert.Error(t, err)
newMsg, err = newMsg.Unmarshal(msgBytes)
assert.NoError(t, err)
assert.EqualValues(t, 200, newMsg.ID())
assert.EqualValues(t, 1000, newMsg.BeginTs())
assert.EqualValues(t, 1000, newMsg.EndTs())
assert.True(t, msg.Size() > 0)
}

View File

@ -0,0 +1,133 @@
/*
* Licensed to the LF AI & Data foundation under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package msgstream
import (
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
)
type CreateDatabaseMsg struct {
BaseMsg
milvuspb.CreateDatabaseRequest
}
var _ TsMsg = &CreateDatabaseMsg{}
func (c *CreateDatabaseMsg) ID() UniqueID {
return c.Base.MsgID
}
func (c *CreateDatabaseMsg) SetID(id UniqueID) {
c.Base.MsgID = id
}
func (c *CreateDatabaseMsg) Type() MsgType {
return c.Base.MsgType
}
func (c *CreateDatabaseMsg) SourceID() int64 {
return c.Base.SourceID
}
func (c *CreateDatabaseMsg) Marshal(input TsMsg) (MarshalType, error) {
createDataBaseMsg := input.(*CreateDatabaseMsg)
createDatabaseRequest := &createDataBaseMsg.CreateDatabaseRequest
mb, err := proto.Marshal(createDatabaseRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (c *CreateDatabaseMsg) Unmarshal(input MarshalType) (TsMsg, error) {
createDatabaseRequest := milvuspb.CreateDatabaseRequest{}
in, err := convertToByteArray(input)
if err != nil {
return nil, err
}
err = proto.Unmarshal(in, &createDatabaseRequest)
if err != nil {
return nil, err
}
createDatabaseMsg := &CreateDatabaseMsg{CreateDatabaseRequest: createDatabaseRequest}
createDatabaseMsg.BeginTimestamp = createDatabaseMsg.GetBase().GetTimestamp()
createDatabaseMsg.EndTimestamp = createDatabaseMsg.GetBase().GetTimestamp()
return createDatabaseMsg, nil
}
func (c *CreateDatabaseMsg) Size() int {
return proto.Size(&c.CreateDatabaseRequest)
}
type DropDatabaseMsg struct {
BaseMsg
milvuspb.DropDatabaseRequest
}
var _ TsMsg = &DropDatabaseMsg{}
func (d *DropDatabaseMsg) ID() UniqueID {
return d.Base.MsgID
}
func (d *DropDatabaseMsg) SetID(id UniqueID) {
d.Base.MsgID = id
}
func (d *DropDatabaseMsg) Type() MsgType {
return d.Base.MsgType
}
func (d *DropDatabaseMsg) SourceID() int64 {
return d.Base.SourceID
}
func (d *DropDatabaseMsg) Marshal(input TsMsg) (MarshalType, error) {
dropDataBaseMsg := input.(*DropDatabaseMsg)
dropDatabaseRequest := &dropDataBaseMsg.DropDatabaseRequest
mb, err := proto.Marshal(dropDatabaseRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (d *DropDatabaseMsg) Unmarshal(input MarshalType) (TsMsg, error) {
dropDatabaseRequest := milvuspb.DropDatabaseRequest{}
in, err := convertToByteArray(input)
if err != nil {
return nil, err
}
err = proto.Unmarshal(in, &dropDatabaseRequest)
if err != nil {
return nil, err
}
dropDatabaseMsg := &DropDatabaseMsg{DropDatabaseRequest: dropDatabaseRequest}
dropDatabaseMsg.BeginTimestamp = dropDatabaseMsg.GetBase().GetTimestamp()
dropDatabaseMsg.EndTimestamp = dropDatabaseMsg.GetBase().GetTimestamp()
return dropDatabaseMsg, nil
}
func (d *DropDatabaseMsg) Size() int {
return proto.Size(&d.DropDatabaseRequest)
}

View File

@ -0,0 +1,100 @@
/*
* Licensed to the LF AI & Data foundation under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package msgstream
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
)
func TestCreateDatabase(t *testing.T) {
var msg TsMsg = &CreateDatabaseMsg{
CreateDatabaseRequest: milvuspb.CreateDatabaseRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_CreateDatabase,
MsgID: 100,
Timestamp: 1000,
SourceID: 10000,
TargetID: 100000,
ReplicateInfo: nil,
},
DbName: "unit_db",
},
}
assert.EqualValues(t, 100, msg.ID())
msg.SetID(200)
assert.EqualValues(t, 200, msg.ID())
assert.Equal(t, commonpb.MsgType_CreateDatabase, msg.Type())
assert.EqualValues(t, 10000, msg.SourceID())
msgBytes, err := msg.Marshal(msg)
assert.NoError(t, err)
var newMsg TsMsg = &CreateDatabaseMsg{}
_, err = newMsg.Unmarshal("1")
assert.Error(t, err)
newMsg, err = newMsg.Unmarshal(msgBytes)
assert.NoError(t, err)
assert.EqualValues(t, 200, newMsg.ID())
assert.EqualValues(t, 1000, newMsg.BeginTs())
assert.EqualValues(t, 1000, newMsg.EndTs())
assert.True(t, msg.Size() > 0)
}
func TestDropDatabase(t *testing.T) {
var msg TsMsg = &DropDatabaseMsg{
DropDatabaseRequest: milvuspb.DropDatabaseRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DropDatabase,
MsgID: 100,
Timestamp: 1000,
SourceID: 10000,
TargetID: 100000,
ReplicateInfo: nil,
},
DbName: "unit_db",
},
}
assert.EqualValues(t, 100, msg.ID())
msg.SetID(200)
assert.EqualValues(t, 200, msg.ID())
assert.Equal(t, commonpb.MsgType_DropDatabase, msg.Type())
assert.EqualValues(t, 10000, msg.SourceID())
msgBytes, err := msg.Marshal(msg)
assert.NoError(t, err)
var newMsg TsMsg = &DropDatabaseMsg{}
_, err = newMsg.Unmarshal("1")
assert.Error(t, err)
newMsg, err = newMsg.Unmarshal(msgBytes)
assert.NoError(t, err)
assert.EqualValues(t, 200, newMsg.ID())
assert.EqualValues(t, 1000, newMsg.BeginTs())
assert.EqualValues(t, 1000, newMsg.EndTs())
assert.True(t, msg.Size() > 0)
}

View File

@ -0,0 +1,142 @@
/*
* Licensed to the LF AI & Data foundation under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package msgstream
import (
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
)
// CreateIndexMsg is a message pack that contains create index request
type CreateIndexMsg struct {
BaseMsg
milvuspb.CreateIndexRequest
}
// interface implementation validation
var _ TsMsg = &CreateIndexMsg{}
// ID returns the ID of this message pack
func (it *CreateIndexMsg) ID() UniqueID {
return it.Base.MsgID
}
// SetID set the ID of this message pack
func (it *CreateIndexMsg) SetID(id UniqueID) {
it.Base.MsgID = id
}
// Type returns the type of this message pack
func (it *CreateIndexMsg) Type() MsgType {
return it.Base.MsgType
}
// SourceID indicates which component generated this message
func (it *CreateIndexMsg) SourceID() int64 {
return it.Base.SourceID
}
// Marshal is used to serialize a message pack to byte array
func (it *CreateIndexMsg) Marshal(input TsMsg) (MarshalType, error) {
createIndexMsg := input.(*CreateIndexMsg)
createIndexRequest := &createIndexMsg.CreateIndexRequest
mb, err := proto.Marshal(createIndexRequest)
if err != nil {
return nil, err
}
return mb, nil
}
// Unmarshal is used to deserialize a message pack from byte array
func (it *CreateIndexMsg) Unmarshal(input MarshalType) (TsMsg, error) {
createIndexRequest := milvuspb.CreateIndexRequest{}
in, err := convertToByteArray(input)
if err != nil {
return nil, err
}
err = proto.Unmarshal(in, &createIndexRequest)
if err != nil {
return nil, err
}
createIndexMsg := &CreateIndexMsg{CreateIndexRequest: createIndexRequest}
createIndexMsg.BeginTimestamp = createIndexMsg.GetBase().GetTimestamp()
createIndexMsg.EndTimestamp = createIndexMsg.GetBase().GetTimestamp()
return createIndexMsg, nil
}
func (it *CreateIndexMsg) Size() int {
return proto.Size(&it.CreateIndexRequest)
}
// DropIndexMsg is a message pack that contains drop index request
type DropIndexMsg struct {
BaseMsg
milvuspb.DropIndexRequest
}
var _ TsMsg = &DropIndexMsg{}
func (d *DropIndexMsg) ID() UniqueID {
return d.Base.MsgID
}
func (d *DropIndexMsg) SetID(id UniqueID) {
d.Base.MsgID = id
}
func (d *DropIndexMsg) Type() MsgType {
return d.Base.MsgType
}
func (d *DropIndexMsg) SourceID() int64 {
return d.Base.SourceID
}
func (d *DropIndexMsg) Marshal(input TsMsg) (MarshalType, error) {
dropIndexMsg := input.(*DropIndexMsg)
dropIndexRequest := &dropIndexMsg.DropIndexRequest
mb, err := proto.Marshal(dropIndexRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (d *DropIndexMsg) Unmarshal(input MarshalType) (TsMsg, error) {
dropIndexRequest := milvuspb.DropIndexRequest{}
in, err := convertToByteArray(input)
if err != nil {
return nil, err
}
err = proto.Unmarshal(in, &dropIndexRequest)
if err != nil {
return nil, err
}
dropIndexMsg := &DropIndexMsg{DropIndexRequest: dropIndexRequest}
dropIndexMsg.BeginTimestamp = dropIndexMsg.GetBase().GetTimestamp()
dropIndexMsg.EndTimestamp = dropIndexMsg.GetBase().GetTimestamp()
return dropIndexMsg, nil
}
func (d *DropIndexMsg) Size() int {
return proto.Size(&d.DropIndexRequest)
}

View File

@ -0,0 +1,100 @@
/*
* Licensed to the LF AI & Data foundation under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package msgstream
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
)
func TestCreateIndex(t *testing.T) {
var msg TsMsg = &CreateIndexMsg{
CreateIndexRequest: milvuspb.CreateIndexRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_CreateIndex,
MsgID: 100,
Timestamp: 1000,
SourceID: 10000,
TargetID: 100000,
ReplicateInfo: nil,
},
DbName: "unit_db",
},
}
assert.EqualValues(t, 100, msg.ID())
msg.SetID(200)
assert.EqualValues(t, 200, msg.ID())
assert.Equal(t, commonpb.MsgType_CreateIndex, msg.Type())
assert.EqualValues(t, 10000, msg.SourceID())
msgBytes, err := msg.Marshal(msg)
assert.NoError(t, err)
var newMsg TsMsg = &CreateIndexMsg{}
_, err = newMsg.Unmarshal("1")
assert.Error(t, err)
newMsg, err = newMsg.Unmarshal(msgBytes)
assert.NoError(t, err)
assert.EqualValues(t, 200, newMsg.ID())
assert.EqualValues(t, 1000, newMsg.BeginTs())
assert.EqualValues(t, 1000, newMsg.EndTs())
assert.True(t, msg.Size() > 0)
}
func TestDropIndex(t *testing.T) {
var msg TsMsg = &DropIndexMsg{
DropIndexRequest: milvuspb.DropIndexRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DropIndex,
MsgID: 100,
Timestamp: 1000,
SourceID: 10000,
TargetID: 100000,
ReplicateInfo: nil,
},
DbName: "unit_db",
},
}
assert.EqualValues(t, 100, msg.ID())
msg.SetID(200)
assert.EqualValues(t, 200, msg.ID())
assert.Equal(t, commonpb.MsgType_DropIndex, msg.Type())
assert.EqualValues(t, 10000, msg.SourceID())
msgBytes, err := msg.Marshal(msg)
assert.NoError(t, err)
var newMsg TsMsg = &DropIndexMsg{}
_, err = newMsg.Unmarshal("1")
assert.Error(t, err)
newMsg, err = newMsg.Unmarshal(msgBytes)
assert.NoError(t, err)
assert.EqualValues(t, 200, newMsg.ID())
assert.EqualValues(t, 1000, newMsg.BeginTs())
assert.EqualValues(t, 1000, newMsg.EndTs())
assert.True(t, msg.Size() > 0)
}

View File

@ -67,6 +67,8 @@ type MsgStream interface {
GetLatestMsgID(channel string) (MessageID, error)
CheckTopicValid(channel string) error
EnableProduce(can bool)
}
type Factory interface {

View File

@ -18,10 +18,13 @@ package msgstream
import (
"context"
"fmt"
"math/rand"
"go.uber.org/zap"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/mq/msgstream/mqwrapper"
)
// UnsubscribeChannels creates a consumer first, then unsubscribes the channel through msgStream.close()
@ -34,3 +37,25 @@ func UnsubscribeChannels(ctx context.Context, factory Factory, subName string, c
panic(err)
}
}
func GetChannelLatestMsgID(ctx context.Context, factory Factory, channelName string) ([]byte, error) {
dmlStream, err := factory.NewMsgStream(ctx)
if err != nil {
log.Warn("fail to NewMsgStream", zap.String("channelName", channelName), zap.Error(err))
return nil, err
}
defer dmlStream.Close()
subName := fmt.Sprintf("get-latest_msg_id-%s-%d", channelName, rand.Int())
err = dmlStream.AsConsumer(ctx, []string{channelName}, subName, mqwrapper.SubscriptionPositionUnknown)
if err != nil {
log.Warn("fail to AsConsumer", zap.String("channelName", channelName), zap.Error(err))
return nil, err
}
id, err := dmlStream.GetLatestMsgID(channelName)
if err != nil {
log.Error("fail to GetLatestMsgID", zap.String("channelName", channelName), zap.Error(err))
return nil, err
}
return id.Serialize(), nil
}

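A usage sketch for the helper above (illustrative; the channel name is a placeholder and factory is assumed to be a concrete msgstream.Factory such as the Pulsar one used in the tests):

package msgstream

import (
	"context"
	"fmt"
)

// fetchLatestPosition returns the serialized latest message id of a channel,
// which a replicator can persist as a resume point.
func fetchLatestPosition(ctx context.Context, factory Factory) ([]byte, error) {
	id, err := GetChannelLatestMsgID(ctx, factory, "by-dev-rootcoord-dml_0")
	if err != nil {
		return nil, err
	}
	fmt.Printf("latest msg id: %x\n", id)
	return id, nil
}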
View File

@ -20,7 +20,11 @@ import (
"context"
"testing"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/milvus-io/milvus/pkg/mq/msgstream/mqwrapper"
)
func TestPulsarMsgUtil(t *testing.T) {
@ -36,3 +40,43 @@ func TestPulsarMsgUtil(t *testing.T) {
UnsubscribeChannels(ctx, pmsFactory, "sub", []string{"test"})
}
func TestGetLatestMsgID(t *testing.T) {
factory := NewMockMqFactory()
ctx := context.Background()
{
factory.NewMsgStreamFunc = func(ctx context.Context) (MsgStream, error) {
return nil, errors.New("mock")
}
_, err := GetChannelLatestMsgID(ctx, factory, "test")
assert.Error(t, err)
}
stream := NewMockMsgStream(t)
factory.NewMsgStreamFunc = func(ctx context.Context) (MsgStream, error) {
return stream, nil
}
stream.EXPECT().Close().Return()
{
stream.EXPECT().AsConsumer(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("mock")).Once()
_, err := GetChannelLatestMsgID(ctx, factory, "test")
assert.Error(t, err)
}
{
stream.EXPECT().AsConsumer(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
stream.EXPECT().GetLatestMsgID(mock.Anything).Return(nil, errors.New("mock")).Once()
_, err := GetChannelLatestMsgID(ctx, factory, "test")
assert.Error(t, err)
}
{
mockMsgID := mqwrapper.NewMockMessageID(t)
mockMsgID.EXPECT().Serialize().Return([]byte("mock")).Once()
stream.EXPECT().AsConsumer(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
stream.EXPECT().GetLatestMsgID(mock.Anything).Return(mockMsgID, nil).Once()
id, err := GetChannelLatestMsgID(ctx, factory, "test")
assert.NoError(t, err)
assert.Equal(t, []byte("mock"), id)
}
}

View File

@ -63,6 +63,16 @@ func (pudf *ProtoUDFactory) NewUnmarshalDispatcher() *ProtoUnmarshalDispatcher {
dropPartitionMsg := DropPartitionMsg{}
dataNodeTtMsg := DataNodeTtMsg{}
createIndexMsg := CreateIndexMsg{}
dropIndexMsg := DropIndexMsg{}
loadCollectionMsg := LoadCollectionMsg{}
releaseCollectionMsg := ReleaseCollectionMsg{}
flushMsg := FlushMsg{}
createDatabaseMsg := CreateDatabaseMsg{}
dropDatabaseMsg := DropDatabaseMsg{}
p := &ProtoUnmarshalDispatcher{}
p.TempMap = make(map[commonpb.MsgType]UnmarshalFunc)
p.TempMap[commonpb.MsgType_Insert] = insertMsg.Unmarshal
@ -73,6 +83,13 @@ func (pudf *ProtoUDFactory) NewUnmarshalDispatcher() *ProtoUnmarshalDispatcher {
p.TempMap[commonpb.MsgType_CreatePartition] = createPartitionMsg.Unmarshal
p.TempMap[commonpb.MsgType_DropPartition] = dropPartitionMsg.Unmarshal
p.TempMap[commonpb.MsgType_DataNodeTt] = dataNodeTtMsg.Unmarshal
p.TempMap[commonpb.MsgType_CreateIndex] = createIndexMsg.Unmarshal
p.TempMap[commonpb.MsgType_DropIndex] = dropIndexMsg.Unmarshal
p.TempMap[commonpb.MsgType_LoadCollection] = loadCollectionMsg.Unmarshal
p.TempMap[commonpb.MsgType_ReleaseCollection] = releaseCollectionMsg.Unmarshal
p.TempMap[commonpb.MsgType_Flush] = flushMsg.Unmarshal
p.TempMap[commonpb.MsgType_CreateDatabase] = createDatabaseMsg.Unmarshal
p.TempMap[commonpb.MsgType_DropDatabase] = dropDatabaseMsg.Unmarshal
return p
}

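With these registrations, replicated DDL and control messages can be decoded by type. A round-trip sketch (illustrative; it assumes the dispatcher's Get accessor from this package):

package msgstream

import (
	"github.com/golang/protobuf/proto"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
)

// dispatchFlush serializes a FlushRequest and decodes it back into a TsMsg
// through the newly registered unmarshal function.
func dispatchFlush() (TsMsg, error) {
	dispatcher := (&ProtoUDFactory{}).NewUnmarshalDispatcher()
	raw, err := proto.Marshal(&milvuspb.FlushRequest{
		Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_Flush, Timestamp: 42},
	})
	if err != nil {
		return nil, err
	}
	unmarshalFn, err := dispatcher.Get(commonpb.MsgType_Flush)
	if err != nil {
		return nil, err
	}
	return unmarshalFn(raw)
}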
View File

@ -105,6 +105,7 @@ var (
ErrMqTopicNotFound = newMilvusError("topic not found", 1300, false)
ErrMqTopicNotEmpty = newMilvusError("topic not empty", 1301, false)
ErrMqInternal = newMilvusError("message queue internal error", 1302, false)
ErrDenyProduceMsg = newMilvusError("deny to write the message to mq", 1303, false)
// Privilege related
// this operation is denied because the user is not authorized; the user needs to log in first
@ -130,6 +131,12 @@ var (
ErrInvalidSearchResult = newMilvusError("fail to parse search result", 1805, false)
ErrCheckPrimaryKey = newMilvusError("please check the primary key and its' type can only in [int, string]", 1806, false)
// replicate related
ErrDenyReplicateMessage = newMilvusError("deny to use the replicate message in the normal instance", 1900, false)
ErrInvalidMsgBytes = newMilvusError("invalid replicate msg bytes", 1901, false)
ErrNoAssignSegmentID = newMilvusError("no assign segment id", 1902, false)
ErrInvalidStreamObj = newMilvusError("invalid stream object", 1903, false)
// Segcore related
ErrSegcore = newMilvusError("segcore error", 2000, false)

View File

@ -162,6 +162,7 @@ type commonConfig struct {
RootCoordTimeTick ParamItem `refreshable:"true"`
RootCoordStatistics ParamItem `refreshable:"true"`
RootCoordDml ParamItem `refreshable:"false"`
ReplicateMsgChannel ParamItem `refreshable:"false"`
QueryCoordTimeTick ParamItem `refreshable:"true"`
@ -216,6 +217,8 @@ type commonConfig struct {
EnableLockMetrics ParamItem `refreshable:"false"`
LockSlowLogInfoThreshold ParamItem `refreshable:"true"`
LockSlowLogWarnThreshold ParamItem `refreshable:"true"`
TTMsgEnabled ParamItem `refreshable:"true"`
}
func (p *commonConfig) init(base *BaseTable) {
@ -266,6 +269,16 @@ func (p *commonConfig) init(base *BaseTable) {
}
p.RootCoordDml.Init(base.mgr)
p.ReplicateMsgChannel = ParamItem{
Key: "msgChannel.chanNamePrefix.replicateMsg",
Version: "2.3.2",
FallbackKeys: []string{"common.chanNamePrefix.replicateMsg"},
PanicIfEmpty: true,
Formatter: chanNamePrefix,
Export: true,
}
p.ReplicateMsgChannel.Init(base.mgr)
p.QueryCoordTimeTick = ParamItem{
Key: "msgChannel.chanNamePrefix.queryTimeTick",
Version: "2.1.0",
@ -612,6 +625,14 @@ like the old password verification when updating the credential`,
Export: true,
}
p.LockSlowLogWarnThreshold.Init(base.mgr)
p.TTMsgEnabled = ParamItem{
Key: "common.ttMsgEnabled",
Version: "2.3.2",
DefaultValue: "true",
Doc: "Whether the instance disables sending ts messages",
}
p.TTMsgEnabled.Init(base.mgr)
}
type traceConfig struct {
@ -1143,11 +1164,11 @@ type queryCoordConfig struct {
TaskMergeCap ParamItem `refreshable:"false"`
TaskExecutionCap ParamItem `refreshable:"true"`
//---- Handoff ---
// ---- Handoff ---
//Deprecated: Since 2.2.2
// Deprecated: Since 2.2.2
AutoHandoff ParamItem `refreshable:"true"`
//---- Balance ---
// ---- Balance ---
AutoBalance ParamItem `refreshable:"true"`
Balancer ParamItem `refreshable:"true"`
GlobalRowCountFactor ParamItem `refreshable:"true"`
@ -1186,7 +1207,7 @@ type queryCoordConfig struct {
}
func (p *queryCoordConfig) init(base *BaseTable) {
//---- Task ---
// ---- Task ---
p.RetryNum = ParamItem{
Key: "queryCoord.task.retrynum",
Version: "2.2.0",

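An illustrative read of the new refreshable switch (the main wrapper is a sketch, not part of this diff); a backup instance is expected to set common.ttMsgEnabled to false so the msg streams above stop producing ts messages:

package main

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/util/paramtable"
)

func main() {
	paramtable.Init()
	// GetAsBool reflects runtime config updates, as the Watch handler in
	// NewMqMsgStream relies on.
	fmt.Println("ttMsgEnabled:", paramtable.Get().CommonCfg.TTMsgEnabled.GetAsBool())
}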
View File

@ -1,7 +1,10 @@
package paramtable
import (
"go.uber.org/zap"
"github.com/milvus-io/milvus/pkg/config"
"github.com/milvus-io/milvus/pkg/log"
)
const hookYamlFile = "hook.yaml"
@ -15,6 +18,7 @@ type hookConfig struct {
func (h *hookConfig) init(base *BaseTable) {
h.hookBase = base
log.Info("hook config", zap.Any("hook", base.FileConfigs()))
h.SoPath = ParamItem{
Key: "soPath",

View File

@ -0,0 +1,301 @@
package resource
import (
"sync"
"time"
)
const (
NoExpiration time.Duration = -1
DefaultCheckInterval = 2 * time.Second
DefaultExpiration = 4 * time.Second
)
type Resource interface {
Type() string
Name() string
Get() any
Close()
// KeepAliveTime returns the time duration of the resource keep alive if the resource isn't used.
KeepAliveTime() time.Duration
}
type wrapper struct {
res Resource
obj any
typ string
name string
closeFunc func()
keepAliveTime time.Duration
}
func (w *wrapper) Type() string {
if w.typ != "" {
return w.typ
}
if w.res == nil {
return ""
}
return w.res.Type()
}
func (w *wrapper) Name() string {
if w.name != "" {
return w.name
}
if w.res == nil {
return ""
}
return w.res.Name()
}
func (w *wrapper) Get() any {
if w.obj != nil {
return w.obj
}
if w.res == nil {
return nil
}
return w.res.Get()
}
func (w *wrapper) Close() {
if w.res != nil {
w.res.Close()
}
if w.closeFunc != nil {
w.closeFunc()
}
}
func (w *wrapper) KeepAliveTime() time.Duration {
if w.keepAliveTime != 0 {
return w.keepAliveTime
}
if w.res == nil {
return 0
}
return w.res.KeepAliveTime()
}
type Option func(res *wrapper)
func WithResource(res Resource) Option {
return func(w *wrapper) {
w.res = res
}
}
func WithType(typ string) Option {
return func(res *wrapper) {
res.typ = typ
}
}
func WithName(name string) Option {
return func(res *wrapper) {
res.name = name
}
}
func WithObj(obj any) Option {
return func(res *wrapper) {
res.obj = obj
}
}
func WithCloseFunc(closeFunc func()) Option {
return func(res *wrapper) {
res.closeFunc = closeFunc
}
}
func WithKeepAliveTime(keepAliveTime time.Duration) Option {
return func(res *wrapper) {
res.keepAliveTime = keepAliveTime
}
}
func NewResource(opts ...Option) Resource {
w := &wrapper{}
for _, opt := range opts {
opt(w)
}
return w
}
func NewSimpleResource(obj any, typ, name string, keepAliveTime time.Duration, closeFunc func()) Resource {
return NewResource(WithObj(obj), WithType(typ), WithName(name), WithKeepAliveTime(keepAliveTime), WithCloseFunc(closeFunc))
}
type Manager interface {
Get(typ, name string, newResourceFunc NewResourceFunc) (Resource, error)
Delete(typ, name string) Resource
Close()
}
type item struct {
res Resource
updateTimeChan chan int64
deleteMark chan struct{}
expiration int64
}
type manager struct {
resources map[string]map[string]*item // key: resource type, value: resource name -> resource
checkInterval time.Duration
defaultExpiration time.Duration
defaultTypeExpirations map[string]time.Duration // key: resource type, value: expiration
mu sync.RWMutex
wg sync.WaitGroup
stop chan struct{}
stopOnce sync.Once
}
func NewManager(checkInterval, defaultExpiration time.Duration, defaultTypeExpirations map[string]time.Duration) Manager {
if checkInterval <= 0 {
checkInterval = DefaultCheckInterval
}
if defaultExpiration <= 0 {
defaultExpiration = DefaultExpiration
}
if defaultTypeExpirations == nil {
defaultTypeExpirations = make(map[string]time.Duration)
}
m := &manager{
resources: make(map[string]map[string]*item),
checkInterval: checkInterval,
defaultExpiration: defaultExpiration,
defaultTypeExpirations: defaultTypeExpirations,
stop: make(chan struct{}),
}
m.wg.Add(1)
go m.backgroundGC()
return m
}
func (m *manager) backgroundGC() {
ticker := time.NewTicker(m.checkInterval)
defer m.wg.Done()
defer ticker.Stop()
for {
select {
case <-ticker.C:
m.gc()
case <-m.stop:
m.mu.Lock()
for _, typMap := range m.resources {
for _, item := range typMap {
item.res.Close()
}
}
m.resources = nil
m.mu.Unlock()
return
}
}
}
func (m *manager) gc() {
m.mu.Lock()
defer m.mu.Unlock()
now := time.Now().UnixNano()
for typ, typMap := range m.resources {
for resName, item := range typMap {
select {
case lastTime := <-item.updateTimeChan:
if item.expiration >= 0 {
item.expiration = lastTime
}
case <-item.deleteMark:
item.res.Close()
delete(typMap, resName)
default:
if item.expiration >= 0 && item.expiration <= now {
item.res.Close()
delete(typMap, resName)
}
}
}
if len(typMap) == 0 {
delete(m.resources, typ)
}
}
}
func (m *manager) updateExpire(item *item) {
select {
case item.updateTimeChan <- time.Now().UnixNano() + item.res.KeepAliveTime().Nanoseconds():
default:
}
}
type NewResourceFunc func() (Resource, error)
func (m *manager) Get(typ, name string, newResourceFunc NewResourceFunc) (Resource, error) {
m.mu.RLock()
typMap, ok := m.resources[typ]
if ok {
item := typMap[name]
if item != nil {
m.mu.RUnlock()
m.updateExpire(item)
return item.res, nil
}
}
m.mu.RUnlock()
m.mu.Lock()
defer m.mu.Unlock()
typMap, ok = m.resources[typ]
if !ok {
typMap = make(map[string]*item)
m.resources[typ] = typMap
}
ite, ok := typMap[name]
if !ok {
res, err := newResourceFunc()
if err != nil {
return nil, err
}
if res.KeepAliveTime() == 0 {
defaultExpiration := m.defaultTypeExpirations[typ]
if defaultExpiration == 0 {
defaultExpiration = m.defaultExpiration
}
res = NewResource(WithResource(res), WithKeepAliveTime(defaultExpiration))
}
ite = &item{
res: res,
updateTimeChan: make(chan int64, 1),
deleteMark: make(chan struct{}, 1),
}
typMap[name] = ite
}
m.updateExpire(ite)
return ite.res, nil
}
func (m *manager) Delete(typ, name string) Resource {
m.mu.Lock()
defer m.mu.Unlock()
typMap, ok := m.resources[typ]
if !ok {
return nil
}
ite, ok := typMap[name]
if !ok {
return nil
}
select {
case ite.deleteMark <- struct{}{}:
default:
}
return ite.res
}
func (m *manager) Close() {
m.stopOnce.Do(func() {
close(m.stop)
m.wg.Wait()
})
}

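A usage sketch of the manager above (illustrative names and durations; the import path is assumed from this patch's package layout):

package main

import (
	"fmt"
	"time"

	"github.com/milvus-io/milvus/pkg/util/resource"
)

func main() {
	// GC every second; idle resources expire after 3s unless a per-type default applies.
	m := resource.NewManager(time.Second, 3*time.Second, nil)
	defer m.Close()

	res, err := m.Get("stream", "ch-1", func() (resource.Resource, error) {
		// The object stands in for something expensive, e.g. a replicate msg stream.
		return resource.NewSimpleResource("client-obj", "stream", "ch-1", 0, func() {
			fmt.Println("closing ch-1 client")
		}), nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Get()) // client-obj; stays alive while Get keeps being called
}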
View File

@ -0,0 +1,160 @@
package resource
import (
"testing"
"time"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
)
func TestResourceManager(t *testing.T) {
{
manager := NewManager(0, 0, nil)
manager.Close()
}
manager := NewManager(500*time.Millisecond, 2*time.Second, map[string]time.Duration{
"test": time.Second,
})
defer manager.Close()
{
assert.Nil(t, manager.Delete("test", "test"))
res, err := manager.Get("stream", "foo", func() (Resource, error) {
return NewSimpleResource("stream-foo", "stream", "foo", 0, nil), nil
})
assert.NoError(t, err)
assert.Equal(t, 2*time.Second, res.KeepAliveTime())
assert.Equal(t, "stream-foo", res.Get())
}
{
_, err := manager.Get("err", "foo", func() (Resource, error) {
return nil, errors.New("mock test error")
})
assert.Error(t, err)
}
{
res, err := manager.Get("test", "foo", func() (Resource, error) {
return NewSimpleResource("foo", "test", "foo", 0, nil), nil
})
assert.NoError(t, err)
assert.Equal(t, "foo", res.Get())
assert.Nil(t, manager.Delete("test", "test"))
}
{
time.Sleep(500 * time.Millisecond)
res, err := manager.Get("test", "foo", func() (Resource, error) {
return NewSimpleResource("foox", "test", "foo", 0, nil), nil
})
assert.NoError(t, err)
assert.Equal(t, "foo", res.Get())
}
{
time.Sleep(3 * time.Second)
res, err := manager.Get("test", "foo", func() (Resource, error) {
return NewSimpleResource("foo2", "test", "foo", 0, nil), nil
})
assert.NoError(t, err)
assert.Equal(t, "foo2", res.Get(), res.KeepAliveTime())
}
{
res := manager.Delete("test", "foo")
assert.Equal(t, "foo2", res.Get())
res = manager.Delete("test", "foo")
assert.Equal(t, "foo2", res.Get())
time.Sleep(time.Second)
res, err := manager.Get("test", "foo", func() (Resource, error) {
return NewSimpleResource("foo3", "test", "foo", 0, nil), nil
})
assert.NoError(t, err)
assert.Equal(t, "foo3", res.Get())
}
{
time.Sleep(2 * time.Second)
res, err := manager.Get("stream", "foo", func() (Resource, error) {
return NewSimpleResource("stream-foox", "stream", "foo", 0, nil), nil
})
assert.NoError(t, err)
assert.Equal(t, "stream-foox", res.Get())
}
{
var res Resource
var err error
res, err = manager.Get("ever", "foo", func() (Resource, error) {
return NewSimpleResource("ever-foo", "ever", "foo", NoExpiration, nil), nil
})
assert.NoError(t, err)
assert.Equal(t, "ever-foo", res.Get())
res, err = manager.Get("ever", "foo", func() (Resource, error) {
return NewSimpleResource("ever-foo2", "ever", "foo", NoExpiration, nil), nil
})
assert.NoError(t, err)
assert.Equal(t, "ever-foo", res.Get())
manager.Delete("ever", "foo")
time.Sleep(time.Second)
res, err = manager.Get("ever", "foo", func() (Resource, error) {
return NewSimpleResource("ever-foo3", "ever", "foo", NoExpiration, nil), nil
})
assert.NoError(t, err)
assert.Equal(t, "ever-foo3", res.Get())
}
}
func TestResource(t *testing.T) {
{
isClose := false
res := NewSimpleResource("obj", "test", "foo", 0, func() {
isClose = true
})
assert.Equal(t, "test", res.Type())
assert.Equal(t, "foo", res.Name())
assert.Equal(t, "obj", res.Get())
assert.EqualValues(t, 0, res.KeepAliveTime())
res.Close()
assert.True(t, isClose)
}
{
res := NewResource()
assert.Empty(t, res.Type())
assert.Empty(t, res.Name())
assert.Empty(t, res.Get())
assert.EqualValues(t, 0, res.KeepAliveTime())
}
{
isClose := false
res := NewSimpleResource("obj", "test", "foo", 0, func() {
isClose = true
})
isClose2 := false
wrapper := NewResource(WithResource(res), WithType("test2"), WithName("foo2"), WithObj("obj2"), WithKeepAliveTime(time.Second), WithCloseFunc(func() {
isClose2 = true
}))
wrapper.Close()
assert.Equal(t, "test2", wrapper.Type())
assert.Equal(t, "foo2", wrapper.Name())
assert.Equal(t, "obj2", wrapper.Get())
assert.Equal(t, time.Second, wrapper.KeepAliveTime())
assert.True(t, isClose)
assert.True(t, isClose2)
}
{
isClose := false
res := NewSimpleResource("obj", "test", "foo", 0, func() {
isClose = true
})
wrapper := NewResource(WithResource(res))
assert.Equal(t, "test", wrapper.Type())
assert.Equal(t, "foo", wrapper.Name())
assert.Equal(t, "obj", wrapper.Get())
assert.EqualValues(t, 0, wrapper.KeepAliveTime())
wrapper.Close()
assert.True(t, isClose)
}
}