Remove partition tag

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
bigsheeper 2021-02-03 16:44:38 +08:00 committed by yefu.chen
parent 6c3f169ecc
commit abffdbff62
52 changed files with 1825 additions and 2108 deletions

View File

@ -9,5 +9,5 @@ dataservice:
defaultSizePerRecord: 1024
# old name: segmentExpireDuration: 2000
IDAssignExpiration: 2000 # ms
insertChannelNum: 16
insertChannelNumPerCollection: 4
dataNodeNum: 1

go.mod
View File

@ -18,7 +18,6 @@ require (
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-basic/ipv4 v1.0.0
github.com/go-kit/kit v0.9.0
github.com/gogo/protobuf v1.2.1
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/mock v1.3.1
github.com/golang/protobuf v1.3.2
@ -27,7 +26,6 @@ require (
github.com/klauspost/compress v1.10.11 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/minio/minio-go/v7 v7.0.5
github.com/modern-go/reflect2 v1.0.1
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/oklog/run v1.1.0
github.com/onsi/ginkgo v1.12.1 // indirect
@ -59,7 +57,6 @@ require (
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
golang.org/x/net v0.0.0-20200904194848-62affa334b73
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f // indirect
golang.org/x/text v0.3.3
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect

View File

@ -1144,7 +1144,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT
~0u, // no _weak_field_map_
PROTOBUF_FIELD_OFFSET(::milvus::proto::milvus::FlushRequest, base_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::milvus::FlushRequest, db_name_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::milvus::FlushRequest, collection_names_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::milvus::FlushRequest, collection_name_),
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::milvus::proto::milvus::RegisterLinkResponse, _internal_metadata_),
~0u, // no _extensions_
@ -1359,72 +1359,72 @@ const char descriptor_table_protodef_milvus_2eproto[] PROTOBUF_SECTION_VARIABLE(
"s\022\013\n\003IDs\030\001 \003(\003\022\020\n\010row_data\030\002 \003(\014\022\016\n\006scor"
"es\030\003 \003(\002\"J\n\rSearchResults\022+\n\006status\030\001 \001("
"\0132\033.milvus.proto.common.Status\022\014\n\004hits\030\002"
" \003(\014\"e\n\014FlushRequest\022*\n\004base\030\001 \001(\0132\034.mil"
" \003(\014\"d\n\014FlushRequest\022*\n\004base\030\001 \001(\0132\034.mil"
"vus.proto.common.MsgBase\022\017\n\007db_name\030\002 \001("
"\t\022\030\n\020collection_names\030\003 \003(\t\"r\n\024RegisterL"
"inkResponse\022-\n\007address\030\001 \001(\0132\034.milvus.pr"
"oto.common.Address\022+\n\006status\030\002 \001(\0132\033.mil"
"vus.proto.common.Status*@\n\017PlaceholderTy"
"pe\022\010\n\004NONE\020\000\022\021\n\rVECTOR_BINARY\020d\022\020\n\014VECTO"
"R_FLOAT\020e2\357\020\n\rMilvusService\022_\n\020CreateCol"
"lection\022,.milvus.proto.milvus.CreateColl"
"ectionRequest\032\033.milvus.proto.common.Stat"
"us\"\000\022[\n\016DropCollection\022*.milvus.proto.mi"
"lvus.DropCollectionRequest\032\033.milvus.prot"
"o.common.Status\"\000\022_\n\rHasCollection\022).mil"
"vus.proto.milvus.HasCollectionRequest\032!."
"milvus.proto.milvus.BoolResponse\"\000\022[\n\016Lo"
"adCollection\022*.milvus.proto.milvus.LoadC"
"ollectionRequest\032\033.milvus.proto.common.S"
"tatus\"\000\022a\n\021ReleaseCollection\022-.milvus.pr"
"oto.milvus.ReleaseCollectionRequest\032\033.mi"
"lvus.proto.common.Status\"\000\022w\n\022DescribeCo"
"llection\022..milvus.proto.milvus.DescribeC"
"ollectionRequest\032/.milvus.proto.milvus.D"
"escribeCollectionResponse\"\000\022v\n\027GetCollec"
"tionStatistics\022+.milvus.proto.milvus.Col"
"lectionStatsRequest\032,.milvus.proto.milvu"
"s.CollectionStatsResponse\"\000\022l\n\017ShowColle"
"ctions\022*.milvus.proto.milvus.ShowCollect"
"ionRequest\032+.milvus.proto.milvus.ShowCol"
"lectionResponse\"\000\022]\n\017CreatePartition\022+.m"
"ilvus.proto.milvus.CreatePartitionReques"
"t\032\033.milvus.proto.common.Status\"\000\022Y\n\rDrop"
"Partition\022).milvus.proto.milvus.DropPart"
"itionRequest\032\033.milvus.proto.common.Statu"
"s\"\000\022]\n\014HasPartition\022(.milvus.proto.milvu"
"s.HasPartitionRequest\032!.milvus.proto.mil"
"vus.BoolResponse\"\000\022Y\n\016LoadPartitions\022(.m"
"ilvus.proto.milvus.LoadPartitonRequest\032\033"
".milvus.proto.common.Status\"\000\022`\n\021Release"
"Partitions\022,.milvus.proto.milvus.Release"
"PartitionRequest\032\033.milvus.proto.common.S"
"tatus\"\000\022s\n\026GetPartitionStatistics\022*.milv"
"us.proto.milvus.PartitionStatsRequest\032+."
"milvus.proto.milvus.PartitionStatsRespon"
"se\"\000\022i\n\016ShowPartitions\022).milvus.proto.mi"
"lvus.ShowPartitionRequest\032*.milvus.proto"
".milvus.ShowPartitionResponse\"\000\022U\n\013Creat"
"eIndex\022\'.milvus.proto.milvus.CreateIndex"
"Request\032\033.milvus.proto.common.Status\"\000\022h"
"\n\rDescribeIndex\022).milvus.proto.milvus.De"
"scribeIndexRequest\032*.milvus.proto.milvus"
".DescribeIndexResponse\"\000\022b\n\rGetIndexStat"
"e\022&.milvus.proto.milvus.IndexStateReques"
"t\032\'.milvus.proto.milvus.IndexStateRespon"
"se\"\000\022S\n\006Insert\022\".milvus.proto.milvus.Ins"
"ertRequest\032#.milvus.proto.milvus.InsertR"
"esponse\"\000\022R\n\006Search\022\".milvus.proto.milvu"
"s.SearchRequest\032\".milvus.proto.milvus.Se"
"archResults\"\000\022I\n\005Flush\022!.milvus.proto.mi"
"lvus.FlushRequest\032\033.milvus.proto.common."
"Status\"\000\022Q\n\014GetDdChannel\022\032.milvus.proto."
"common.Empty\032#.milvus.proto.milvus.Strin"
"gResponse\"\0002g\n\014ProxyService\022W\n\014RegisterL"
"ink\022\032.milvus.proto.common.Empty\032).milvus"
".proto.milvus.RegisterLinkResponse\"\000BBZ@"
"github.com/zilliztech/milvus-distributed"
"/internal/proto/milvuspbb\006proto3"
"\t\022\027\n\017collection_name\030\003 \001(\t\"r\n\024RegisterLi"
"nkResponse\022-\n\007address\030\001 \001(\0132\034.milvus.pro"
"to.common.Address\022+\n\006status\030\002 \001(\0132\033.milv"
"us.proto.common.Status*@\n\017PlaceholderTyp"
"e\022\010\n\004NONE\020\000\022\021\n\rVECTOR_BINARY\020d\022\020\n\014VECTOR"
"_FLOAT\020e2\357\020\n\rMilvusService\022_\n\020CreateColl"
"ection\022,.milvus.proto.milvus.CreateColle"
"ctionRequest\032\033.milvus.proto.common.Statu"
"s\"\000\022[\n\016DropCollection\022*.milvus.proto.mil"
"vus.DropCollectionRequest\032\033.milvus.proto"
".common.Status\"\000\022_\n\rHasCollection\022).milv"
"us.proto.milvus.HasCollectionRequest\032!.m"
"ilvus.proto.milvus.BoolResponse\"\000\022[\n\016Loa"
"dCollection\022*.milvus.proto.milvus.LoadCo"
"llectionRequest\032\033.milvus.proto.common.St"
"atus\"\000\022a\n\021ReleaseCollection\022-.milvus.pro"
"to.milvus.ReleaseCollectionRequest\032\033.mil"
"vus.proto.common.Status\"\000\022w\n\022DescribeCol"
"lection\022..milvus.proto.milvus.DescribeCo"
"llectionRequest\032/.milvus.proto.milvus.De"
"scribeCollectionResponse\"\000\022v\n\027GetCollect"
"ionStatistics\022+.milvus.proto.milvus.Coll"
"ectionStatsRequest\032,.milvus.proto.milvus"
".CollectionStatsResponse\"\000\022l\n\017ShowCollec"
"tions\022*.milvus.proto.milvus.ShowCollecti"
"onRequest\032+.milvus.proto.milvus.ShowColl"
"ectionResponse\"\000\022]\n\017CreatePartition\022+.mi"
"lvus.proto.milvus.CreatePartitionRequest"
"\032\033.milvus.proto.common.Status\"\000\022Y\n\rDropP"
"artition\022).milvus.proto.milvus.DropParti"
"tionRequest\032\033.milvus.proto.common.Status"
"\"\000\022]\n\014HasPartition\022(.milvus.proto.milvus"
".HasPartitionRequest\032!.milvus.proto.milv"
"us.BoolResponse\"\000\022Y\n\016LoadPartitions\022(.mi"
"lvus.proto.milvus.LoadPartitonRequest\032\033."
"milvus.proto.common.Status\"\000\022`\n\021ReleaseP"
"artitions\022,.milvus.proto.milvus.ReleaseP"
"artitionRequest\032\033.milvus.proto.common.St"
"atus\"\000\022s\n\026GetPartitionStatistics\022*.milvu"
"s.proto.milvus.PartitionStatsRequest\032+.m"
"ilvus.proto.milvus.PartitionStatsRespons"
"e\"\000\022i\n\016ShowPartitions\022).milvus.proto.mil"
"vus.ShowPartitionRequest\032*.milvus.proto."
"milvus.ShowPartitionResponse\"\000\022U\n\013Create"
"Index\022\'.milvus.proto.milvus.CreateIndexR"
"equest\032\033.milvus.proto.common.Status\"\000\022h\n"
"\rDescribeIndex\022).milvus.proto.milvus.Des"
"cribeIndexRequest\032*.milvus.proto.milvus."
"DescribeIndexResponse\"\000\022b\n\rGetIndexState"
"\022&.milvus.proto.milvus.IndexStateRequest"
"\032\'.milvus.proto.milvus.IndexStateRespons"
"e\"\000\022S\n\006Insert\022\".milvus.proto.milvus.Inse"
"rtRequest\032#.milvus.proto.milvus.InsertRe"
"sponse\"\000\022R\n\006Search\022\".milvus.proto.milvus"
".SearchRequest\032\".milvus.proto.milvus.Sea"
"rchResults\"\000\022I\n\005Flush\022!.milvus.proto.mil"
"vus.FlushRequest\032\033.milvus.proto.common.S"
"tatus\"\000\022Q\n\014GetDdChannel\022\032.milvus.proto.c"
"ommon.Empty\032#.milvus.proto.milvus.String"
"Response\"\0002g\n\014ProxyService\022W\n\014RegisterLi"
"nk\022\032.milvus.proto.common.Empty\032).milvus."
"proto.milvus.RegisterLinkResponse\"\000BBZ@g"
"ithub.com/zilliztech/milvus-distributed/"
"internal/proto/milvuspbb\006proto3"
;
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_milvus_2eproto_deps[2] = {
&::descriptor_table_common_2eproto,
@ -1476,7 +1476,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_mil
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_milvus_2eproto_once;
static bool descriptor_table_milvus_2eproto_initialized = false;
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_milvus_2eproto = {
&descriptor_table_milvus_2eproto_initialized, descriptor_table_protodef_milvus_2eproto, "milvus.proto", 7312,
&descriptor_table_milvus_2eproto_initialized, descriptor_table_protodef_milvus_2eproto, "milvus.proto", 7311,
&descriptor_table_milvus_2eproto_once, descriptor_table_milvus_2eproto_sccs, descriptor_table_milvus_2eproto_deps, 41, 2,
schemas, file_default_instances, TableStruct_milvus_2eproto::offsets,
file_level_metadata_milvus_2eproto, 41, file_level_enum_descriptors_milvus_2eproto, file_level_service_descriptors_milvus_2eproto,
@ -17153,13 +17153,16 @@ FlushRequest::FlushRequest()
}
FlushRequest::FlushRequest(const FlushRequest& from)
: ::PROTOBUF_NAMESPACE_ID::Message(),
_internal_metadata_(nullptr),
collection_names_(from.collection_names_) {
_internal_metadata_(nullptr) {
_internal_metadata_.MergeFrom(from._internal_metadata_);
db_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
if (!from.db_name().empty()) {
db_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.db_name_);
}
collection_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
if (!from.collection_name().empty()) {
collection_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.collection_name_);
}
if (from.has_base()) {
base_ = new ::milvus::proto::common::MsgBase(*from.base_);
} else {
@ -17171,6 +17174,7 @@ FlushRequest::FlushRequest(const FlushRequest& from)
void FlushRequest::SharedCtor() {
::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_FlushRequest_milvus_2eproto.base);
db_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
collection_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
base_ = nullptr;
}
@ -17181,6 +17185,7 @@ FlushRequest::~FlushRequest() {
void FlushRequest::SharedDtor() {
db_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
collection_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
if (this != internal_default_instance()) delete base_;
}
@ -17199,8 +17204,8 @@ void FlushRequest::Clear() {
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
collection_names_.Clear();
db_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
collection_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
if (GetArenaNoVirtual() == nullptr && base_ != nullptr) {
delete base_;
}
@ -17230,16 +17235,11 @@ const char* FlushRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_I
CHK_(ptr);
} else goto handle_unusual;
continue;
// repeated string collection_names = 3;
// string collection_name = 3;
case 3:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
ptr -= 1;
do {
ptr += 1;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_collection_names(), ptr, ctx, "milvus.proto.milvus.FlushRequest.collection_names");
CHK_(ptr);
if (!ctx->DataAvailable(ptr)) break;
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 26);
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_collection_name(), ptr, ctx, "milvus.proto.milvus.FlushRequest.collection_name");
CHK_(ptr);
} else goto handle_unusual;
continue;
default: {
@ -17298,16 +17298,15 @@ bool FlushRequest::MergePartialFromCodedStream(
break;
}
// repeated string collection_names = 3;
// string collection_name = 3;
case 3: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
input, this->add_collection_names()));
input, this->mutable_collection_name()));
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->collection_names(this->collection_names_size() - 1).data(),
static_cast<int>(this->collection_names(this->collection_names_size() - 1).length()),
this->collection_name().data(), static_cast<int>(this->collection_name().length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
"milvus.proto.milvus.FlushRequest.collection_names"));
"milvus.proto.milvus.FlushRequest.collection_name"));
} else {
goto handle_unusual;
}
@ -17357,14 +17356,14 @@ void FlushRequest::SerializeWithCachedSizes(
2, this->db_name(), output);
}
// repeated string collection_names = 3;
for (int i = 0, n = this->collection_names_size(); i < n; i++) {
// string collection_name = 3;
if (this->collection_name().size() > 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->collection_names(i).data(), static_cast<int>(this->collection_names(i).length()),
this->collection_name().data(), static_cast<int>(this->collection_name().length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.milvus.FlushRequest.collection_names");
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
3, this->collection_names(i), output);
"milvus.proto.milvus.FlushRequest.collection_name");
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
3, this->collection_name(), output);
}
if (_internal_metadata_.have_unknown_fields()) {
@ -17398,14 +17397,15 @@ void FlushRequest::SerializeWithCachedSizes(
2, this->db_name(), target);
}
// repeated string collection_names = 3;
for (int i = 0, n = this->collection_names_size(); i < n; i++) {
// string collection_name = 3;
if (this->collection_name().size() > 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->collection_names(i).data(), static_cast<int>(this->collection_names(i).length()),
this->collection_name().data(), static_cast<int>(this->collection_name().length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.milvus.FlushRequest.collection_names");
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
WriteStringToArray(3, this->collection_names(i), target);
"milvus.proto.milvus.FlushRequest.collection_name");
target =
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
3, this->collection_name(), target);
}
if (_internal_metadata_.have_unknown_fields()) {
@ -17429,14 +17429,6 @@ size_t FlushRequest::ByteSizeLong() const {
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
// repeated string collection_names = 3;
total_size += 1 *
::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->collection_names_size());
for (int i = 0, n = this->collection_names_size(); i < n; i++) {
total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
this->collection_names(i));
}
// string db_name = 2;
if (this->db_name().size() > 0) {
total_size += 1 +
@ -17444,6 +17436,13 @@ size_t FlushRequest::ByteSizeLong() const {
this->db_name());
}
// string collection_name = 3;
if (this->collection_name().size() > 0) {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
this->collection_name());
}
// .milvus.proto.common.MsgBase base = 1;
if (this->has_base()) {
total_size += 1 +
@ -17478,11 +17477,14 @@ void FlushRequest::MergeFrom(const FlushRequest& from) {
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
collection_names_.MergeFrom(from.collection_names_);
if (from.db_name().size() > 0) {
db_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.db_name_);
}
if (from.collection_name().size() > 0) {
collection_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.collection_name_);
}
if (from.has_base()) {
mutable_base()->::milvus::proto::common::MsgBase::MergeFrom(from.base());
}
@ -17509,9 +17511,10 @@ bool FlushRequest::IsInitialized() const {
void FlushRequest::InternalSwap(FlushRequest* other) {
using std::swap;
_internal_metadata_.Swap(&other->_internal_metadata_);
collection_names_.InternalSwap(CastToBase(&other->collection_names_));
db_name_.Swap(&other->db_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
collection_name_.Swap(&other->collection_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
swap(base_, other->base_);
}
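The regenerated C++ above reflects the proto change on FlushRequest: the repeated collection_names field is replaced by a single collection_name string. A minimal Go sketch of what a caller looks like against the regenerated Go bindings; the Go field names follow standard protoc-gen-go naming and the surrounding main function is illustrative only, not code from this commit.

package main

import (
	"fmt"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
)

// buildFlushRequest shows the shape of FlushRequest after this change:
// one collection per request instead of a repeated collection_names list.
func buildFlushRequest(dbName, collectionName string) *milvuspb.FlushRequest {
	return &milvuspb.FlushRequest{
		Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_kFlush},
		DbName:         dbName,
		CollectionName: collectionName, // previously: CollectionNames []string
	}
}

func main() {
	req := buildFlushRequest("default", "col1")
	fmt.Println(req.GetCollectionName())
}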

View File

@ -6689,27 +6689,10 @@ class FlushRequest :
// accessors -------------------------------------------------------
enum : int {
kCollectionNamesFieldNumber = 3,
kDbNameFieldNumber = 2,
kCollectionNameFieldNumber = 3,
kBaseFieldNumber = 1,
};
// repeated string collection_names = 3;
int collection_names_size() const;
void clear_collection_names();
const std::string& collection_names(int index) const;
std::string* mutable_collection_names(int index);
void set_collection_names(int index, const std::string& value);
void set_collection_names(int index, std::string&& value);
void set_collection_names(int index, const char* value);
void set_collection_names(int index, const char* value, size_t size);
std::string* add_collection_names();
void add_collection_names(const std::string& value);
void add_collection_names(std::string&& value);
void add_collection_names(const char* value);
void add_collection_names(const char* value, size_t size);
const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& collection_names() const;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_collection_names();
// string db_name = 2;
void clear_db_name();
const std::string& db_name() const;
@ -6721,6 +6704,17 @@ class FlushRequest :
std::string* release_db_name();
void set_allocated_db_name(std::string* db_name);
// string collection_name = 3;
void clear_collection_name();
const std::string& collection_name() const;
void set_collection_name(const std::string& value);
void set_collection_name(std::string&& value);
void set_collection_name(const char* value);
void set_collection_name(const char* value, size_t size);
std::string* mutable_collection_name();
std::string* release_collection_name();
void set_allocated_collection_name(std::string* collection_name);
// .milvus.proto.common.MsgBase base = 1;
bool has_base() const;
void clear_base();
@ -6734,8 +6728,8 @@ class FlushRequest :
class _Internal;
::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> collection_names_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr db_name_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr collection_name_;
::milvus::proto::common::MsgBase* base_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
friend struct ::TableStruct_milvus_2eproto;
@ -12619,69 +12613,55 @@ inline void FlushRequest::set_allocated_db_name(std::string* db_name) {
// @@protoc_insertion_point(field_set_allocated:milvus.proto.milvus.FlushRequest.db_name)
}
// repeated string collection_names = 3;
inline int FlushRequest::collection_names_size() const {
return collection_names_.size();
// string collection_name = 3;
inline void FlushRequest::clear_collection_name() {
collection_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
inline void FlushRequest::clear_collection_names() {
collection_names_.Clear();
inline const std::string& FlushRequest::collection_name() const {
// @@protoc_insertion_point(field_get:milvus.proto.milvus.FlushRequest.collection_name)
return collection_name_.GetNoArena();
}
inline const std::string& FlushRequest::collection_names(int index) const {
// @@protoc_insertion_point(field_get:milvus.proto.milvus.FlushRequest.collection_names)
return collection_names_.Get(index);
inline void FlushRequest::set_collection_name(const std::string& value) {
collection_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value);
// @@protoc_insertion_point(field_set:milvus.proto.milvus.FlushRequest.collection_name)
}
inline std::string* FlushRequest::mutable_collection_names(int index) {
// @@protoc_insertion_point(field_mutable:milvus.proto.milvus.FlushRequest.collection_names)
return collection_names_.Mutable(index);
inline void FlushRequest::set_collection_name(std::string&& value) {
collection_name_.SetNoArena(
&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:milvus.proto.milvus.FlushRequest.collection_name)
}
inline void FlushRequest::set_collection_names(int index, const std::string& value) {
// @@protoc_insertion_point(field_set:milvus.proto.milvus.FlushRequest.collection_names)
collection_names_.Mutable(index)->assign(value);
}
inline void FlushRequest::set_collection_names(int index, std::string&& value) {
// @@protoc_insertion_point(field_set:milvus.proto.milvus.FlushRequest.collection_names)
collection_names_.Mutable(index)->assign(std::move(value));
}
inline void FlushRequest::set_collection_names(int index, const char* value) {
inline void FlushRequest::set_collection_name(const char* value) {
GOOGLE_DCHECK(value != nullptr);
collection_names_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:milvus.proto.milvus.FlushRequest.collection_names)
collection_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
// @@protoc_insertion_point(field_set_char:milvus.proto.milvus.FlushRequest.collection_name)
}
inline void FlushRequest::set_collection_names(int index, const char* value, size_t size) {
collection_names_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:milvus.proto.milvus.FlushRequest.collection_names)
inline void FlushRequest::set_collection_name(const char* value, size_t size) {
collection_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:milvus.proto.milvus.FlushRequest.collection_name)
}
inline std::string* FlushRequest::add_collection_names() {
// @@protoc_insertion_point(field_add_mutable:milvus.proto.milvus.FlushRequest.collection_names)
return collection_names_.Add();
inline std::string* FlushRequest::mutable_collection_name() {
// @@protoc_insertion_point(field_mutable:milvus.proto.milvus.FlushRequest.collection_name)
return collection_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
inline void FlushRequest::add_collection_names(const std::string& value) {
collection_names_.Add()->assign(value);
// @@protoc_insertion_point(field_add:milvus.proto.milvus.FlushRequest.collection_names)
inline std::string* FlushRequest::release_collection_name() {
// @@protoc_insertion_point(field_release:milvus.proto.milvus.FlushRequest.collection_name)
return collection_name_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
inline void FlushRequest::add_collection_names(std::string&& value) {
collection_names_.Add(std::move(value));
// @@protoc_insertion_point(field_add:milvus.proto.milvus.FlushRequest.collection_names)
}
inline void FlushRequest::add_collection_names(const char* value) {
GOOGLE_DCHECK(value != nullptr);
collection_names_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:milvus.proto.milvus.FlushRequest.collection_names)
}
inline void FlushRequest::add_collection_names(const char* value, size_t size) {
collection_names_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:milvus.proto.milvus.FlushRequest.collection_names)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
FlushRequest::collection_names() const {
// @@protoc_insertion_point(field_list:milvus.proto.milvus.FlushRequest.collection_names)
return collection_names_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
FlushRequest::mutable_collection_names() {
// @@protoc_insertion_point(field_mutable_list:milvus.proto.milvus.FlushRequest.collection_names)
return &collection_names_;
inline void FlushRequest::set_allocated_collection_name(std::string* collection_name) {
if (collection_name != nullptr) {
} else {
}
collection_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), collection_name);
// @@protoc_insertion_point(field_set_allocated:milvus.proto.milvus.FlushRequest.collection_name)
}
// -------------------------------------------------------------------

View File

@ -0,0 +1,137 @@
package writerclient
import (
"strconv"
"github.com/golang/protobuf/proto"
"go.etcd.io/etcd/clientv3"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
pb "github.com/zilliztech/milvus-distributed/internal/proto/writerpb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
type UniqueID = typeutil.UniqueID
type Timestamp = typeutil.Timestamp
type Client struct {
kvClient kv.TxnBase // client of a reliable kv service, i.e. etcd client
kvPrefix string
flushStream msgstream.MsgStream
}
func NewWriterClient(etcdAddress string, kvRootPath string, writeNodeSegKvSubPath string, flushStream msgstream.MsgStream) (*Client, error) {
// init kv client
etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
if err != nil {
return nil, err
}
kvClient := etcdkv.NewEtcdKV(etcdClient, kvRootPath)
return &Client{
kvClient: kvClient,
kvPrefix: writeNodeSegKvSubPath,
flushStream: flushStream,
}, nil
}
type SegmentDescription struct {
SegmentID UniqueID
IsClosed bool
OpenTime Timestamp
CloseTime Timestamp
}
func (c *Client) FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error {
baseMsg := msgstream.BaseMsg{
BeginTimestamp: 0,
EndTimestamp: 0,
HashValues: []uint32{0},
}
flushMsg := internalpb2.FlushMsg{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kFlush,
Timestamp: timestamp,
},
SegmentID: segmentID,
CollectionID: collectionID,
PartitionTag: partitionTag,
}
fMsg := &msgstream.FlushMsg{
BaseMsg: baseMsg,
FlushMsg: flushMsg,
}
msgPack := msgstream.MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, fMsg)
err := c.flushStream.Produce(&msgPack)
return err
}
func (c *Client) DescribeSegment(segmentID UniqueID) (*SegmentDescription, error) {
// query etcd
ret := &SegmentDescription{
SegmentID: segmentID,
IsClosed: false,
}
key := c.kvPrefix + strconv.FormatInt(segmentID, 10)
etcdKV, ok := c.kvClient.(*etcdkv.EtcdKV)
if !ok {
return nil, errors.New("type assertion failed for etcd kv")
}
count, err := etcdKV.GetCount(key)
if err != nil {
return nil, err
}
if count <= 0 {
ret.IsClosed = false
return ret, nil
}
value, err := c.kvClient.Load(key)
if err != nil {
return ret, err
}
flushMeta := pb.SegmentFlushMeta{}
err = proto.UnmarshalText(value, &flushMeta)
if err != nil {
return ret, err
}
ret.IsClosed = flushMeta.IsClosed
ret.OpenTime = flushMeta.OpenTime
ret.CloseTime = flushMeta.CloseTime
return ret, nil
}
func (c *Client) GetInsertBinlogPaths(segmentID UniqueID) (map[int64][]string, error) {
key := c.kvPrefix + strconv.FormatInt(segmentID, 10)
value, err := c.kvClient.Load(key)
if err != nil {
return nil, err
}
flushMeta := pb.SegmentFlushMeta{}
err = proto.UnmarshalText(value, &flushMeta)
if err != nil {
return nil, err
}
ret := make(map[int64][]string)
for _, field := range flushMeta.Fields {
ret[field.FieldID] = field.BinlogPaths
}
return ret, nil
}
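A short usage sketch of the writerclient added above, assuming a reachable etcd endpoint and a pre-built flush MsgStream; the etcd address, key paths, and IDs are placeholders for illustration, not values from this commit.

package writerclient

import (
	"fmt"
	"log"

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
)

// exampleUsage sketches how a write-path component might drive the Client:
// flush a segment, then poll its flush status and binlog paths via etcd.
func exampleUsage(flushStream msgstream.MsgStream) {
	cli, err := NewWriterClient("localhost:2379", "by-dev/kv", "writer/segment/", flushStream)
	if err != nil {
		log.Fatal(err)
	}

	segID, collID := UniqueID(1), UniqueID(100)
	if err := cli.FlushSegment(segID, collID, "default", Timestamp(0)); err != nil {
		log.Fatal(err)
	}

	desc, err := cli.DescribeSegment(segID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("segment closed:", desc.IsClosed)

	paths, err := cli.GetInsertBinlogPaths(segID)
	if err != nil {
		log.Fatal(err)
	}
	for fieldID, p := range paths {
		fmt.Println("field", fieldID, "binlogs:", p)
	}
}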

View File

@ -2,7 +2,6 @@ package datanode
import (
"context"
"fmt"
"io"
"log"
"time"
@ -83,9 +82,9 @@ func NewDataNode(ctx context.Context) *DataNode {
node := &DataNode{
ctx: ctx2,
cancel: cancel2,
NodeID: Params.NodeID, // GOOSE TODO: How to init
NodeID: Params.NodeID, // GOOSE TODO How to init
Role: typeutil.DataNodeRole,
State: internalpb2.StateCode_INITIALIZING, // GOOSE TODO: atomic
State: internalpb2.StateCode_INITIALIZING,
dataSyncService: nil,
metaService: nil,
masterService: nil,
@ -97,26 +96,15 @@ func NewDataNode(ctx context.Context) *DataNode {
}
func (node *DataNode) SetMasterServiceInterface(ms MasterServiceInterface) error {
switch {
case ms == nil, node.masterService != nil:
return errors.New("Nil parameter or repeatly set")
default:
node.masterService = ms
return nil
}
node.masterService = ms
return nil
}
func (node *DataNode) SetDataServiceInterface(ds DataServiceInterface) error {
switch {
case ds == nil, node.dataService != nil:
return errors.New("Nil parameter or repeatly set")
default:
node.dataService = ds
return nil
}
node.dataService = ds
return nil
}
// Suppose dataservice is in INITIALIZING
func (node *DataNode) Init() error {
req := &datapb.RegisterNodeRequest{
@ -157,15 +145,11 @@ func (node *DataNode) Init() error {
}
var alloc allocator = newAllocatorImpl(node.masterService)
chanSize := 100
node.flushChan = make(chan *flushMsg, chanSize)
node.dataSyncService = newDataSyncService(node.ctx, node.flushChan, replica, alloc)
node.metaService = newMetaService(node.ctx, replica, node.masterService)
node.replica = replica
node.dataSyncService.initNodes()
// --- Opentracing ---
cfg := &config.Configuration{
@ -190,38 +174,19 @@ func (node *DataNode) Init() error {
}
func (node *DataNode) Start() error {
go node.dataSyncService.start()
node.metaService.init()
node.State = internalpb2.StateCode_HEALTHY
return nil
}
// DataNode is HEALTHY until StartSync() is called
func (node *DataNode) StartSync() {
node.dataSyncService.init()
go node.dataSyncService.start()
node.State = internalpb2.StateCode_HEALTHY
}
func (node *DataNode) WatchDmChannels(in *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
}
log.Println("Init insert channel names:", in.GetChannelNames())
Params.InsertChannelNames = append(Params.InsertChannelNames, in.GetChannelNames()...)
switch {
case node.State != internalpb2.StateCode_HEALTHY:
status.Reason = fmt.Sprintf("DataNode %d not healthy!", node.NodeID)
return status, errors.New(status.GetReason())
case len(Params.InsertChannelNames) != 0:
status.Reason = fmt.Sprintf("DataNode has %d already set insert channels!", node.NodeID)
return status, errors.New(status.GetReason())
default:
Params.InsertChannelNames = in.GetChannelNames()
status.ErrorCode = commonpb.ErrorCode_SUCCESS
node.StartSync()
return status, nil
}
return &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS}, nil
}
func (node *DataNode) GetComponentStates() (*internalpb2.ComponentStates, error) {
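After this commit the DataNode wiring is simpler: the interface setters no longer reject repeated calls and WatchDmChannels appends the channel names and returns SUCCESS. A minimal sketch of the expected call order, assuming concrete master/data service implementations; the channel name is a placeholder and the datapb import path follows the pattern of the other proto packages in this diff.

package datanode

import (
	"context"
	"log"

	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
)

// startDataNode sketches the wiring order after this commit: set the service
// interfaces, Init, Start, then hand the insert channels to the node.
func startDataNode(ctx context.Context, ms MasterServiceInterface, ds DataServiceInterface) error {
	node := NewDataNode(ctx)
	if err := node.SetMasterServiceInterface(ms); err != nil {
		return err
	}
	if err := node.SetDataServiceInterface(ds); err != nil {
		return err
	}
	if err := node.Init(); err != nil {
		return err
	}
	if err := node.Start(); err != nil {
		return err
	}
	status, err := node.WatchDmChannels(&datapb.WatchDmChannelRequest{
		ChannelNames: []string{"insert-channel-0"}, // placeholder channel name
	})
	if err != nil {
		return err
	}
	log.Println("WatchDmChannels:", status.GetErrorCode())
	return nil
}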

View File

@ -1,10 +1,7 @@
package datanode
import (
"bytes"
"encoding/binary"
"log"
"math"
"math/rand"
"os"
"strconv"
@ -13,14 +10,6 @@ import (
"go.etcd.io/etcd/clientv3"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)
func makeNewChannelNames(names []string, suffix string) []string {
@ -88,366 +77,3 @@ func clearEtcd(rootPath string) error {
return nil
}
type (
Factory interface {
}
MetaFactory struct {
}
DataFactory struct {
rawData []byte
}
AllocatorFactory struct {
ID UniqueID
}
MasterServiceFactory struct {
ID UniqueID
collectionName string
collectionID UniqueID
}
)
func (mf *MetaFactory) CollectionMetaFactory(collectionID UniqueID, collectionName string) *etcdpb.CollectionMeta {
sch := schemapb.CollectionSchema{
Name: collectionName,
Description: "test collection by meta factory",
AutoID: true,
Fields: []*schemapb.FieldSchema{
{
FieldID: 0,
Name: "RowID",
Description: "RowID field",
DataType: schemapb.DataType_INT64,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "f0_tk1",
Value: "f0_tv1",
},
},
},
{
FieldID: 1,
Name: "Timestamp",
Description: "Timestamp field",
DataType: schemapb.DataType_INT64,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "f1_tk1",
Value: "f1_tv1",
},
},
},
{
FieldID: 100,
Name: "float_vector_field",
Description: "field 100",
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "indexkey",
Value: "indexvalue",
},
},
},
{
FieldID: 101,
Name: "binary_vector_field",
Description: "field 101",
DataType: schemapb.DataType_VECTOR_BINARY,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "32",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "indexkey",
Value: "indexvalue",
},
},
},
{
FieldID: 102,
Name: "bool_field",
Description: "field 102",
DataType: schemapb.DataType_BOOL,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 103,
Name: "int8_field",
Description: "field 103",
DataType: schemapb.DataType_INT8,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 104,
Name: "int16_field",
Description: "field 104",
DataType: schemapb.DataType_INT16,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 105,
Name: "int32_field",
Description: "field 105",
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 106,
Name: "int64_field",
Description: "field 106",
DataType: schemapb.DataType_INT64,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 107,
Name: "float32_field",
Description: "field 107",
DataType: schemapb.DataType_FLOAT,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 108,
Name: "float64_field",
Description: "field 108",
DataType: schemapb.DataType_DOUBLE,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
},
}
collection := etcdpb.CollectionMeta{
ID: collectionID,
Schema: &sch,
CreateTime: Timestamp(1),
SegmentIDs: make([]UniqueID, 0),
PartitionTags: make([]string, 0),
}
return &collection
}
func NewDataFactory() *DataFactory {
return &DataFactory{rawData: GenRowData()}
}
func GenRowData() (rawData []byte) {
const DIM = 2
const N = 1
// Float vector
var fvector = [DIM]float32{1, 2}
for _, ele := range fvector {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
rawData = append(rawData, buf...)
}
// Binary vector
// Dimension of binary vector is 32
// size := 4, = 32 / 8
var bvector = []byte{255, 255, 255, 0}
rawData = append(rawData, bvector...)
// Bool
var fieldBool = true
buf := new(bytes.Buffer)
if err := binary.Write(buf, binary.LittleEndian, fieldBool); err != nil {
panic(err)
}
rawData = append(rawData, buf.Bytes()...)
// int8
var dataInt8 int8 = 100
bint8 := new(bytes.Buffer)
if err := binary.Write(bint8, binary.LittleEndian, dataInt8); err != nil {
panic(err)
}
rawData = append(rawData, bint8.Bytes()...)
// int16
var dataInt16 int16 = 200
bint16 := new(bytes.Buffer)
if err := binary.Write(bint16, binary.LittleEndian, dataInt16); err != nil {
panic(err)
}
rawData = append(rawData, bint16.Bytes()...)
// int32
var dataInt32 int32 = 300
bint32 := new(bytes.Buffer)
if err := binary.Write(bint32, binary.LittleEndian, dataInt32); err != nil {
panic(err)
}
rawData = append(rawData, bint32.Bytes()...)
// int64
var dataInt64 int64 = 400
bint64 := new(bytes.Buffer)
if err := binary.Write(bint64, binary.LittleEndian, dataInt64); err != nil {
panic(err)
}
rawData = append(rawData, bint64.Bytes()...)
// float32
var datafloat float32 = 1.1
bfloat32 := new(bytes.Buffer)
if err := binary.Write(bfloat32, binary.LittleEndian, datafloat); err != nil {
panic(err)
}
rawData = append(rawData, bfloat32.Bytes()...)
// float64
var datafloat64 float64 = 2.2
bfloat64 := new(bytes.Buffer)
if err := binary.Write(bfloat64, binary.LittleEndian, datafloat64); err != nil {
panic(err)
}
rawData = append(rawData, bfloat64.Bytes()...)
log.Println("Rawdata length:", len(rawData))
return
}
// n: number of TsinsertMsgs to generate
func (df *DataFactory) GetMsgStreamTsInsertMsgs(n int) (inMsgs []msgstream.TsMsg) {
for i := 0; i < n; i++ {
var msg msgstream.TsMsg = &msgstream.InsertMsg{
BaseMsg: msgstream.BaseMsg{
HashValues: []uint32{uint32(i)},
},
InsertRequest: internalpb2.InsertRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kInsert,
MsgID: 0, // GOOSE TODO
Timestamp: Timestamp(i + 1000),
SourceID: 0,
},
CollectionName: "col1", // GOOSE TODO
PartitionName: "default",
SegmentID: 1, // GOOSE TODO
ChannelID: "0", // GOOSE TODO
Timestamps: []Timestamp{Timestamp(i + 1000)},
RowIDs: []UniqueID{UniqueID(i)},
RowData: []*commonpb.Blob{{Value: df.rawData}},
},
}
inMsgs = append(inMsgs, msg)
}
return
}
// n: number of insertMsgs to generate
func (df *DataFactory) GetMsgStreamInsertMsgs(n int) (inMsgs []*msgstream.InsertMsg) {
for i := 0; i < n; i++ {
var msg = &msgstream.InsertMsg{
BaseMsg: msgstream.BaseMsg{
HashValues: []uint32{uint32(i)},
},
InsertRequest: internalpb2.InsertRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kInsert,
MsgID: 0, // GOOSE TODO
Timestamp: Timestamp(i + 1000),
SourceID: 0,
},
CollectionName: "col1", // GOOSE TODO
PartitionName: "default",
SegmentID: 1, // GOOSE TODO
ChannelID: "0", // GOOSE TODO
Timestamps: []Timestamp{Timestamp(i + 1000)},
RowIDs: []UniqueID{UniqueID(i)},
RowData: []*commonpb.Blob{{Value: df.rawData}},
},
}
inMsgs = append(inMsgs, msg)
}
return
}
func NewAllocatorFactory(id ...UniqueID) *AllocatorFactory {
f := &AllocatorFactory{}
if len(id) == 1 {
f.ID = id[0]
}
return f
}
func (alloc AllocatorFactory) setID(id UniqueID) {
alloc.ID = id
}
func (alloc AllocatorFactory) allocID() (UniqueID, error) {
if alloc.ID == 0 {
return UniqueID(0), nil // GOOSE TODO: random ID generating
}
return alloc.ID, nil
}
func (m *MasterServiceFactory) setID(id UniqueID) {
m.ID = id // GOOSE TODO: random ID generator
}
func (m *MasterServiceFactory) setCollectionID(id UniqueID) {
m.collectionID = id
}
func (m *MasterServiceFactory) setCollectionName(name string) {
m.collectionName = name
}
func (m *MasterServiceFactory) AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
resp := &masterpb.IDResponse{
Status: &commonpb.Status{},
ID: m.ID,
}
return resp, nil
}
func (m *MasterServiceFactory) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
resp := &milvuspb.ShowCollectionResponse{
Status: &commonpb.Status{},
CollectionNames: []string{m.collectionName},
}
return resp, nil
}
func (m *MasterServiceFactory) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
f := MetaFactory{}
meta := f.CollectionMetaFactory(m.collectionID, m.collectionName)
resp := &milvuspb.DescribeCollectionResponse{
Status: &commonpb.Status{},
CollectionID: m.collectionID,
Schema: meta.Schema,
}
return resp, nil
}
func (m *MasterServiceFactory) GetComponentStates() (*internalpb2.ComponentStates, error) {
return &internalpb2.ComponentStates{
State: &internalpb2.ComponentInfo{},
SubcomponentStates: make([]*internalpb2.ComponentInfo, 0),
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
},
}, nil
}

View File

@ -19,27 +19,18 @@ type dataSyncService struct {
func newDataSyncService(ctx context.Context, flushChan chan *flushMsg,
replica collectionReplica, alloc allocator) *dataSyncService {
service := &dataSyncService{
return &dataSyncService{
ctx: ctx,
fg: nil,
flushChan: flushChan,
replica: replica,
idAllocator: alloc,
}
return service
}
func (dsService *dataSyncService) init() {
if len(Params.InsertChannelNames) == 0 {
log.Println("InsertChannels not readly, init datasync service failed")
return
}
dsService.initNodes()
}
func (dsService *dataSyncService) start() {
log.Println("Data Sync Service Start Successfully")
dsService.initNodes()
dsService.fg.Start()
}
@ -69,6 +60,7 @@ func (dsService *dataSyncService) initNodes() {
var ddStreamNode Node = newDDInputNode(dsService.ctx)
var filterDmNode Node = newFilteredDmNode()
var ddNode Node = newDDNode(dsService.ctx, mt, dsService.flushChan, dsService.replica, dsService.idAllocator)
var insertBufferNode Node = newInsertBufferNode(dsService.ctx, mt, dsService.replica, dsService.idAllocator)
var gcNode Node = newGCNode(dsService.replica)
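With the separate init() removed, start() is now responsible for building the flow graph before running it. A small sketch of the lifecycle as it looks after this change; flushChan, replica, and alloc stand for real instances and are not provided here.

package datanode

import "context"

// runDataSync sketches the dataSyncService lifecycle after this commit:
// the constructor only stores its dependencies, and start() builds the
// flow-graph nodes (initNodes) before calling fg.Start().
func runDataSync(ctx context.Context, flushChan chan *flushMsg, replica collectionReplica, alloc allocator) {
	sync := newDataSyncService(ctx, flushChan, replica, alloc)
	go sync.start()
	<-ctx.Done()
}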

View File

@ -2,6 +2,7 @@ package datanode
import (
"context"
"encoding/binary"
"math"
"testing"
"time"
@ -41,15 +42,116 @@ func TestDataSyncService_Start(t *testing.T) {
allocFactory := AllocatorFactory{}
sync := newDataSyncService(ctx, flushChan, replica, allocFactory)
sync.replica.addCollection(collMeta.ID, collMeta.Schema)
sync.init()
go sync.start()
// test data generate
// GOOSE TODO orgnize
const DIM = 2
const N = 1
var rawData []byte
// Float vector
var fvector = [DIM]float32{1, 2}
for _, ele := range fvector {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
rawData = append(rawData, buf...)
}
// Binary vector
// Dimension of binary vector is 32
var bvector = [4]byte{255, 255, 255, 0}
for _, ele := range bvector {
bs := make([]byte, 4)
binary.LittleEndian.PutUint32(bs, uint32(ele))
rawData = append(rawData, bs...)
}
// Bool
bb := make([]byte, 4)
var fieldBool = true
var fieldBoolInt uint32
if fieldBool {
fieldBoolInt = 1
} else {
fieldBoolInt = 0
}
binary.LittleEndian.PutUint32(bb, fieldBoolInt)
rawData = append(rawData, bb...)
// int8
var dataInt8 int8 = 100
bint8 := make([]byte, 4)
binary.LittleEndian.PutUint32(bint8, uint32(dataInt8))
rawData = append(rawData, bint8...)
// int16
var dataInt16 int16 = 200
bint16 := make([]byte, 4)
binary.LittleEndian.PutUint32(bint16, uint32(dataInt16))
rawData = append(rawData, bint16...)
// int32
var dataInt32 int32 = 300
bint32 := make([]byte, 4)
binary.LittleEndian.PutUint32(bint32, uint32(dataInt32))
rawData = append(rawData, bint32...)
// int64
var dataInt64 int64 = 300
bint64 := make([]byte, 4)
binary.LittleEndian.PutUint32(bint64, uint32(dataInt64))
rawData = append(rawData, bint64...)
// float32
var datafloat float32 = 1.1
bfloat32 := make([]byte, 4)
binary.LittleEndian.PutUint32(bfloat32, math.Float32bits(datafloat))
rawData = append(rawData, bfloat32...)
// float64
var datafloat64 float64 = 2.2
bfloat64 := make([]byte, 8)
binary.LittleEndian.PutUint64(bfloat64, math.Float64bits(datafloat64))
rawData = append(rawData, bfloat64...)
timeRange := TimeRange{
timestampMin: 0,
timestampMax: math.MaxUint64,
}
dataFactory := NewDataFactory()
insertMessages := dataFactory.GetMsgStreamTsInsertMsgs(2)
// messages generate
const MSGLENGTH = 1
insertMessages := make([]msgstream.TsMsg, 0)
for i := 0; i < MSGLENGTH; i++ {
var msg msgstream.TsMsg = &msgstream.InsertMsg{
BaseMsg: msgstream.BaseMsg{
HashValues: []uint32{
uint32(i),
},
},
InsertRequest: internalpb2.InsertRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kInsert,
MsgID: UniqueID(0),
Timestamp: Timestamp(i + 1000),
SourceID: 0,
},
CollectionName: "col1",
PartitionName: "default",
SegmentID: UniqueID(1),
ChannelID: "0",
Timestamps: []Timestamp{Timestamp(i + 1000)},
RowIDs: []UniqueID{UniqueID(i)},
RowData: []*commonpb.Blob{
{Value: rawData},
},
},
}
insertMessages = append(insertMessages, msg)
}
msgPack := msgstream.MsgPack{
BeginTs: timeRange.timestampMin,
@ -106,7 +208,6 @@ func TestDataSyncService_Start(t *testing.T) {
// dataSync
Params.FlushInsertBufferSize = 1
<-sync.ctx.Done()
sync.close()
}

View File

@ -0,0 +1,231 @@
package datanode
import (
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)
type (
Factory interface {
}
MetaFactory struct {
}
AllocatorFactory struct {
ID UniqueID
}
MasterServiceFactory struct {
ID UniqueID
collectionName string
collectionID UniqueID
}
)
func (mf *MetaFactory) CollectionMetaFactory(collectionID UniqueID, collectionName string) *etcdpb.CollectionMeta {
sch := schemapb.CollectionSchema{
Name: collectionName,
Description: "test collection by meta factory",
AutoID: true,
Fields: []*schemapb.FieldSchema{
{
FieldID: 0,
Name: "RowID",
Description: "RowID field",
DataType: schemapb.DataType_INT64,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "f0_tk1",
Value: "f0_tv1",
},
},
},
{
FieldID: 1,
Name: "Timestamp",
Description: "Timestamp field",
DataType: schemapb.DataType_INT64,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "f1_tk1",
Value: "f1_tv1",
},
},
},
{
FieldID: 100,
Name: "float_vector_field",
Description: "field 100",
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "indexkey",
Value: "indexvalue",
},
},
},
{
FieldID: 101,
Name: "binary_vector_field",
Description: "field 101",
DataType: schemapb.DataType_VECTOR_BINARY,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "32",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "indexkey",
Value: "indexvalue",
},
},
},
{
FieldID: 102,
Name: "bool_field",
Description: "field 102",
DataType: schemapb.DataType_BOOL,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 103,
Name: "int8_field",
Description: "field 103",
DataType: schemapb.DataType_INT8,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 104,
Name: "int16_field",
Description: "field 104",
DataType: schemapb.DataType_INT16,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 105,
Name: "int32_field",
Description: "field 105",
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 106,
Name: "int64_field",
Description: "field 106",
DataType: schemapb.DataType_INT64,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 107,
Name: "float32_field",
Description: "field 107",
DataType: schemapb.DataType_FLOAT,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 108,
Name: "float64_field",
Description: "field 108",
DataType: schemapb.DataType_DOUBLE,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
},
}
collection := etcdpb.CollectionMeta{
ID: collectionID,
Schema: &sch,
CreateTime: Timestamp(1),
SegmentIDs: make([]UniqueID, 0),
PartitionTags: make([]string, 0),
}
return &collection
}
func NewAllocatorFactory(id ...UniqueID) *AllocatorFactory {
f := &AllocatorFactory{}
if len(id) == 1 {
f.ID = id[0]
}
return f
}
func (alloc AllocatorFactory) setID(id UniqueID) {
alloc.ID = id
}
func (alloc AllocatorFactory) allocID() (UniqueID, error) {
if alloc.ID == 0 {
return UniqueID(0), nil // GOOSE TODO: random ID generating
}
return alloc.ID, nil
}
func (m *MasterServiceFactory) setID(id UniqueID) {
m.ID = id // GOOSE TODO: random ID generator
}
func (m *MasterServiceFactory) setCollectionID(id UniqueID) {
m.collectionID = id
}
func (m *MasterServiceFactory) setCollectionName(name string) {
m.collectionName = name
}
func (m *MasterServiceFactory) AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
resp := &masterpb.IDResponse{
Status: &commonpb.Status{},
ID: m.ID,
}
return resp, nil
}
func (m *MasterServiceFactory) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
resp := &milvuspb.ShowCollectionResponse{
Status: &commonpb.Status{},
CollectionNames: []string{m.collectionName},
}
return resp, nil
}
func (m *MasterServiceFactory) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
f := MetaFactory{}
meta := f.CollectionMetaFactory(m.collectionID, m.collectionName)
resp := &milvuspb.DescribeCollectionResponse{
Status: &commonpb.Status{},
CollectionID: m.collectionID,
Schema: meta.Schema,
}
return resp, nil
}
func (m *MasterServiceFactory) GetComponentStates() (*internalpb2.ComponentStates, error) {
return &internalpb2.ComponentStates{
State: &internalpb2.ComponentInfo{},
SubcomponentStates: make([]*internalpb2.ComponentInfo, 0),
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
},
}, nil
}
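The factories extracted into this new file back the data-node tests. A brief sketch of typical use; the collection name and ID below are placeholders.

package datanode

import "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"

// newTestMeta sketches how tests use these factories: MetaFactory builds an
// etcd collection meta with scalar and vector fields, and MasterServiceFactory
// stands in for the master service.
func newTestMeta() (*etcdpb.CollectionMeta, *MasterServiceFactory) {
	mf := &MetaFactory{}
	meta := mf.CollectionMetaFactory(UniqueID(1), "coll_test")

	master := &MasterServiceFactory{}
	master.setCollectionID(UniqueID(1))
	master.setCollectionName("coll_test")
	return meta, master
}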

View File

@ -132,6 +132,7 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
}
default:
//log.Println(". default: do nothing ...")
}
// generate binlog

View File

@ -1,7 +1,10 @@
package datanode
import (
"bytes"
"context"
"encoding/binary"
"log"
"math"
"testing"
"time"
@ -9,6 +12,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)
@ -38,6 +43,7 @@ func TestFlowGraphInsertBufferNode_Operate(t *testing.T) {
err = replica.addCollection(collMeta.ID, collMeta.Schema)
require.NoError(t, err)
// Params.FlushInsertBufSize = 2
idFactory := AllocatorFactory{}
iBNode := newInsertBufferNode(ctx, newMetaTable(), replica, idFactory)
inMsg := genInsertMsg()
@ -46,6 +52,82 @@ func TestFlowGraphInsertBufferNode_Operate(t *testing.T) {
}
func genInsertMsg() insertMsg {
// test data generate
const DIM = 2
const N = 1
var rawData []byte
// Float vector
var fvector = [DIM]float32{1, 2}
for _, ele := range fvector {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
rawData = append(rawData, buf...)
}
// Binary vector
// Dimension of binary vector is 32
// size := 4, = 32 / 8
var bvector = []byte{255, 255, 255, 0}
rawData = append(rawData, bvector...)
// Bool
var fieldBool = true
buf := new(bytes.Buffer)
if err := binary.Write(buf, binary.LittleEndian, fieldBool); err != nil {
panic(err)
}
rawData = append(rawData, buf.Bytes()...)
// int8
var dataInt8 int8 = 100
bint8 := new(bytes.Buffer)
if err := binary.Write(bint8, binary.LittleEndian, dataInt8); err != nil {
panic(err)
}
rawData = append(rawData, bint8.Bytes()...)
// int16
var dataInt16 int16 = 200
bint16 := new(bytes.Buffer)
if err := binary.Write(bint16, binary.LittleEndian, dataInt16); err != nil {
panic(err)
}
rawData = append(rawData, bint16.Bytes()...)
// int32
var dataInt32 int32 = 300
bint32 := new(bytes.Buffer)
if err := binary.Write(bint32, binary.LittleEndian, dataInt32); err != nil {
panic(err)
}
rawData = append(rawData, bint32.Bytes()...)
// int64
var dataInt64 int64 = 400
bint64 := new(bytes.Buffer)
if err := binary.Write(bint64, binary.LittleEndian, dataInt64); err != nil {
panic(err)
}
rawData = append(rawData, bint64.Bytes()...)
// float32
var datafloat float32 = 1.1
bfloat32 := new(bytes.Buffer)
if err := binary.Write(bfloat32, binary.LittleEndian, datafloat); err != nil {
panic(err)
}
rawData = append(rawData, bfloat32.Bytes()...)
// float64
var datafloat64 float64 = 2.2
bfloat64 := new(bytes.Buffer)
if err := binary.Write(bfloat64, binary.LittleEndian, datafloat64); err != nil {
panic(err)
}
rawData = append(rawData, bfloat64.Bytes()...)
log.Println("Test rawdata length:", len(rawData))
timeRange := TimeRange{
timestampMin: 0,
@ -61,8 +143,55 @@ func genInsertMsg() insertMsg {
},
}
dataFactory := NewDataFactory()
iMsg.insertMessages = append(iMsg.insertMessages, dataFactory.GetMsgStreamInsertMsgs(2)...)
// messages generate
const MSGLENGTH = 1
// insertMessages := make([]msgstream.TsMsg, 0)
for i := 0; i < MSGLENGTH; i++ {
var msg = &msgstream.InsertMsg{
BaseMsg: msgstream.BaseMsg{
HashValues: []uint32{
uint32(i),
},
},
InsertRequest: internalpb2.InsertRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kInsert,
MsgID: 0,
Timestamp: Timestamp(i + 1000),
SourceID: 0,
},
CollectionName: "col1",
PartitionName: "default",
CollectionID: 0,
PartitionID: 1,
SegmentID: UniqueID(1),
ChannelID: "0",
Timestamps: []Timestamp{
Timestamp(i + 1000),
Timestamp(i + 1000),
Timestamp(i + 1000),
Timestamp(i + 1000),
Timestamp(i + 1000),
},
RowIDs: []UniqueID{
UniqueID(i),
UniqueID(i),
UniqueID(i),
UniqueID(i),
UniqueID(i),
},
RowData: []*commonpb.Blob{
{Value: rawData},
{Value: rawData},
{Value: rawData},
{Value: rawData},
{Value: rawData},
},
},
}
iMsg.insertMessages = append(iMsg.insertMessages, msg)
}
fmsg := &flushMsg{
msgID: 1,

View File

@ -10,35 +10,42 @@ import (
)
func newDmInputNode(ctx context.Context) *flowgraph.InputNode {
msgStreamURL := Params.PulsarAddress
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
consumeChannels := Params.InsertChannelNames
consumeSubName := Params.MsgChannelSubName
unmarshalDispatcher := util.NewUnmarshalDispatcher()
insertStream := pulsarms.NewPulsarTtMsgStream(ctx, 1024)
insertStream.SetPulsarClient(Params.PulsarAddress)
insertStream.SetPulsarClient(msgStreamURL)
unmarshalDispatcher := util.NewUnmarshalDispatcher()
insertStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, 1024)
var stream msgstream.MsgStream = insertStream
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
node := flowgraph.NewInputNode(&stream, "dmInputNode", maxQueueLength, maxParallelism)
return node
}
func newDDInputNode(ctx context.Context) *flowgraph.InputNode {
consumeChannels := Params.DDChannelNames
consumeSubName := Params.MsgChannelSubName
ddStream := pulsarms.NewPulsarTtMsgStream(ctx, 1024)
ddStream.SetPulsarClient(Params.PulsarAddress)
unmarshalDispatcher := util.NewUnmarshalDispatcher()
ddStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, 1024)
var stream msgstream.MsgStream = ddStream
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
consumeSubName := Params.MsgChannelSubName
unmarshalDispatcher := util.NewUnmarshalDispatcher()
tmpStream := pulsarms.NewPulsarTtMsgStream(ctx, 1024)
tmpStream.SetPulsarClient(Params.PulsarAddress)
tmpStream.CreatePulsarConsumers(Params.DDChannelNames, consumeSubName, unmarshalDispatcher, 1024)
var stream msgstream.MsgStream = tmpStream
node := flowgraph.NewInputNode(&stream, "ddInputNode", maxQueueLength, maxParallelism)
return node
}

View File

@ -0,0 +1,38 @@
package dataservice
import (
"strconv"
"sync"
)
type (
insertChannelManager struct {
mu sync.RWMutex
count int
channelGroups map[UniqueID][]string // collection id to channel ranges
}
)
func newInsertChannelManager() *insertChannelManager {
return &insertChannelManager{
count: 0,
channelGroups: make(map[UniqueID][]string),
}
}
func (cm *insertChannelManager) GetChannels(collectionID UniqueID) ([]string, error) {
cm.mu.Lock()
defer cm.mu.Unlock()
if _, ok := cm.channelGroups[collectionID]; ok {
return cm.channelGroups[collectionID], nil
}
channels := Params.InsertChannelNumPerCollection
cg := make([]string, channels)
var i int64 = 0
for ; i < channels; i++ {
cg[i] = Params.InsertChannelPrefixName + strconv.Itoa(cm.count)
cm.count++
}
cm.channelGroups[collectionID] = cg
return cg, nil
}
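A standalone sketch of the allocation behaviour introduced above (the type name and channel prefix here are illustrative, not from this patch): channel indices come from a single process-wide counter, so each collection receives globally unique channel names, and repeated calls for the same collection return the cached group.
package main
import (
	"fmt"
	"strconv"
	"sync"
)
// channelManager mirrors the locking, caching and global-counter logic of
// insertChannelManager above, with the parameters inlined for a runnable example.
type channelManager struct {
	mu            sync.Mutex
	count         int
	prefix        string
	perCollection int
	channelGroups map[int64][]string
}
func (cm *channelManager) GetChannels(collectionID int64) []string {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	if cg, ok := cm.channelGroups[collectionID]; ok {
		return cg // the allocation is cached per collection
	}
	cg := make([]string, cm.perCollection)
	for i := 0; i < cm.perCollection; i++ {
		cg[i] = cm.prefix + strconv.Itoa(cm.count) // cm.count is process-wide, so names are never reused
		cm.count++
	}
	cm.channelGroups[collectionID] = cg
	return cg
}
func main() {
	cm := &channelManager{prefix: "channel", perCollection: 4, channelGroups: map[int64][]string{}}
	fmt.Println(cm.GetChannels(1)) // [channel0 channel1 channel2 channel3]
	fmt.Println(cm.GetChannels(2)) // [channel4 channel5 channel6 channel7]
	fmt.Println(cm.GetChannels(1)) // cached result of the first call
}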

View File

@ -0,0 +1,21 @@
package dataservice
import (
"strconv"
"testing"
"github.com/stretchr/testify/assert"
)
func TestGetChannel(t *testing.T) {
Params.Init()
Params.InsertChannelNumPerCollection = 4
Params.InsertChannelPrefixName = "channel"
manager := newInsertChannelManager()
channels, err := manager.GetChannels(1)
assert.Nil(t, err)
assert.EqualValues(t, Params.InsertChannelNumPerCollection, len(channels))
for i := 0; i < len(channels); i++ {
assert.EqualValues(t, Params.InsertChannelPrefixName+strconv.Itoa(i), channels[i])
}
}

View File

@ -2,6 +2,7 @@ package dataservice
import (
"log"
"sort"
"sync"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
@ -22,16 +23,18 @@ type (
channelNum int
}
dataNodeCluster struct {
mu sync.RWMutex
finishCh chan struct{}
nodes []*dataNode
mu sync.RWMutex
finishCh chan struct{}
nodes []*dataNode
watchedCollection map[UniqueID]bool
}
)
func newDataNodeCluster(finishCh chan struct{}) *dataNodeCluster {
return &dataNodeCluster{
finishCh: finishCh,
nodes: make([]*dataNode, 0),
finishCh: finishCh,
nodes: make([]*dataNode, 0),
watchedCollection: make(map[UniqueID]bool),
}
}
@ -69,9 +72,13 @@ func (c *dataNodeCluster) GetNodeIDs() []int64 {
return ret
}
func (c *dataNodeCluster) WatchInsertChannels(channels []string) {
func (c *dataNodeCluster) WatchInsertChannels(collectionID UniqueID, channels []string) {
c.mu.Lock()
defer c.mu.Unlock()
if c.watchedCollection[collectionID] {
return
}
sort.Slice(c.nodes, func(i, j int) bool { return c.nodes[i].channelNum < c.nodes[j].channelNum })
var groups [][]string
if len(channels) < len(c.nodes) {
groups = make([][]string, len(channels))
@ -102,6 +109,7 @@ func (c *dataNodeCluster) WatchInsertChannels(channels []string) {
}
c.nodes[i].channelNum += len(group)
}
c.watchedCollection[collectionID] = true
}
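For intuition, a minimal standalone sketch of the load-aware assignment used by WatchInsertChannels above (the helper name and the round-robin split are illustrative; the real code sorts the nodes the same way but hands out contiguous groups): nodes are ordered by how many channels they already watch, so idle nodes are filled first, and each node's counter is bumped as channels are assigned.
package main
import (
	"fmt"
	"sort"
)
type dataNode struct {
	id         int64
	channelNum int
}
// assignChannels gives channels to the least-loaded nodes first.
func assignChannels(nodes []*dataNode, channels []string) map[int64][]string {
	sort.Slice(nodes, func(i, j int) bool { return nodes[i].channelNum < nodes[j].channelNum })
	groups := make(map[int64][]string)
	for i, ch := range channels {
		n := nodes[i%len(nodes)]
		groups[n.id] = append(groups[n.id], ch)
		n.channelNum++
	}
	return groups
}
func main() {
	nodes := []*dataNode{{id: 1, channelNum: 2}, {id: 2, channelNum: 0}}
	fmt.Println(assignChannels(nodes, []string{"c0", "c1", "c2", "c3"}))
	// the previously idle node 2 receives c0 and c2, node 1 receives c1 and c3
}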
func (c *dataNodeCluster) GetDataNodeStates() ([]*internalpb2.ComponentInfo, error) {
@ -145,4 +153,5 @@ func (c *dataNodeCluster) Clear() {
defer c.mu.Unlock()
c.finishCh = make(chan struct{})
c.nodes = make([]*dataNode, 0)
c.watchedCollection = make(map[UniqueID]bool)
}

View File

@ -33,7 +33,7 @@ func TestWatchChannels(t *testing.T) {
channelNum: 0,
})
}
cluster.WatchInsertChannels(c.channels)
cluster.WatchInsertChannels(c.collectionID, c.channels)
for i := 0; i < len(cluster.nodes); i++ {
assert.EqualValues(t, c.channelNums[i], cluster.nodes[i].channelNum)
}

View File

@ -24,14 +24,14 @@ type ParamTable struct {
DefaultRecordSize int64
SegIDAssignExpiration int64
InsertChannelPrefixName string
InsertChannelNum int64
StatisticsChannelName string
TimeTickChannelName string
DataNodeNum int
SegmentInfoChannelName string
DataServiceSubscriptionName string
K2SChannelNames []string
InsertChannelPrefixName string
InsertChannelNumPerCollection int64
StatisticsChannelName string
TimeTickChannelName string
DataNodeNum int
SegmentInfoChannelName string
DataServiceSubscriptionName string
K2SChannelNames []string
SegmentFlushMetaPath string
}
@ -61,7 +61,7 @@ func (p *ParamTable) Init() {
p.initDefaultRecordSize()
p.initSegIDAssignExpiration()
p.initInsertChannelPrefixName()
p.initInsertChannelNum()
p.initInsertChannelNumPerCollection()
p.initStatisticsChannelName()
p.initTimeTickChannelName()
p.initDataNodeNum()
@ -150,8 +150,8 @@ func (p *ParamTable) initInsertChannelPrefixName() {
}
}
func (p *ParamTable) initInsertChannelNum() {
p.InsertChannelNum = p.ParseInt64("dataservice.insertChannelNum")
func (p *ParamTable) initInsertChannelNumPerCollection() {
p.InsertChannelNumPerCollection = p.ParseInt64("dataservice.insertChannelNumPerCollection")
}
func (p *ParamTable) initStatisticsChannelName() {

View File

@ -85,6 +85,7 @@ type (
segAllocator segmentAllocator
statsHandler *statsHandler
ddHandler *ddHandler
insertChannelMgr *insertChannelManager
allocator allocator
cluster *dataNodeCluster
msgProducer *timesync.MsgProducer
@ -94,7 +95,6 @@ type (
k2sMsgStream msgstream.MsgStream
ddChannelName string
segmentInfoStream msgstream.MsgStream
insertChannels []string
}
)
@ -103,23 +103,14 @@ func CreateServer(ctx context.Context) (*Server, error) {
ch := make(chan struct{})
s := &Server{
ctx: ctx,
insertChannelMgr: newInsertChannelManager(),
registerFinishCh: ch,
cluster: newDataNodeCluster(ch),
}
s.insertChannels = s.getInsertChannels()
s.state.Store(internalpb2.StateCode_INITIALIZING)
return s, nil
}
func (s *Server) getInsertChannels() []string {
channels := make([]string, Params.InsertChannelNum)
var i int64 = 0
for ; i < Params.InsertChannelNum; i++ {
channels[i] = Params.InsertChannelPrefixName + strconv.FormatInt(i, 10)
}
return channels
}
func (s *Server) SetMasterClient(masterClient MasterClient) {
s.masterClient = masterClient
}
@ -146,7 +137,6 @@ func (s *Server) Start() error {
}
s.startServerLoop()
s.waitDataNodeRegister()
s.cluster.WatchInsertChannels(s.insertChannels)
if err = s.initMsgProducer(); err != nil {
return err
}
@ -685,7 +675,16 @@ func (s *Server) GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*dat
}
func (s *Server) GetInsertChannels(req *datapb.InsertChannelRequest) ([]string, error) {
return s.insertChannels, nil
if !s.checkStateIsHealthy() {
return nil, errors.New("server is initializing")
}
channels, err := s.insertChannelMgr.GetChannels(req.CollectionID)
if err != nil {
return nil, err
}
s.cluster.WatchInsertChannels(req.CollectionID, channels)
return channels, nil
}
func (s *Server) GetCollectionStatistics(req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error) {

View File

@ -657,18 +657,10 @@ func TestMasterService(t *testing.T) {
rsp, err := core.DescribeIndex(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, len(rsp.IndexDescriptions), 3)
indexNames := make([]string, 0)
for _, d := range rsp.IndexDescriptions {
indexNames = append(indexNames, d.IndexName)
}
assert.ElementsMatch(t, indexNames, []string{
"index_field_100_0",
"index_field_100_1",
Params.DefaultIndexName,
})
assert.Equal(t, rsp.IndexDescriptions[0].IndexName, Params.DefaultIndexName)
assert.Equal(t, rsp.IndexDescriptions[1].IndexName, "index_field_100_0")
assert.Equal(t, rsp.IndexDescriptions[2].IndexName, "index_field_100_1")
})
t.Run("drop partition", func(t *testing.T) {

View File

@ -2,258 +2,103 @@ package rmqms
import (
"context"
"errors"
"log"
"reflect"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/gogo/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
rocksmq "github.com/zilliztech/milvus-distributed/internal/util/rocksmq"
"github.com/zilliztech/milvus-distributed/internal/masterservice"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
)
type RmqMsgStream struct {
isServing int64
idAllocator *masterservice.GlobalIDAllocator
ctx context.Context
serverLoopWg sync.WaitGroup
serverLoopCtx context.Context
serverLoopCancel func()
rmq *rocksmq.RocksMQ
repackFunc msgstream.RepackFunc
consumers []rocksmq.Consumer
producers []string
unmarshal *util.UnmarshalDispatcher
receiveBuf chan *msgstream.MsgPack
wait *sync.WaitGroup
// tso ticker
streamCancel func()
tsoTicker *time.Ticker
}
func NewRmqMsgStream(ctx context.Context, rmq *rocksmq.RocksMQ, receiveBufSize int64) *RmqMsgStream {
streamCtx, streamCancel := context.WithCancel(ctx)
receiveBuf := make(chan *msgstream.MsgPack, receiveBufSize)
stream := &RmqMsgStream{
ctx: streamCtx,
rmq: nil,
receiveBuf: receiveBuf,
streamCancel: streamCancel,
}
func NewRmqMsgStream() *RmqMsgStream {
//idAllocator := master.NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{""}, "singleNode/rocksmq", "gid"))
//if err := idAllocator.Initialize(); err != nil {
// return nil
//}
//
//return &RmqMsgStream{
// idAllocator: idAllocator,
//}
return stream
return nil
}
func (ms *RmqMsgStream) startServerLoop(ctx context.Context) error {
ms.serverLoopCtx, ms.serverLoopCancel = context.WithCancel(ctx)
ms.serverLoopWg.Add(1)
go ms.tsLoop()
return nil
}
func (ms *RmqMsgStream) stopServerLoop() {
ms.serverLoopCancel()
ms.serverLoopWg.Wait()
}
func (ms *RmqMsgStream) tsLoop() {
defer ms.serverLoopWg.Done()
ms.tsoTicker = time.NewTicker(masterservice.UpdateTimestampStep)
defer ms.tsoTicker.Stop()
ctx, cancel := context.WithCancel(ms.serverLoopCtx)
defer cancel()
for {
select {
case <-ms.tsoTicker.C:
if err := ms.idAllocator.UpdateID(); err != nil {
log.Println("failed to update id", err)
return
}
case <-ctx.Done():
// Server is closed and it should return nil.
log.Println("tsLoop is closed")
return
}
}
}
func (ms *RmqMsgStream) Start() {
ms.wait = &sync.WaitGroup{}
if ms.consumers != nil {
ms.wait.Add(1)
go ms.bufMsgPackToChannel()
if err := ms.startServerLoop(ms.ctx); err != nil {
return
}
atomic.StoreInt64(&ms.isServing, 1)
}
func (ms *RmqMsgStream) Close() {
}
func (ms *RmqMsgStream) CreateProducers(channels []string) error {
for _, channel := range channels {
// TODO(yhz): this may allow creating an already existing channel
if err := ms.rmq.CreateChannel(channel); err != nil {
return err
}
if !atomic.CompareAndSwapInt64(&ms.isServing, 1, 0) {
// server is already closed
return
}
return nil
}
func (ms *RmqMsgStream) CreateConsumers(channels []string, groupName string) error {
for _, channelName := range channels {
if err := ms.rmq.CreateConsumerGroup(groupName, channelName); err != nil {
return err
}
msgNum := make(chan int)
ms.consumers = append(ms.consumers, rocksmq.Consumer{GroupName: groupName, ChannelName: channelName, MsgNum: msgNum})
}
return nil
log.Print("closing server")
ms.stopServerLoop()
}
func (ms *RmqMsgStream) Produce(pack *msgstream.MsgPack) error {
tsMsgs := pack.Msgs
if len(tsMsgs) <= 0 {
log.Printf("Warning: Receive empty msgPack")
return nil
}
if len(ms.producers) <= 0 {
return errors.New("nil producer in msg stream")
}
reBucketValues := make([][]int32, len(tsMsgs))
for channelID, tsMsg := range tsMsgs {
hashValues := tsMsg.HashKeys()
bucketValues := make([]int32, len(hashValues))
for index, hashValue := range hashValues {
if tsMsg.Type() == commonpb.MsgType_kSearchResult {
searchResult := tsMsg.(*msgstream.SearchResultMsg)
channelID := searchResult.ResultChannelID
channelIDInt, _ := strconv.ParseInt(channelID, 10, 64)
if channelIDInt >= int64(len(ms.producers)) {
return errors.New("Failed to produce pulsar msg to unKnow channel")
}
bucketValues[index] = int32(channelIDInt)
continue
}
bucketValues[index] = int32(hashValue % uint32(len(ms.producers)))
}
reBucketValues[channelID] = bucketValues
}
var result map[int32]*msgstream.MsgPack
var err error
if ms.repackFunc != nil {
result, err = ms.repackFunc(tsMsgs, reBucketValues)
} else {
msgType := (tsMsgs[0]).Type()
switch msgType {
case commonpb.MsgType_kInsert:
result, err = util.InsertRepackFunc(tsMsgs, reBucketValues)
case commonpb.MsgType_kDelete:
result, err = util.DeleteRepackFunc(tsMsgs, reBucketValues)
default:
result, err = util.DefaultRepackFunc(tsMsgs, reBucketValues)
}
}
if err != nil {
return err
}
for k, v := range result {
for i := 0; i < len(v.Msgs); i++ {
mb, err := v.Msgs[i].Marshal(v.Msgs[i])
if err != nil {
return err
}
//
//msg := &pulsar.ProducerMessage{Payload: mb}
//var child opentracing.Span
if v.Msgs[i].Type() == commonpb.MsgType_kInsert ||
v.Msgs[i].Type() == commonpb.MsgType_kSearch ||
v.Msgs[i].Type() == commonpb.MsgType_kSearchResult {
//tracer := opentracing.GlobalTracer()
//ctx := v.Msgs[i].GetMsgContext()
//if ctx == nil {
// ctx = context.Background()
//}
//
//if parent := opentracing.SpanFromContext(ctx); parent != nil {
// child = tracer.StartSpan("start send pulsar msg",
// opentracing.FollowsFrom(parent.Context()))
//} else {
// child = tracer.StartSpan("start send pulsar msg")
//}
//child.SetTag("hash keys", v.Msgs[i].HashKeys())
//child.SetTag("start time", v.Msgs[i].BeginTs())
//child.SetTag("end time", v.Msgs[i].EndTs())
//child.SetTag("msg type", v.Msgs[i].Type())
//msg.Properties = make(map[string]string)
//err = tracer.Inject(child.Context(), opentracing.TextMap, &propertiesReaderWriter{msg.Properties})
//if err != nil {
// child.LogFields(oplog.Error(err))
// child.Finish()
// return err
//}
//child.LogFields(oplog.String("inject success", "inject success"))
}
msg := make([]rocksmq.ProducerMessage, 0)
msg = append(msg, *rocksmq.NewProducerMessage(mb))
if err := ms.rmq.Produce(ms.producers[k], msg); err != nil {
return err
}
}
}
return nil
}
func (ms *RmqMsgStream) Consume() *msgstream.MsgPack {
for {
select {
case cm, ok := <-ms.receiveBuf:
if !ok {
log.Println("buf chan closed")
return nil
}
return cm
case <-ms.ctx.Done():
log.Printf("context closed")
return nil
}
}
}
func (ms *RmqMsgStream) bufMsgPackToChannel() {
defer ms.wait.Done()
cases := make([]reflect.SelectCase, len(ms.consumers))
for i := 0; i < len(ms.consumers); i++ {
ch := ms.consumers[i].MsgNum
cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)}
}
for {
select {
case <-ms.ctx.Done():
log.Println("done")
return
default:
tsMsgList := make([]msgstream.TsMsg, 0)
for {
chosen, value, ok := reflect.Select(cases)
if !ok {
log.Printf("channel closed")
return
}
msgNum := value.Interface().(int)
rmqMsg, err := ms.rmq.Consume(ms.consumers[chosen].GroupName, ms.consumers[chosen].ChannelName, msgNum)
if err != nil {
log.Printf("Failed to consume message in rocksmq, error = %v", err)
continue
}
for j := 0; j < len(rmqMsg); j++ {
headerMsg := commonpb.MsgHeader{}
err := proto.Unmarshal(rmqMsg[j].Payload, &headerMsg)
if err != nil {
log.Printf("Failed to unmarshal message header, error = %v", err)
continue
}
tsMsg, err := ms.unmarshal.Unmarshal(rmqMsg[j].Payload, headerMsg.Base.MsgType)
if err != nil {
log.Printf("Failed to unmarshal tsMsg, error = %v", err)
continue
}
tsMsgList = append(tsMsgList, tsMsg)
}
noMoreMessage := true
for k := 0; k < len(ms.consumers); k++ {
if len(ms.consumers[k].MsgNum) > 0 {
noMoreMessage = false
}
}
if noMoreMessage {
break
}
}
if len(tsMsgList) > 0 {
msgPack := util.MsgPack{Msgs: tsMsgList}
ms.receiveBuf <- &msgPack
}
}
}
return nil
}
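bufMsgPackToChannel above uses reflect.Select to wait on a dynamic number of consumer channels; the following self-contained illustration shows just that mechanism (the channel contents are made up for the example, nothing here is part of the patch).
package main
import (
	"fmt"
	"reflect"
)
func main() {
	// two buffered channels stand in for the per-consumer MsgNum channels
	chans := []chan int{make(chan int, 1), make(chan int, 1)}
	chans[0] <- 10
	chans[1] <- 20
	// build one SelectCase per channel, as bufMsgPackToChannel does
	cases := make([]reflect.SelectCase, len(chans))
	for i, ch := range chans {
		cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)}
	}
	for range chans {
		chosen, value, ok := reflect.Select(cases) // blocks until any case is ready
		fmt.Println("case", chosen, "received", value.Interface().(int), "ok =", ok)
	}
}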
func (ms *RmqMsgStream) Chan() <-chan *msgstream.MsgPack {

View File

@ -263,7 +263,7 @@ message SearchResults {
message FlushRequest {
common.MsgBase base = 1;
string db_name = 2;
repeated string collection_names = 3;
string collection_name = 3;
}
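With this change a flush request targets exactly one collection. A minimal sketch of building the new message from Go, assuming the generated milvuspb and commonpb packages shown elsewhere in this diff:
package main
import (
	"fmt"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
)
func main() {
	req := &milvuspb.FlushRequest{
		Base:           &commonpb.MsgBase{}, // MsgType/MsgID/Timestamp are filled in by the caller
		DbName:         "default",
		CollectionName: "col1", // previously: CollectionNames: []string{"col1"}
	}
	fmt.Println(req.GetCollectionName())
}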
service MilvusService {

View File

@ -2261,7 +2261,7 @@ func (m *SearchResults) GetHits() [][]byte {
type FlushRequest struct {
Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
DbName string `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"`
CollectionNames []string `protobuf:"bytes,3,rep,name=collection_names,json=collectionNames,proto3" json:"collection_names,omitempty"`
CollectionName string `protobuf:"bytes,3,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -2306,11 +2306,11 @@ func (m *FlushRequest) GetDbName() string {
return ""
}
func (m *FlushRequest) GetCollectionNames() []string {
func (m *FlushRequest) GetCollectionName() string {
if m != nil {
return m.CollectionNames
return m.CollectionName
}
return nil
return ""
}
type RegisterLinkResponse struct {
@ -2408,113 +2408,112 @@ func init() {
func init() { proto.RegisterFile("milvus.proto", fileDescriptor_02345ba45cc0e303) }
var fileDescriptor_02345ba45cc0e303 = []byte{
// 1685 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xdd, 0x6f, 0xdb, 0x46,
0x12, 0x37, 0x25, 0x59, 0xb6, 0xc7, 0x94, 0x2c, 0xaf, 0xbf, 0x14, 0x25, 0xb9, 0x38, 0x7b, 0x97,
0xb3, 0xf3, 0x65, 0x1f, 0x9c, 0xfb, 0x7c, 0x38, 0x20, 0xb1, 0xe5, 0x38, 0x42, 0x12, 0xc7, 0x47,
0xf9, 0x72, 0xe7, 0x0b, 0x02, 0x1d, 0x25, 0xee, 0x49, 0xbc, 0x50, 0xa4, 0xca, 0x5d, 0xf9, 0x23,
0x4f, 0x45, 0x53, 0xb4, 0x40, 0x5b, 0xb4, 0xcf, 0x7d, 0x2d, 0xd0, 0xd7, 0xa2, 0x4d, 0x8b, 0xfe,
0x07, 0x05, 0xfa, 0xbf, 0x14, 0x45, 0xff, 0x82, 0x02, 0x05, 0x97, 0x14, 0x45, 0xd2, 0x4b, 0x4b,
0x8d, 0x9a, 0x5a, 0x7e, 0xd3, 0x8e, 0x66, 0x67, 0x7f, 0xfb, 0x9b, 0x99, 0xdd, 0xd9, 0x21, 0xc8,
0x4d, 0xdd, 0xd8, 0x6f, 0xd3, 0x95, 0x96, 0x6d, 0x31, 0x0b, 0xcd, 0x04, 0x47, 0x2b, 0xee, 0xa0,
0x20, 0xd7, 0xac, 0x66, 0xd3, 0x32, 0x5d, 0x61, 0x41, 0xa6, 0xb5, 0x06, 0x69, 0xaa, 0xee, 0x08,
0x7f, 0x2a, 0xc1, 0xc2, 0x86, 0x4d, 0x54, 0x46, 0x36, 0x2c, 0xc3, 0x20, 0x35, 0xa6, 0x5b, 0xa6,
0x42, 0xde, 0x68, 0x13, 0xca, 0xd0, 0x1f, 0x20, 0x55, 0x55, 0x29, 0xc9, 0x4b, 0x8b, 0xd2, 0xf2,
0xe4, 0xda, 0x85, 0x95, 0x90, 0x6d, 0xcf, 0xe6, 0x43, 0x5a, 0x5f, 0x57, 0x29, 0x51, 0xb8, 0x26,
0x5a, 0x80, 0x31, 0xad, 0x5a, 0x31, 0xd5, 0x26, 0xc9, 0x27, 0x16, 0xa5, 0xe5, 0x09, 0x25, 0xad,
0x55, 0xb7, 0xd5, 0x26, 0x41, 0x4b, 0x30, 0x55, 0xf3, 0xed, 0xbb, 0x0a, 0x49, 0xae, 0x90, 0xed,
0x8a, 0xb9, 0xe2, 0x3c, 0xa4, 0x5d, 0x7c, 0xf9, 0xd4, 0xa2, 0xb4, 0x2c, 0x2b, 0xde, 0x08, 0xbf,
0x2f, 0xc1, 0x5c, 0xd1, 0xb6, 0x5a, 0x43, 0x81, 0x12, 0xbf, 0x27, 0xc1, 0xec, 0x3d, 0x95, 0x0e,
0x07, 0x98, 0x3d, 0x90, 0xd7, 0x2d, 0xcb, 0x50, 0x08, 0x6d, 0x59, 0x26, 0x25, 0xe8, 0x16, 0xa4,
0x29, 0x53, 0x59, 0x9b, 0x7a, 0x28, 0xce, 0x0b, 0x51, 0x94, 0xb9, 0x8a, 0xe2, 0xa9, 0xa2, 0x59,
0x18, 0xdd, 0x57, 0x8d, 0xb6, 0x0b, 0x62, 0x5c, 0x71, 0x07, 0xf8, 0x09, 0x64, 0xcb, 0xcc, 0xd6,
0xcd, 0xfa, 0x2f, 0x68, 0x7c, 0xa2, 0x63, 0xfc, 0x23, 0x09, 0xce, 0x15, 0x09, 0xad, 0xd9, 0x7a,
0x75, 0x38, 0x82, 0x0f, 0x7f, 0x25, 0x41, 0x41, 0x84, 0x68, 0x90, 0xbd, 0xff, 0xdd, 0x0f, 0xe8,
0x04, 0x9f, 0x74, 0x25, 0x3c, 0xc9, 0x4b, 0xc6, 0xee, 0x6a, 0x65, 0x2e, 0xe8, 0xc4, 0x3d, 0xc2,
0x20, 0x77, 0x41, 0x96, 0x8a, 0x1c, 0x78, 0x52, 0x09, 0xc9, 0x78, 0x6e, 0x3c, 0xb0, 0x54, 0x6d,
0x38, 0x48, 0xfc, 0x50, 0x82, 0xbc, 0x42, 0x0c, 0xa2, 0xd2, 0x21, 0xf1, 0xea, 0x07, 0x12, 0xcc,
0x07, 0xf8, 0x65, 0x2a, 0xa3, 0xa7, 0x09, 0xe7, 0x5d, 0xe7, 0xc4, 0x8d, 0xc2, 0x19, 0x24, 0xc2,
0xfe, 0x02, 0xa3, 0xce, 0x2f, 0x9a, 0x4f, 0x2c, 0x26, 0x97, 0x27, 0xd7, 0x2e, 0x0b, 0xe7, 0xdc,
0x27, 0x47, 0x8f, 0x9d, 0xac, 0xdb, 0x51, 0x75, 0x5b, 0x71, 0xf5, 0x71, 0x15, 0xe6, 0xca, 0x0d,
0xeb, 0xe0, 0x75, 0x7a, 0x09, 0x1f, 0xc2, 0x7c, 0x74, 0x8d, 0x41, 0xf6, 0x7a, 0x15, 0x72, 0x11,
0x96, 0xdd, 0x6d, 0x4f, 0x28, 0x53, 0x61, 0x9a, 0x29, 0xfe, 0xd2, 0x71, 0x3b, 0xbf, 0xd9, 0x76,
0x54, 0x9b, 0xe9, 0xa7, 0x7d, 0xb1, 0x5d, 0x81, 0x6c, 0xab, 0x83, 0xc3, 0xd5, 0x4b, 0x71, 0xbd,
0x8c, 0x2f, 0xe5, 0x7c, 0x7d, 0x21, 0xc1, 0xac, 0x73, 0xcf, 0x9d, 0x25, 0xcc, 0x9f, 0x4b, 0x30,
0x73, 0x4f, 0xa5, 0x67, 0x09, 0xf2, 0x4b, 0x09, 0x66, 0x9c, 0x23, 0xd3, 0xc5, 0x7c, 0xba, 0x90,
0x97, 0x60, 0x2a, 0x0c, 0x99, 0xe6, 0x53, 0x3c, 0xa4, 0xb3, 0x21, 0xcc, 0x14, 0x7f, 0x2d, 0xc1,
0x82, 0x77, 0xb2, 0x0e, 0x05, 0xd7, 0x7d, 0x03, 0x7f, 0x29, 0xc1, 0x9c, 0x8f, 0xf8, 0xb4, 0x0f,
0xe0, 0x7e, 0x43, 0xe4, 0x1d, 0x09, 0xe6, 0xa3, 0xa0, 0x4f, 0xe5, 0x98, 0xfe, 0x4c, 0x82, 0x59,
0xe7, 0x0c, 0x1d, 0x0a, 0x9f, 0x47, 0xeb, 0x91, 0x94, 0xa0, 0x1e, 0xf9, 0x58, 0x72, 0x2f, 0x96,
0x00, 0xe0, 0x41, 0x88, 0x13, 0x84, 0x59, 0x42, 0x14, 0x66, 0x0e, 0x36, 0x5f, 0x52, 0x2a, 0xd2,
0x7c, 0x72, 0x31, 0xe9, 0x60, 0x0b, 0xca, 0x78, 0x31, 0xd0, 0x29, 0xf1, 0xca, 0xa4, 0xde, 0x24,
0x26, 0x7b, 0x75, 0x3a, 0xa3, 0x64, 0x24, 0x8e, 0x93, 0x81, 0x2e, 0xc0, 0x04, 0x75, 0xd7, 0xf1,
0xab, 0xb7, 0xae, 0x00, 0xbf, 0x25, 0xc1, 0xc2, 0x31, 0x38, 0x83, 0x90, 0x95, 0x87, 0x31, 0xdd,
0xd4, 0xc8, 0xa1, 0x8f, 0xa6, 0x33, 0x74, 0xfe, 0xa9, 0xb6, 0x75, 0x43, 0xf3, 0x61, 0x74, 0x86,
0x0e, 0x27, 0xc8, 0xf1, 0xd7, 0xaf, 0xc2, 0xc7, 0x22, 0x4c, 0x06, 0x1c, 0xe2, 0x41, 0x09, 0x8a,
0xf0, 0xff, 0x61, 0x26, 0x84, 0x66, 0x10, 0x3a, 0x7e, 0x03, 0xe0, 0x93, 0xed, 0x86, 0x4d, 0x52,
0x09, 0x48, 0xf0, 0xf7, 0x12, 0x20, 0xb7, 0x48, 0x28, 0x39, 0x34, 0x9d, 0x66, 0x66, 0x5d, 0x04,
0xf8, 0x9f, 0x4e, 0x0c, 0x2d, 0x78, 0x24, 0x4d, 0x70, 0x09, 0xff, 0xbb, 0x08, 0x32, 0x39, 0x64,
0xb6, 0x5a, 0x69, 0xa9, 0xb6, 0xda, 0xa4, 0xf9, 0xd1, 0x7e, 0x4f, 0x91, 0x49, 0x3e, 0x6d, 0x87,
0xcf, 0xc2, 0xdf, 0x3a, 0xe5, 0x85, 0x17, 0x6f, 0xc3, 0xbe, 0xe3, 0x8b, 0x00, 0x3c, 0x76, 0xdd,
0xbf, 0x47, 0xdd, 0xbf, 0xb9, 0x84, 0x9f, 0xcf, 0x06, 0xe4, 0xf8, 0x0e, 0xdc, 0xed, 0xb4, 0x1c,
0xab, 0x91, 0x29, 0x52, 0x64, 0x0a, 0xfa, 0x1b, 0xa4, 0x3d, 0xf6, 0xfa, 0x3e, 0x83, 0xbd, 0x09,
0xf8, 0x13, 0x09, 0xe6, 0x22, 0xc4, 0x0d, 0x12, 0x97, 0xbb, 0x80, 0x5c, 0xa0, 0x5a, 0x17, 0x7d,
0x07, 0x55, 0xe4, 0x85, 0xe8, 0x0d, 0xa2, 0x7b, 0x55, 0xa6, 0xf5, 0x88, 0x84, 0xe2, 0x6f, 0x24,
0x98, 0xe6, 0x7a, 0xce, 0x6a, 0xe4, 0xec, 0xba, 0xf6, 0x4d, 0x09, 0x50, 0x70, 0x1f, 0x83, 0x30,
0xfd, 0x27, 0xf7, 0xda, 0x75, 0x77, 0x92, 0x5d, 0xbb, 0x24, 0x9c, 0x13, 0x58, 0xcc, 0xd5, 0xc6,
0x3f, 0x4a, 0x90, 0x29, 0x99, 0x94, 0xd8, 0x6c, 0xf8, 0x4b, 0x15, 0xf4, 0x47, 0x18, 0xb7, 0xad,
0x83, 0x8a, 0xa6, 0x32, 0xd5, 0x3b, 0x17, 0xce, 0x09, 0xe1, 0xad, 0x1b, 0x56, 0x55, 0x19, 0xb3,
0xad, 0x83, 0xa2, 0xca, 0x54, 0x74, 0x1e, 0x26, 0x1a, 0x2a, 0x6d, 0x54, 0x9e, 0x91, 0x23, 0x9a,
0x4f, 0x2f, 0x26, 0x97, 0x33, 0xca, 0xb8, 0x23, 0xb8, 0x4f, 0x8e, 0x28, 0x7e, 0x21, 0x41, 0xb6,
0xb3, 0xff, 0x41, 0xe8, 0xbf, 0x04, 0x93, 0xb6, 0x75, 0x50, 0x2a, 0x56, 0xaa, 0xa4, 0xae, 0x9b,
0xde, 0x8d, 0x00, 0x5c, 0xb4, 0xee, 0x48, 0x1c, 0x14, 0xae, 0x02, 0x31, 0x35, 0xef, 0x36, 0x18,
0xe7, 0x82, 0x4d, 0x53, 0xc3, 0xfb, 0x90, 0xdb, 0x31, 0xd4, 0x1a, 0x69, 0x58, 0x86, 0x46, 0x6c,
0x9e, 0x95, 0x28, 0x07, 0x49, 0xa6, 0xd6, 0xbd, 0xe4, 0x76, 0x7e, 0xa2, 0xbf, 0x42, 0x8a, 0x1d,
0xb5, 0x3a, 0x1e, 0xfe, 0x9d, 0x30, 0x7d, 0x02, 0x66, 0x76, 0x8f, 0x5a, 0x44, 0xe1, 0x33, 0xd0,
0x3c, 0xa4, 0x79, 0x2f, 0xca, 0xad, 0x15, 0x64, 0xc5, 0x1b, 0xe1, 0xa7, 0xa1, 0x75, 0xb7, 0x6c,
0xab, 0xdd, 0x42, 0x25, 0x90, 0x5b, 0x5d, 0x99, 0x43, 0x42, 0x7c, 0xb2, 0x46, 0x41, 0x2b, 0xa1,
0xa9, 0xf8, 0x3b, 0x09, 0x32, 0x65, 0xa2, 0xda, 0xb5, 0xc6, 0x59, 0x28, 0xdf, 0x1d, 0xc6, 0x35,
0x6a, 0x78, 0x69, 0xea, 0xfc, 0x44, 0xd7, 0x61, 0x3a, 0xb0, 0xa1, 0x4a, 0xdd, 0x21, 0x28, 0x9f,
0xe6, 0x0d, 0xdb, 0x5c, 0x2b, 0x42, 0x1c, 0xbe, 0x0f, 0xa9, 0x7b, 0x3a, 0xe3, 0x66, 0x9c, 0x4b,
0x58, 0xe2, 0x97, 0xb0, 0xf3, 0x13, 0x9d, 0x0b, 0xc4, 0x6d, 0x82, 0x3b, 0xc0, 0x0f, 0x4e, 0xde,
0x07, 0xb6, 0x6c, 0xcf, 0x33, 0x09, 0xc5, 0x1b, 0xe1, 0x7f, 0x77, 0x99, 0xa3, 0x6d, 0x83, 0xd1,
0x57, 0x8b, 0x4a, 0x04, 0xa9, 0x86, 0xee, 0x95, 0xe2, 0xb2, 0xc2, 0x7f, 0xe3, 0xb7, 0x25, 0x90,
0xef, 0x1a, 0x6d, 0xfa, 0x3a, 0x7c, 0x22, 0x6a, 0x5b, 0x24, 0xc5, 0x6d, 0x8b, 0x17, 0x12, 0xcc,
0x2a, 0xa4, 0xae, 0x53, 0x46, 0xec, 0x07, 0xba, 0xf9, 0xcc, 0x4f, 0xbf, 0x3f, 0xc3, 0x98, 0xaa,
0x69, 0x36, 0xa1, 0xf4, 0x44, 0x44, 0x77, 0x5c, 0x1d, 0xa5, 0xa3, 0x1c, 0x20, 0x28, 0xd1, 0x37,
0x41, 0xd7, 0x6e, 0xc3, 0x54, 0x24, 0x63, 0xd0, 0x38, 0xa4, 0xb6, 0x1f, 0x6d, 0x6f, 0xe6, 0x46,
0xd0, 0x34, 0x64, 0x1e, 0x6f, 0x6e, 0xec, 0x3e, 0x52, 0x2a, 0xeb, 0xa5, 0xed, 0x3b, 0xca, 0x5e,
0x4e, 0x43, 0x39, 0x90, 0x3d, 0xd1, 0xdd, 0x07, 0x8f, 0xee, 0xec, 0xe6, 0xc8, 0xda, 0x0f, 0x39,
0xc8, 0x3c, 0xe4, 0x0b, 0x95, 0x89, 0xbd, 0xaf, 0xd7, 0x08, 0xaa, 0x40, 0x2e, 0xfa, 0xa5, 0x01,
0xdd, 0x10, 0xa6, 0x4f, 0xcc, 0x07, 0x89, 0xc2, 0x49, 0xd0, 0xf1, 0x08, 0x7a, 0x02, 0xd9, 0xf0,
0x27, 0x02, 0x74, 0x4d, 0x68, 0x5e, 0xf8, 0x1d, 0xa1, 0x97, 0xf1, 0x0a, 0x64, 0x42, 0x1d, 0x7f,
0x74, 0x55, 0x68, 0x5b, 0xf4, 0x55, 0xa0, 0x70, 0x59, 0xa8, 0x1a, 0x6c, 0xda, 0xbb, 0xe8, 0xc3,
0x4d, 0xdc, 0x18, 0xf4, 0xc2, 0x4e, 0x6f, 0x2f, 0xf4, 0x2a, 0x4c, 0x1f, 0xeb, 0xc9, 0xa2, 0x9b,
0x42, 0xfb, 0x71, 0xbd, 0xdb, 0x5e, 0x4b, 0x1c, 0x00, 0x3a, 0xde, 0x3b, 0x47, 0x2b, 0x62, 0x0f,
0xc4, 0xb5, 0xfd, 0x0b, 0xab, 0x7d, 0xeb, 0xfb, 0xc4, 0xed, 0xc3, 0xc2, 0x16, 0x61, 0xe1, 0x96,
0xaa, 0x4e, 0x99, 0x5e, 0xa3, 0xe8, 0xba, 0x38, 0xbc, 0x84, 0xcd, 0xe0, 0xc2, 0x8d, 0xfe, 0x94,
0xfd, 0x75, 0x0d, 0x98, 0x0a, 0xb7, 0x36, 0x69, 0x8c, 0xc7, 0x84, 0x4d, 0xd6, 0xc2, 0xf5, 0xbe,
0x74, 0xfd, 0xd5, 0x9e, 0xc2, 0x54, 0xa4, 0x9b, 0x19, 0xb7, 0x3b, 0x61, 0xcf, 0xb3, 0x97, 0xf7,
0xf6, 0x20, 0x13, 0x6a, 0x3b, 0xc6, 0x84, 0xb7, 0xa8, 0x35, 0xd9, 0xcb, 0xf4, 0x53, 0x90, 0x83,
0xdd, 0x41, 0xb4, 0x1c, 0x97, 0x38, 0xc7, 0x0c, 0xf7, 0x95, 0x37, 0x7b, 0x6e, 0xde, 0xf8, 0x93,
0x69, 0xcc, 0x02, 0x82, 0x76, 0x5f, 0x2f, 0xe4, 0xff, 0xf5, 0xb3, 0x26, 0x60, 0xfd, 0xc6, 0x49,
0x59, 0xf3, 0x73, 0xb9, 0xa1, 0x30, 0xbf, 0x45, 0x58, 0xa8, 0xcd, 0xe4, 0x85, 0xae, 0x38, 0x94,
0x84, 0x5d, 0xb4, 0x98, 0x50, 0x12, 0x37, 0xaf, 0xf0, 0x08, 0xd2, 0x21, 0x1b, 0x6a, 0xcf, 0xd0,
0x18, 0x67, 0x8b, 0x9a, 0x4e, 0x85, 0x6b, 0xfd, 0xa8, 0xfa, 0x4b, 0xfd, 0x13, 0x26, 0x03, 0xcf,
0x6b, 0xb4, 0x74, 0x42, 0xc4, 0x06, 0x9f, 0xa3, 0xbd, 0x68, 0x6b, 0x40, 0x26, 0xf4, 0x18, 0x8b,
0x8b, 0x56, 0xc1, 0x4b, 0x37, 0x66, 0x03, 0xc2, 0xb7, 0x1d, 0x1e, 0x41, 0x55, 0xc8, 0x6c, 0x11,
0xd6, 0x7d, 0x1f, 0xa0, 0xdf, 0xc7, 0xbf, 0xce, 0x82, 0xaf, 0xae, 0xc2, 0x52, 0x4f, 0x3d, 0x7f,
0x8d, 0x32, 0xa4, 0xdd, 0x52, 0x1b, 0xe1, 0x98, 0x49, 0x81, 0x77, 0x48, 0xe1, 0xb7, 0x27, 0xea,
0xf8, 0x46, 0x15, 0x48, 0xbb, 0x85, 0x52, 0x8c, 0xd1, 0x50, 0xfd, 0x59, 0x38, 0x59, 0x87, 0x57,
0x5a, 0x78, 0x04, 0x95, 0x60, 0x94, 0x57, 0x48, 0x48, 0x9c, 0x98, 0xc1, 0xea, 0xa9, 0x97, 0x07,
0xff, 0x01, 0xf2, 0x16, 0x61, 0x45, 0x6d, 0xa3, 0xa1, 0x9a, 0x26, 0x31, 0x50, 0x41, 0xa8, 0xbe,
0xd9, 0x6c, 0xb1, 0xa3, 0x98, 0x1d, 0x87, 0x3f, 0x4c, 0xe3, 0x91, 0xb5, 0x3a, 0xc8, 0x3b, 0xb6,
0x75, 0x78, 0xd4, 0xa9, 0x37, 0xfe, 0x05, 0x72, 0xb0, 0x90, 0x3a, 0x71, 0x89, 0xab, 0x31, 0x49,
0x7d, 0xbc, 0x0e, 0xc3, 0x23, 0xeb, 0xeb, 0xff, 0xb9, 0x5d, 0xd7, 0x59, 0xa3, 0x5d, 0x75, 0x6c,
0xac, 0x3e, 0xd7, 0x0d, 0x43, 0x7f, 0xce, 0x48, 0xad, 0xb1, 0xea, 0x4e, 0xbb, 0xa9, 0xe9, 0x94,
0xd9, 0x7a, 0xb5, 0xcd, 0x88, 0xb6, 0xaa, 0x9b, 0x8c, 0xd8, 0xa6, 0x6a, 0xac, 0x72, 0xc3, 0x9e,
0x46, 0xab, 0x5a, 0x4d, 0xf3, 0xf1, 0xad, 0x9f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x14, 0x2f, 0x38,
0x4a, 0xbf, 0x21, 0x00, 0x00,
// 1676 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xdd, 0x6f, 0x1b, 0xc5,
0x16, 0xcf, 0xda, 0x8e, 0x93, 0x9c, 0xac, 0x1d, 0x67, 0xf2, 0xe5, 0xba, 0xed, 0x6d, 0xba, 0xf7,
0xf6, 0x26, 0xfd, 0x4a, 0xae, 0xd2, 0xfb, 0xf9, 0x70, 0xa5, 0x36, 0x71, 0x9a, 0x5a, 0x6d, 0xd3,
0xdc, 0x75, 0x6e, 0x21, 0x54, 0x95, 0x59, 0x7b, 0x07, 0x7b, 0xe9, 0x7a, 0xd7, 0xec, 0x8c, 0xf3,
0xd1, 0x27, 0x04, 0x12, 0x48, 0x80, 0xe0, 0x99, 0x57, 0x24, 0x5e, 0x11, 0x14, 0xc4, 0x7f, 0x80,
0xc4, 0xff, 0x82, 0x10, 0x7f, 0x01, 0x12, 0xda, 0x99, 0xf5, 0x7a, 0x77, 0x33, 0x1b, 0x9b, 0x1a,
0x88, 0xf3, 0xe6, 0x39, 0x3e, 0x73, 0xe6, 0x37, 0xbf, 0x73, 0xce, 0xcc, 0x99, 0xb3, 0x20, 0x37,
0x0d, 0x73, 0xbf, 0x4d, 0x56, 0x5a, 0x8e, 0x4d, 0x6d, 0x34, 0x13, 0x1c, 0xad, 0xf0, 0x41, 0x41,
0xae, 0xd9, 0xcd, 0xa6, 0x6d, 0x71, 0x61, 0x41, 0x26, 0xb5, 0x06, 0x6e, 0x6a, 0x7c, 0xa4, 0x7c,
0x2e, 0xc1, 0xc2, 0x86, 0x83, 0x35, 0x8a, 0x37, 0x6c, 0xd3, 0xc4, 0x35, 0x6a, 0xd8, 0x96, 0x8a,
0xdf, 0x6a, 0x63, 0x42, 0xd1, 0xdf, 0x20, 0x55, 0xd5, 0x08, 0xce, 0x4b, 0x8b, 0xd2, 0xf2, 0xe4,
0xda, 0x85, 0x95, 0x90, 0x6d, 0xcf, 0xe6, 0x43, 0x52, 0x5f, 0xd7, 0x08, 0x56, 0x99, 0x26, 0x5a,
0x80, 0x31, 0xbd, 0x5a, 0xb1, 0xb4, 0x26, 0xce, 0x27, 0x16, 0xa5, 0xe5, 0x09, 0x35, 0xad, 0x57,
0xb7, 0xb5, 0x26, 0x46, 0x4b, 0x30, 0x55, 0xf3, 0xed, 0x73, 0x85, 0x24, 0x53, 0xc8, 0x76, 0xc5,
0x4c, 0x71, 0x1e, 0xd2, 0x1c, 0x5f, 0x3e, 0xb5, 0x28, 0x2d, 0xcb, 0xaa, 0x37, 0x52, 0x3e, 0x94,
0x60, 0xae, 0xe8, 0xd8, 0xad, 0xa1, 0x40, 0xa9, 0x7c, 0x20, 0xc1, 0xec, 0x3d, 0x8d, 0x0c, 0x07,
0x98, 0x3d, 0x90, 0xd7, 0x6d, 0xdb, 0x54, 0x31, 0x69, 0xd9, 0x16, 0xc1, 0xe8, 0x16, 0xa4, 0x09,
0xd5, 0x68, 0x9b, 0x78, 0x28, 0xce, 0x0b, 0x51, 0x94, 0x99, 0x8a, 0xea, 0xa9, 0xa2, 0x59, 0x18,
0xdd, 0xd7, 0xcc, 0x36, 0x07, 0x31, 0xae, 0xf2, 0x81, 0xf2, 0x04, 0xb2, 0x65, 0xea, 0x18, 0x56,
0xfd, 0x37, 0x34, 0x3e, 0xd1, 0x31, 0xfe, 0x89, 0x04, 0xe7, 0x8a, 0x98, 0xd4, 0x1c, 0xa3, 0x3a,
0x1c, 0xc1, 0xa7, 0x7c, 0x23, 0x41, 0x41, 0x84, 0x68, 0x90, 0xbd, 0xff, 0xd7, 0x0f, 0xe8, 0x04,
0x9b, 0x74, 0x25, 0x3c, 0xc9, 0x4b, 0xc6, 0xee, 0x6a, 0x65, 0x26, 0xe8, 0xc4, 0x3d, 0x52, 0x40,
0xee, 0x82, 0x2c, 0x15, 0x19, 0xf0, 0xa4, 0x1a, 0x92, 0xb1, 0xdc, 0x78, 0x60, 0x6b, 0xfa, 0x70,
0x90, 0xf8, 0xb1, 0x04, 0x79, 0x15, 0x9b, 0x58, 0x23, 0x43, 0xe2, 0xd5, 0x8f, 0x24, 0x98, 0x0f,
0xf0, 0x4b, 0x35, 0x4a, 0x4e, 0x13, 0xce, 0xfb, 0xee, 0x89, 0x1b, 0x85, 0x33, 0x48, 0x84, 0xfd,
0x0b, 0x46, 0xdd, 0x5f, 0x24, 0x9f, 0x58, 0x4c, 0x2e, 0x4f, 0xae, 0x5d, 0x16, 0xce, 0xb9, 0x8f,
0x8f, 0x1e, 0xbb, 0x59, 0xb7, 0xa3, 0x19, 0x8e, 0xca, 0xf5, 0x95, 0x2a, 0xcc, 0x95, 0x1b, 0xf6,
0xc1, 0xef, 0xe9, 0x25, 0xe5, 0x10, 0xe6, 0xa3, 0x6b, 0x0c, 0xb2, 0xd7, 0xab, 0x90, 0x8b, 0xb0,
0xcc, 0xb7, 0x3d, 0xa1, 0x4e, 0x85, 0x69, 0x26, 0xca, 0xd7, 0xae, 0xdb, 0xd9, 0xcd, 0xb6, 0xa3,
0x39, 0xd4, 0x38, 0xed, 0x8b, 0xed, 0x0a, 0x64, 0x5b, 0x1d, 0x1c, 0x5c, 0x2f, 0xc5, 0xf4, 0x32,
0xbe, 0x94, 0xf1, 0xf5, 0x95, 0x04, 0xb3, 0xee, 0x3d, 0x77, 0x96, 0x30, 0x7f, 0x29, 0xc1, 0xcc,
0x3d, 0x8d, 0x9c, 0x25, 0xc8, 0x2f, 0x24, 0x98, 0x71, 0x8f, 0x4c, 0x8e, 0xf9, 0x74, 0x21, 0x2f,
0xc1, 0x54, 0x18, 0x32, 0xc9, 0xa7, 0x58, 0x48, 0x67, 0x43, 0x98, 0x89, 0xf2, 0xad, 0x04, 0x0b,
0xde, 0xc9, 0x3a, 0x14, 0x5c, 0xf7, 0x0d, 0xfc, 0x85, 0x04, 0x73, 0x3e, 0xe2, 0xd3, 0x3e, 0x80,
0xfb, 0x0d, 0x91, 0xf7, 0x24, 0x98, 0x8f, 0x82, 0x3e, 0x95, 0x63, 0xfa, 0x0b, 0x09, 0x66, 0xdd,
0x33, 0x74, 0x28, 0x7c, 0x1e, 0xad, 0x47, 0x52, 0x82, 0x7a, 0xe4, 0x53, 0x89, 0x5f, 0x2c, 0x01,
0xc0, 0x83, 0x10, 0x27, 0x08, 0xb3, 0x84, 0x28, 0xcc, 0x5c, 0x6c, 0xbe, 0xa4, 0x54, 0x24, 0xf9,
0xe4, 0x62, 0xd2, 0xc5, 0x16, 0x94, 0xb1, 0x62, 0xa0, 0x53, 0xe2, 0x95, 0x71, 0xbd, 0x89, 0x2d,
0xfa, 0xf2, 0x74, 0x46, 0xc9, 0x48, 0x1c, 0x27, 0x03, 0x5d, 0x80, 0x09, 0xc2, 0xd7, 0xf1, 0xab,
0xb7, 0xae, 0x40, 0x79, 0x47, 0x82, 0x85, 0x63, 0x70, 0x06, 0x21, 0x2b, 0x0f, 0x63, 0x86, 0xa5,
0xe3, 0x43, 0x1f, 0x4d, 0x67, 0xe8, 0xfe, 0x53, 0x6d, 0x1b, 0xa6, 0xee, 0xc3, 0xe8, 0x0c, 0x5d,
0x4e, 0x90, 0xeb, 0xaf, 0x3f, 0x84, 0x8f, 0x45, 0x98, 0x0c, 0x38, 0xc4, 0x83, 0x12, 0x14, 0x29,
0x6f, 0xc2, 0x4c, 0x08, 0xcd, 0x20, 0x74, 0xfc, 0x09, 0xc0, 0x27, 0x9b, 0x87, 0x4d, 0x52, 0x0d,
0x48, 0x94, 0x1f, 0x25, 0x40, 0xbc, 0x48, 0x28, 0xb9, 0x34, 0x9d, 0x66, 0x66, 0x5d, 0x04, 0x78,
0xc3, 0xc0, 0xa6, 0x1e, 0x3c, 0x92, 0x26, 0x98, 0x84, 0xfd, 0x5d, 0x04, 0x19, 0x1f, 0x52, 0x47,
0xab, 0xb4, 0x34, 0x47, 0x6b, 0x92, 0xfc, 0x68, 0xbf, 0xa7, 0xc8, 0x24, 0x9b, 0xb6, 0xc3, 0x66,
0x29, 0xdf, 0xbb, 0xe5, 0x85, 0x17, 0x6f, 0xc3, 0xbe, 0xe3, 0x8b, 0x00, 0x2c, 0x76, 0xf9, 0xdf,
0xa3, 0xfc, 0x6f, 0x26, 0x61, 0xe7, 0xb3, 0x09, 0x39, 0xb6, 0x03, 0xbe, 0x9d, 0x96, 0x6b, 0x35,
0x32, 0x45, 0x8a, 0x4c, 0x41, 0xff, 0x81, 0xb4, 0xc7, 0x5e, 0xdf, 0x67, 0xb0, 0x37, 0x41, 0xf9,
0x4c, 0x82, 0xb9, 0x08, 0x71, 0x83, 0xc4, 0xe5, 0x2e, 0x20, 0x0e, 0x54, 0xef, 0xa2, 0xef, 0xa0,
0x8a, 0xbc, 0x10, 0xbd, 0x41, 0x74, 0xaf, 0xea, 0xb4, 0x11, 0x91, 0x10, 0xe5, 0x3b, 0x09, 0xa6,
0x99, 0x9e, 0xbb, 0x1a, 0x3e, 0xbb, 0xae, 0x7d, 0x5b, 0x02, 0x14, 0xdc, 0xc7, 0x20, 0x4c, 0xff,
0x83, 0x5f, 0xbb, 0x7c, 0x27, 0xd9, 0xb5, 0x4b, 0xc2, 0x39, 0x81, 0xc5, 0xb8, 0xb6, 0xf2, 0xb3,
0x04, 0x99, 0x92, 0x45, 0xb0, 0x43, 0x87, 0xbf, 0x54, 0x41, 0x7f, 0x87, 0x71, 0xc7, 0x3e, 0xa8,
0xe8, 0x1a, 0xd5, 0xbc, 0x73, 0xe1, 0x9c, 0x10, 0xde, 0xba, 0x69, 0x57, 0xd5, 0x31, 0xc7, 0x3e,
0x28, 0x6a, 0x54, 0x43, 0xe7, 0x61, 0xa2, 0xa1, 0x91, 0x46, 0xe5, 0x19, 0x3e, 0x22, 0xf9, 0xf4,
0x62, 0x72, 0x39, 0xa3, 0x8e, 0xbb, 0x82, 0xfb, 0xf8, 0x88, 0x28, 0xef, 0x4a, 0x90, 0xed, 0xec,
0x7f, 0x10, 0xfa, 0x2f, 0xc1, 0xa4, 0x63, 0x1f, 0x94, 0x8a, 0x95, 0x2a, 0xae, 0x1b, 0x96, 0x77,
0x23, 0x00, 0x13, 0xad, 0xbb, 0x12, 0x17, 0x05, 0x57, 0xc0, 0x96, 0xee, 0xdd, 0x06, 0xe3, 0x4c,
0xb0, 0x69, 0xe9, 0xca, 0x3e, 0xe4, 0x76, 0x4c, 0xad, 0x86, 0x1b, 0xb6, 0xa9, 0x63, 0x87, 0x65,
0x25, 0xca, 0x41, 0x92, 0x6a, 0x75, 0x2f, 0xb9, 0xdd, 0x9f, 0xe8, 0xdf, 0x90, 0xa2, 0x47, 0xad,
0x8e, 0x87, 0xff, 0x22, 0x4c, 0x9f, 0x80, 0x99, 0xdd, 0xa3, 0x16, 0x56, 0xd9, 0x0c, 0x34, 0x0f,
0x69, 0xd6, 0x8b, 0xe2, 0xb5, 0x82, 0xac, 0x7a, 0x23, 0xe5, 0x69, 0x68, 0xdd, 0x2d, 0xc7, 0x6e,
0xb7, 0x50, 0x09, 0xe4, 0x56, 0x57, 0xe6, 0x92, 0x10, 0x9f, 0xac, 0x51, 0xd0, 0x6a, 0x68, 0xaa,
0xf2, 0x83, 0x04, 0x99, 0x32, 0xd6, 0x9c, 0x5a, 0xe3, 0x2c, 0x94, 0xef, 0x2e, 0xe3, 0x3a, 0x31,
0xbd, 0x34, 0x75, 0x7f, 0xa2, 0xeb, 0x30, 0x1d, 0xd8, 0x50, 0xa5, 0xee, 0x12, 0x94, 0x4f, 0xb3,
0x86, 0x6d, 0xae, 0x15, 0x21, 0x4e, 0xb9, 0x0f, 0xa9, 0x7b, 0x06, 0x65, 0x66, 0xdc, 0x4b, 0x58,
0x62, 0x97, 0xb0, 0xfb, 0x13, 0x9d, 0x0b, 0xc4, 0x6d, 0x82, 0x39, 0xc0, 0x0f, 0x4e, 0xd6, 0x07,
0xb6, 0x1d, 0xcf, 0x33, 0x09, 0xd5, 0x1b, 0x29, 0xaf, 0x76, 0x99, 0x23, 0x6d, 0x93, 0x92, 0x97,
0x8b, 0x4a, 0x04, 0xa9, 0x86, 0xe1, 0x95, 0xe2, 0xb2, 0xca, 0x7e, 0xbb, 0xa5, 0x98, 0x7c, 0xd7,
0x6c, 0x93, 0xd3, 0xf4, 0x89, 0x9b, 0x76, 0xb3, 0x2a, 0xae, 0x1b, 0x84, 0x62, 0xe7, 0x81, 0x61,
0x3d, 0xf3, 0x93, 0xef, 0x9f, 0x30, 0xa6, 0xe9, 0xba, 0x83, 0x09, 0x39, 0x11, 0xcf, 0x1d, 0xae,
0xa3, 0x76, 0x94, 0x03, 0xf4, 0x24, 0xfa, 0xa6, 0xe7, 0xda, 0x6d, 0x98, 0x8a, 0xe4, 0x0b, 0x1a,
0x87, 0xd4, 0xf6, 0xa3, 0xed, 0xcd, 0xdc, 0x08, 0x9a, 0x86, 0xcc, 0xe3, 0xcd, 0x8d, 0xdd, 0x47,
0x6a, 0x65, 0xbd, 0xb4, 0x7d, 0x47, 0xdd, 0xcb, 0xe9, 0x28, 0x07, 0xb2, 0x27, 0xba, 0xfb, 0xe0,
0xd1, 0x9d, 0xdd, 0x1c, 0x5e, 0xfb, 0x29, 0x07, 0x99, 0x87, 0x6c, 0xa1, 0x32, 0x76, 0xf6, 0x8d,
0x1a, 0x46, 0x15, 0xc8, 0x45, 0xbf, 0x33, 0xa0, 0x1b, 0xc2, 0xe4, 0x89, 0xf9, 0x1c, 0x51, 0x38,
0x09, 0xba, 0x32, 0x82, 0x9e, 0x40, 0x36, 0xfc, 0x81, 0x00, 0x5d, 0x13, 0x9a, 0x17, 0x7e, 0x45,
0xe8, 0x65, 0xbc, 0x02, 0x99, 0x50, 0xbf, 0x1f, 0x5d, 0x15, 0xda, 0x16, 0x7d, 0x13, 0x28, 0x5c,
0x16, 0xaa, 0x06, 0x5b, 0xf6, 0x1c, 0x7d, 0xb8, 0x85, 0x1b, 0x83, 0x5e, 0xd8, 0xe7, 0xed, 0x85,
0x5e, 0x83, 0xe9, 0x63, 0x1d, 0x59, 0x74, 0x53, 0x68, 0x3f, 0xae, 0x73, 0xdb, 0x6b, 0x89, 0x03,
0x40, 0xc7, 0x3b, 0xe7, 0x68, 0x45, 0xec, 0x81, 0xb8, 0xa6, 0x7f, 0x61, 0xb5, 0x6f, 0x7d, 0x9f,
0xb8, 0x7d, 0x58, 0xd8, 0xc2, 0x34, 0xdc, 0x50, 0x35, 0x08, 0x35, 0x6a, 0x04, 0x5d, 0x17, 0x87,
0x97, 0xb0, 0x15, 0x5c, 0xb8, 0xd1, 0x9f, 0xb2, 0xbf, 0xae, 0x09, 0x53, 0xe1, 0xc6, 0x26, 0x89,
0xf1, 0x98, 0xb0, 0xc5, 0x5a, 0xb8, 0xde, 0x97, 0xae, 0xbf, 0xda, 0x53, 0x98, 0x8a, 0xf4, 0x32,
0xe3, 0x76, 0x27, 0xec, 0x78, 0xf6, 0xf2, 0xde, 0x1e, 0x64, 0x42, 0x4d, 0xc7, 0x98, 0xf0, 0x16,
0x35, 0x26, 0x7b, 0x99, 0x7e, 0x0a, 0x72, 0xb0, 0x37, 0x88, 0x96, 0xe3, 0x12, 0xe7, 0x98, 0xe1,
0xbe, 0xf2, 0x66, 0x8f, 0xe7, 0x8d, 0x3f, 0x99, 0xc4, 0x2c, 0x20, 0x68, 0xf6, 0xf5, 0x42, 0xfe,
0xba, 0x9f, 0x35, 0x01, 0xeb, 0x37, 0x4e, 0xca, 0x9a, 0x5f, 0xcb, 0x0d, 0x81, 0xf9, 0x2d, 0x4c,
0x43, 0x4d, 0x26, 0x2f, 0x74, 0xc5, 0xa1, 0x24, 0xec, 0xa1, 0xc5, 0x84, 0x92, 0xb8, 0x75, 0xa5,
0x8c, 0x20, 0x03, 0xb2, 0xa1, 0xe6, 0x0c, 0x89, 0x71, 0xb6, 0xa8, 0xe5, 0x54, 0xb8, 0xd6, 0x8f,
0xaa, 0xbf, 0xd4, 0xff, 0x61, 0x32, 0xf0, 0xb8, 0x46, 0x4b, 0x27, 0x44, 0x6c, 0xf0, 0x31, 0xda,
0x8b, 0xb6, 0x06, 0x64, 0x42, 0x4f, 0xb1, 0xb8, 0x68, 0x15, 0xbc, 0x73, 0x63, 0x36, 0x20, 0x7c,
0xd9, 0x29, 0x23, 0xa8, 0x0a, 0x99, 0x2d, 0x4c, 0xbb, 0xaf, 0x03, 0xf4, 0xd7, 0xf8, 0xb7, 0x59,
0xf0, 0xcd, 0x55, 0x58, 0xea, 0xa9, 0xe7, 0xaf, 0x51, 0x86, 0x34, 0x2f, 0xb4, 0x91, 0x12, 0x33,
0x29, 0xf0, 0x0a, 0x29, 0xfc, 0xf9, 0x44, 0x1d, 0xdf, 0xa8, 0x0a, 0x69, 0x5e, 0x26, 0xc5, 0x18,
0x0d, 0x55, 0x9f, 0x85, 0x93, 0x75, 0x58, 0x9d, 0xa5, 0x8c, 0xa0, 0x12, 0x8c, 0xb2, 0xfa, 0x08,
0x89, 0x13, 0x33, 0x58, 0x3b, 0xf5, 0xf2, 0xe0, 0xff, 0x40, 0xde, 0xc2, 0xb4, 0xa8, 0x6f, 0x34,
0x34, 0xcb, 0xc2, 0x26, 0x2a, 0x08, 0xd5, 0x37, 0x9b, 0x2d, 0x7a, 0x14, 0xb3, 0xe3, 0xf0, 0x67,
0x69, 0x65, 0x64, 0xad, 0x0e, 0xf2, 0x8e, 0x63, 0x1f, 0x1e, 0x75, 0xea, 0x8d, 0x57, 0x40, 0x0e,
0x16, 0x52, 0x27, 0x2e, 0x71, 0x35, 0x26, 0xa9, 0x8f, 0xd7, 0x61, 0xca, 0xc8, 0xfa, 0xfa, 0x6b,
0xb7, 0xeb, 0x06, 0x6d, 0xb4, 0xab, 0xae, 0x8d, 0xd5, 0xe7, 0x86, 0x69, 0x1a, 0xcf, 0x29, 0xae,
0x35, 0x56, 0xf9, 0xb4, 0x9b, 0xba, 0x41, 0xa8, 0x63, 0x54, 0xdb, 0x14, 0xeb, 0xab, 0x86, 0x45,
0xb1, 0x63, 0x69, 0xe6, 0x2a, 0x33, 0xec, 0x69, 0xb4, 0xaa, 0xd5, 0x34, 0x1b, 0xdf, 0xfa, 0x25,
0x00, 0x00, 0xff, 0xff, 0x47, 0x3b, 0x71, 0x2a, 0xbd, 0x21, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -9,7 +9,6 @@ import (
type Condition interface {
WaitToFinish() error
Notify(err error)
Ctx() context.Context
}
type TaskCondition struct {
@ -32,10 +31,6 @@ func (tc *TaskCondition) Notify(err error) {
tc.done <- err
}
func (tc *TaskCondition) Ctx() context.Context {
return tc.ctx
}
func NewTaskCondition(ctx context.Context) *TaskCondition {
return &TaskCondition{
done: make(chan error),

View File

@ -0,0 +1,123 @@
package proxynode
//
//func (node *NodeImpl) DescribePartition(ctx context.Context, in *milvuspb.PartitionName) (*milvuspb.PartitionDescription, error) {
// log.Println("describe partition: ", in)
//
// return &milvuspb.PartitionDescription{
// Status: &commonpb.Status{
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
// Reason: "Deprecated!",
// },
// Name: in,
// Statistics: nil,
// }, nil
//
//}
//
//func (p *NodeImpl) DescribePartition2(ctx context.Context, in *milvuspb.PartitionName) (*milvuspb.PartitionDescription, error) {
// log.Println("describe partition: ", in)
// dpt := &DescribePartitionTask{
// Condition: NewTaskCondition(ctx),
// DescribePartitionRequest: internalpb.DescribePartitionRequest{
// MsgType: commonpb.MsgType_kDescribePartition,
// ReqID: 0,
// Timestamp: 0,
// ProxyID: 0,
// PartitionName: in,
// //TODO, ReqID,Timestamp,ProxyID
// },
// masterClient: p.masterClient,
// result: nil,
// ctx: nil,
// }
//
// var cancel func()
// dpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
// defer cancel()
//
// err := func() error {
// select {
// case <-ctx.Done():
// return errors.New("describe partion timeout")
// default:
// return p.sched.DdQueue.Enqueue(dpt)
// }
// }()
//
// if err != nil {
// return &milvuspb.PartitionDescription{
// Status: &commonpb.Status{
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
// Reason: err.Error(),
// },
// Name: in,
// Statistics: nil,
// }, nil
// }
//
// err = dpt.WaitToFinish()
// if err != nil {
// return &milvuspb.PartitionDescription{
// Status: &commonpb.Status{
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
// Reason: err.Error(),
// },
// Name: in,
// Statistics: nil,
// }, nil
// }
// return dpt.result, nil
//}
//
//func (node *NodeImpl) DescribeIndexProgress(ctx context.Context, req *milvuspb.DescribeIndexProgressRequest) (*milvuspb.BoolResponse, error) {
// log.Println("Describe index progress for: ", req.FieldName)
// dipt := &GetIndexStateTask{
// Condition: NewTaskCondition(ctx),
// IndexStateRequest: milvuspb.IndexStateRequest{
// Base: &commonpb.MsgBase{
// MsgType: commonpb.MsgType_kGetIndexState,
// SourceID: Params.ProxyID(),
// },
// CollectionName: req.CollectionName,
// FieldName: req.FieldName,
// },
// masterClient: node.masterClient,
// }
//
// var cancel func()
// dipt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
// defer cancel()
//
// fn := func() error {
// select {
// case <-ctx.Done():
// return errors.New("create index timeout")
// default:
// return node.sched.DdQueue.Enqueue(dipt)
// }
// }
// err := fn()
// if err != nil {
// return &milvuspb.BoolResponse{
// Status: &commonpb.Status{
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
// Reason: err.Error(),
// },
// Value: false,
// }, nil
// }
//
// err = dipt.WaitToFinish()
// if err != nil {
// return &milvuspb.BoolResponse{
// Status: &commonpb.Status{
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
// Reason: err.Error(),
// },
// Value: false,
// }, nil
// }
//
// return dipt.result, nil
//}

View File

@ -6,9 +6,11 @@ import (
"strconv"
"time"
"github.com/opentracing/opentracing-go"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/proto/proxypb"
@ -33,17 +35,26 @@ func (node *NodeImpl) InvalidateCollectionMetaCache(ctx context.Context, request
func (node *NodeImpl) CreateCollection(request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
log.Println("create collection: ", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
cct := &CreateCollectionTask{
Condition: NewTaskCondition(ctx),
CreateCollectionRequest: request,
masterClient: node.masterClient,
dataServiceClient: node.dataServiceClient,
}
var cancel func()
cct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
err := node.sched.DdQueue.Enqueue(cct)
fn := func() error {
select {
case <-ctx.Done():
return errors.New("create collection timeout")
default:
return node.sched.DdQueue.Enqueue(cct)
}
}
err := fn()
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@ -64,16 +75,25 @@ func (node *NodeImpl) CreateCollection(request *milvuspb.CreateCollectionRequest
func (node *NodeImpl) DropCollection(request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
log.Println("drop collection: ", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
dct := &DropCollectionTask{
Condition: NewTaskCondition(ctx),
DropCollectionRequest: request,
masterClient: node.masterClient,
}
var cancel func()
dct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
err := node.sched.DdQueue.Enqueue(dct)
fn := func() error {
select {
case <-ctx.Done():
return errors.New("create collection timeout")
default:
return node.sched.DdQueue.Enqueue(dct)
}
}
err := fn()
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@ -94,16 +114,25 @@ func (node *NodeImpl) DropCollection(request *milvuspb.DropCollectionRequest) (*
func (node *NodeImpl) HasCollection(request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
log.Println("has collection: ", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
hct := &HasCollectionTask{
Condition: NewTaskCondition(ctx),
HasCollectionRequest: request,
masterClient: node.masterClient,
}
var cancel func()
hct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
err := node.sched.DdQueue.Enqueue(hct)
fn := func() error {
select {
case <-ctx.Done():
return errors.New("create collection timeout")
default:
return node.sched.DdQueue.Enqueue(hct)
}
}
err := fn()
if err != nil {
return &milvuspb.BoolResponse{
Status: &commonpb.Status{
@ -136,16 +165,25 @@ func (node *NodeImpl) ReleaseCollection(request *milvuspb.ReleaseCollectionReque
func (node *NodeImpl) DescribeCollection(request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
log.Println("describe collection: ", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
dct := &DescribeCollectionTask{
Condition: NewTaskCondition(ctx),
DescribeCollectionRequest: request,
masterClient: node.masterClient,
}
var cancel func()
dct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
err := node.sched.DdQueue.Enqueue(dct)
fn := func() error {
select {
case <-ctx.Done():
return errors.New("create collection timeout")
default:
return node.sched.DdQueue.Enqueue(dct)
}
}
err := fn()
if err != nil {
return &milvuspb.DescribeCollectionResponse{
Status: &commonpb.Status{
@ -170,15 +208,25 @@ func (node *NodeImpl) DescribeCollection(request *milvuspb.DescribeCollectionReq
func (node *NodeImpl) GetCollectionStatistics(request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
log.Println("get collection statistics")
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
g := &GetCollectionsStatisticsTask{
Condition: NewTaskCondition(ctx),
CollectionStatsRequest: request,
dataServiceClient: node.dataServiceClient,
}
var cancel func()
g.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
err := node.sched.DdQueue.Enqueue(g)
fn := func() error {
select {
case <-ctx.Done():
return errors.New("create collection timeout")
default:
return node.sched.DdQueue.Enqueue(g)
}
}
err := fn()
if err != nil {
return &milvuspb.CollectionStatsResponse{
Status: &commonpb.Status{
@ -203,15 +251,25 @@ func (node *NodeImpl) GetCollectionStatistics(request *milvuspb.CollectionStatsR
func (node *NodeImpl) ShowCollections(request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
log.Println("show collections")
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
sct := &ShowCollectionsTask{
Condition: NewTaskCondition(ctx),
ShowCollectionRequest: request,
masterClient: node.masterClient,
}
var cancel func()
sct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
err := node.sched.DdQueue.Enqueue(sct)
fn := func() error {
select {
case <-ctx.Done():
return errors.New("create collection timeout")
default:
return node.sched.DdQueue.Enqueue(sct)
}
}
err := fn()
if err != nil {
return &milvuspb.ShowCollectionResponse{
Status: &commonpb.Status{
@ -236,16 +294,27 @@ func (node *NodeImpl) ShowCollections(request *milvuspb.ShowCollectionRequest) (
func (node *NodeImpl) CreatePartition(request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
log.Println("create partition", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
cpt := &CreatePartitionTask{
Condition: NewTaskCondition(ctx),
CreatePartitionRequest: request,
masterClient: node.masterClient,
result: nil,
ctx: nil,
}
var cancel func()
cpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
err := func() error {
select {
case <-ctx.Done():
return errors.New("create partition timeout")
default:
return node.sched.DdQueue.Enqueue(cpt)
}
}()
err := node.sched.DdQueue.Enqueue(cpt)
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@ -264,16 +333,27 @@ func (node *NodeImpl) CreatePartition(request *milvuspb.CreatePartitionRequest)
func (node *NodeImpl) DropPartition(request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
log.Println("drop partition: ", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
dpt := &DropPartitionTask{
Condition: NewTaskCondition(ctx),
DropPartitionRequest: request,
masterClient: node.masterClient,
result: nil,
ctx: nil,
}
err := node.sched.DdQueue.Enqueue(dpt)
var cancel func()
dpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
err := func() error {
select {
case <-ctx.Done():
return errors.New("drop partition timeout")
default:
return node.sched.DdQueue.Enqueue(dpt)
}
}()
if err != nil {
return &commonpb.Status{
@ -293,16 +373,27 @@ func (node *NodeImpl) DropPartition(request *milvuspb.DropPartitionRequest) (*co
func (node *NodeImpl) HasPartition(request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
log.Println("has partition: ", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
hpt := &HasPartitionTask{
Condition: NewTaskCondition(ctx),
HasPartitionRequest: request,
masterClient: node.masterClient,
result: nil,
ctx: nil,
}
err := node.sched.DdQueue.Enqueue(hpt)
var cancel func()
hpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
err := func() error {
select {
case <-ctx.Done():
return errors.New("has partition timeout")
default:
return node.sched.DdQueue.Enqueue(hpt)
}
}()
if err != nil {
return &milvuspb.BoolResponse{
@ -340,16 +431,27 @@ func (node *NodeImpl) GetPartitionStatistics(request *milvuspb.PartitionStatsReq
func (node *NodeImpl) ShowPartitions(request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
log.Println("show partitions: ", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
spt := &ShowPartitionsTask{
Condition: NewTaskCondition(ctx),
ShowPartitionRequest: request,
masterClient: node.masterClient,
result: nil,
ctx: nil,
}
err := node.sched.DdQueue.Enqueue(spt)
var cancel func()
spt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
err := func() error {
select {
case <-ctx.Done():
return errors.New("show partition timeout")
default:
return node.sched.DdQueue.Enqueue(spt)
}
}()
if err != nil {
return &milvuspb.ShowPartitionResponse{
@ -374,15 +476,26 @@ func (node *NodeImpl) ShowPartitions(request *milvuspb.ShowPartitionRequest) (*m
func (node *NodeImpl) CreateIndex(request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
log.Println("create index for: ", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
cit := &CreateIndexTask{
Condition: NewTaskCondition(ctx),
CreateIndexRequest: request,
masterClient: node.masterClient,
}
err := node.sched.DdQueue.Enqueue(cit)
var cancel func()
cit.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
fn := func() error {
select {
case <-ctx.Done():
return errors.New("create index timeout")
default:
return node.sched.DdQueue.Enqueue(cit)
}
}
err := fn()
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@ -403,15 +516,26 @@ func (node *NodeImpl) CreateIndex(request *milvuspb.CreateIndexRequest) (*common
func (node *NodeImpl) DescribeIndex(request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
log.Println("Describe index for: ", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
dit := &DescribeIndexTask{
Condition: NewTaskCondition(ctx),
DescribeIndexRequest: request,
masterClient: node.masterClient,
}
err := node.sched.DdQueue.Enqueue(dit)
var cancel func()
dit.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
fn := func() error {
select {
case <-ctx.Done():
return errors.New("create index timeout")
default:
return node.sched.DdQueue.Enqueue(dit)
}
}
err := fn()
if err != nil {
return &milvuspb.DescribeIndexResponse{
Status: &commonpb.Status{
@ -436,14 +560,25 @@ func (node *NodeImpl) DescribeIndex(request *milvuspb.DescribeIndexRequest) (*mi
func (node *NodeImpl) GetIndexState(request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
// log.Println("Describe index progress for: ", request)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
dipt := &GetIndexStateTask{
Condition: NewTaskCondition(ctx),
IndexStateRequest: request,
}
err := node.sched.DdQueue.Enqueue(dipt)
var cancel func()
dipt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
fn := func() error {
select {
case <-ctx.Done():
return errors.New("create index timeout")
default:
return node.sched.DdQueue.Enqueue(dipt)
}
}
err := fn()
if err != nil {
return &milvuspb.IndexStateResponse{
Status: &commonpb.Status{
@ -467,10 +602,14 @@ func (node *NodeImpl) GetIndexState(request *milvuspb.IndexStateRequest) (*milvu
}
func (node *NodeImpl) Insert(request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error) {
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
span, ctx := opentracing.StartSpanFromContext(ctx, "insert grpc received")
defer span.Finish()
span.SetTag("collection name", request.CollectionName)
span.SetTag("partition tag", request.PartitionName)
log.Println("insert into: ", request.CollectionName)
it := &InsertTask{
ctx: ctx,
Condition: NewTaskCondition(ctx),
dataServiceClient: node.dataServiceClient,
BaseInsertTask: BaseInsertTask{
@ -493,7 +632,20 @@ func (node *NodeImpl) Insert(request *milvuspb.InsertRequest) (*milvuspb.InsertR
it.PartitionName = Params.DefaultPartitionTag
}
err := node.sched.DmQueue.Enqueue(it)
var cancel func()
it.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
defer cancel()
fn := func() error {
select {
case <-ctx.Done():
return errors.New("insert timeout")
default:
return node.sched.DmQueue.Enqueue(it)
}
}
err := fn()
if err != nil {
return &milvuspb.InsertResponse{
@ -518,10 +670,15 @@ func (node *NodeImpl) Insert(request *milvuspb.InsertRequest) (*milvuspb.InsertR
}
func (node *NodeImpl) Search(request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ctx := context.Background()
span, ctx := opentracing.StartSpanFromContext(ctx, "search grpc received")
defer span.Finish()
span.SetTag("collection name", request.CollectionName)
span.SetTag("partition tag", request.PartitionNames)
span.SetTag("dsl", request.Dsl)
log.Println("search: ", request.CollectionName, request.Dsl)
qt := &SearchTask{
ctx: ctx,
Condition: NewTaskCondition(ctx),
SearchRequest: internalpb2.SearchRequest{
Base: &commonpb.MsgBase{
@ -534,8 +691,20 @@ func (node *NodeImpl) Search(request *milvuspb.SearchRequest) (*milvuspb.SearchR
resultBuf: make(chan []*internalpb2.SearchResults),
query: request,
}
var cancel func()
qt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
log.Printf("grpc address of query task: %p", qt)
defer cancel()
err := node.sched.DqQueue.Enqueue(qt)
fn := func() error {
select {
case <-ctx.Done():
return errors.New("create collection timeout")
default:
return node.sched.DqQueue.Enqueue(qt)
}
}
err := fn()
if err != nil {
return &milvuspb.SearchResults{
Status: &commonpb.Status{
@ -559,32 +728,7 @@ func (node *NodeImpl) Search(request *milvuspb.SearchRequest) (*milvuspb.SearchR
}
func (node *NodeImpl) Flush(request *milvuspb.FlushRequest) (*commonpb.Status, error) {
log.Println("AA Flush collections: ", request.CollectionNames)
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
defer cancel()
ft := &FlushTask{
Condition: NewTaskCondition(ctx),
FlushRequest: request,
dataServiceClient: node.dataServiceClient,
}
err := node.sched.DdQueue.Enqueue(ft)
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: err.Error(),
}, nil
}
err = ft.WaitToFinish()
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: err.Error(),
}, nil
}
return ft.result, nil
panic("implement me")
}
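Every handler above repeats the same select-on-ctx.Done wrapper around Enqueue; a hypothetical helper along these lines (not part of this patch, and the enqueuer interface is invented for the sketch) would express the pattern once.
package proxynode
import (
	"context"
	"errors"
)
// enqueuer is the minimal surface the helper needs; the concrete queues in this
// file are node.sched.DdQueue, DmQueue and DqQueue.
type enqueuer interface {
	Enqueue(t interface{}) error
}
// enqueueOrTimeout refuses work whose request context is already done and
// otherwise hands the task to the queue, mirroring the inline closures above.
func enqueueOrTimeout(ctx context.Context, q enqueuer, t interface{}, op string) error {
	select {
	case <-ctx.Done():
		return errors.New(op + " timeout")
	default:
		return q.Enqueue(t)
	}
}
A handler would then call, for example, err := enqueueOrTimeout(cct.ctx, node.sched.DdQueue, cct, "create collection"), assuming the queue types were adapted to the illustrative interface.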
func (node *NodeImpl) GetDdChannel(request *commonpb.Empty) (*milvuspb.StringResponse, error) {

View File

@ -3,6 +3,7 @@ package proxynode
import (
"log"
"sort"
"strconv"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
@ -180,7 +181,6 @@ func insertRepackFunc(tsMsgs []msgstream.TsMsg,
partitionID := insertRequest.PartitionID
partitionName := insertRequest.PartitionName
proxyID := insertRequest.Base.SourceID
channelNames := channelNamesMap[collectionID]
for index, key := range keys {
ts := insertRequest.Timestamps[index]
rowID := insertRequest.RowIDs[index]
@ -191,7 +191,6 @@ func insertRepackFunc(tsMsgs []msgstream.TsMsg,
result[key] = &msgPack
}
segmentID := getSegmentID(reqID, key)
channelID := channelNames[int(key)%len(channelNames)]
sliceRequest := internalpb2.InsertRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kInsert,
@ -205,8 +204,7 @@ func insertRepackFunc(tsMsgs []msgstream.TsMsg,
PartitionName: partitionName,
SegmentID: segmentID,
// todo rename to ChannelName
// ChannelID: strconv.FormatInt(int64(key), 10),
ChannelID: channelID,
ChannelID: strconv.FormatInt(int64(key), 10),
Timestamps: []uint64{ts},
RowIDs: []int64{rowID},
RowData: []*commonpb.Blob{row},

View File

@ -1,6 +1,7 @@
package proxynode
import (
"context"
"errors"
"log"
"math"
@ -8,6 +9,9 @@ import (
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
"github.com/opentracing/opentracing-go"
oplog "github.com/opentracing/opentracing-go/log"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/allocator"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
@ -41,6 +45,7 @@ type InsertTask struct {
Condition
dataServiceClient DataServiceClient
result *milvuspb.InsertResponse
ctx context.Context
rowIDAllocator *allocator.IDAllocator
}
@ -82,12 +87,21 @@ func (it *InsertTask) PreExecute() error {
it.Base.MsgType = commonpb.MsgType_kInsert
it.Base.SourceID = Params.ProxyID
span, ctx := opentracing.StartSpanFromContext(it.ctx, "InsertTask preExecute")
defer span.Finish()
it.ctx = ctx
span.SetTag("hash keys", it.Base.MsgID)
span.SetTag("start time", it.BeginTs())
collectionName := it.BaseInsertTask.CollectionName
if err := ValidateCollectionName(collectionName); err != nil {
span.LogFields(oplog.Error(err))
span.Finish()
return err
}
partitionTag := it.BaseInsertTask.PartitionName
if err := ValidatePartitionTag(partitionTag, true); err != nil {
span.LogFields(oplog.Error(err))
span.Finish()
return err
}
@ -95,7 +109,13 @@ func (it *InsertTask) PreExecute() error {
}
func (it *InsertTask) Execute() error {
span, ctx := opentracing.StartSpanFromContext(it.ctx, "InsertTask Execute")
defer span.Finish()
it.ctx = ctx
span.SetTag("hash keys", it.Base.MsgID)
span.SetTag("start time", it.BeginTs())
collectionName := it.BaseInsertTask.CollectionName
span.LogFields(oplog.String("collection_name", collectionName))
collSchema, err := globalMetaCache.GetCollectionSchema(collectionName)
if err != nil {
return err
@ -106,24 +126,19 @@ func (it *InsertTask) Execute() error {
return err
}
it.CollectionID = collID
var partitionID UniqueID
if len(it.PartitionName) > 0 {
partitionID, err = globalMetaCache.GetPartitionID(collectionName, it.PartitionName)
if err != nil {
return err
}
} else {
partitionID, err = globalMetaCache.GetPartitionID(collectionName, Params.DefaultPartitionTag)
if err != nil {
return err
}
partitionID, err := globalMetaCache.GetPartitionID(collectionName, it.PartitionName)
if err != nil {
return err
}
it.PartitionID = partitionID
span.LogFields(oplog.Bool("auto_id", autoID))
var rowIDBegin UniqueID
var rowIDEnd UniqueID
rowNums := len(it.BaseInsertTask.RowData)
rowIDBegin, rowIDEnd, _ = it.rowIDAllocator.Alloc(uint32(rowNums))
span.LogFields(oplog.Int("rowNums", rowNums),
oplog.Int("rowIDBegin", int(rowIDBegin)),
oplog.Int("rowIDEnd", int(rowIDEnd)))
it.BaseInsertTask.RowIDs = make([]UniqueID, rowNums)
for i := rowIDBegin; i < rowIDEnd; i++ {
offset := i - rowIDBegin
@ -146,7 +161,8 @@ func (it *InsertTask) Execute() error {
EndTs: it.EndTs(),
Msgs: make([]msgstream.TsMsg, 1),
}
tsMsg.SetMsgContext(it.Ctx())
tsMsg.SetMsgContext(ctx)
span.LogFields(oplog.String("send msg", "send msg"))
it.result = &milvuspb.InsertResponse{
Status: &commonpb.Status{
@ -182,6 +198,7 @@ func (it *InsertTask) Execute() error {
if err != nil {
it.result.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
it.result.Status.Reason = err.Error()
span.LogFields(oplog.Error(err))
return err
}
@ -189,6 +206,7 @@ func (it *InsertTask) Execute() error {
if err != nil {
it.result.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
it.result.Status.Reason = err.Error()
span.LogFields(oplog.Error(err))
return err
}
@ -196,6 +214,8 @@ func (it *InsertTask) Execute() error {
}
func (it *InsertTask) PostExecute() error {
span, _ := opentracing.StartSpanFromContext(it.ctx, "InsertTask postExecute")
defer span.Finish()
return nil
}
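The PreExecute/Execute/PostExecute changes above all apply one tracing pattern: start a child span from the task context, tag it with the message ID and begin timestamp, log errors on it, and finish it. With a deferred span.Finish() in place, the extra explicit Finish() calls before the early returns are redundant. A small self-contained sketch of the pattern, assuming only the opentracing-go dependency this package already imports; with no tracer registered, the global no-op tracer makes it harmless to run:

package main

import (
	"context"
	"errors"

	"github.com/opentracing/opentracing-go"
	oplog "github.com/opentracing/opentracing-go/log"
)

// tracedStep shows the instrumentation shape used by the task methods above.
func tracedStep(ctx context.Context, msgID int64) (context.Context, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "example task preExecute")
	defer span.Finish() // one deferred Finish is enough, even on early return

	span.SetTag("hash keys", msgID)

	if err := validate(); err != nil {
		span.LogFields(oplog.Error(err))
		return ctx, err
	}
	return ctx, nil
}

func validate() error { return errors.New("example validation failure") }

func main() {
	_, _ = tracedStep(context.Background(), 42)
}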
@ -205,6 +225,7 @@ type CreateCollectionTask struct {
masterClient MasterClient
dataServiceClient DataServiceClient
result *commonpb.Status
ctx context.Context
schema *schemapb.CollectionSchema
}
@ -345,6 +366,7 @@ type DropCollectionTask struct {
*milvuspb.DropCollectionRequest
masterClient MasterClient
result *commonpb.Status
ctx context.Context
}
func (dct *DropCollectionTask) OnEnqueue() error {
@ -413,6 +435,7 @@ type SearchTask struct {
queryMsgStream *pulsarms.PulsarMsgStream
resultBuf chan []*internalpb2.SearchResults
result *milvuspb.SearchResults
ctx context.Context
query *milvuspb.SearchRequest
}
@ -448,52 +471,52 @@ func (st *SearchTask) PreExecute() error {
st.Base.MsgType = commonpb.MsgType_kSearch
st.Base.SourceID = Params.ProxyID
span, ctx := opentracing.StartSpanFromContext(st.ctx, "SearchTask preExecute")
defer span.Finish()
st.ctx = ctx
span.SetTag("hash keys", st.Base.MsgID)
span.SetTag("start time", st.BeginTs())
collectionName := st.query.CollectionName
_, err := globalMetaCache.GetCollectionID(collectionName)
if err != nil { // err is not nil if collection not exists
span.LogFields(oplog.Error(err))
span.Finish()
return err
}
if err := ValidateCollectionName(st.query.CollectionName); err != nil {
span.LogFields(oplog.Error(err))
span.Finish()
return err
}
for _, tag := range st.query.PartitionNames {
if err := ValidatePartitionTag(tag, false); err != nil {
span.LogFields(oplog.Error(err))
span.Finish()
return err
}
}
st.Base.MsgType = commonpb.MsgType_kSearch
queryBytes, err := proto.Marshal(st.query)
if err != nil {
span.LogFields(oplog.Error(err))
span.Finish()
return err
}
st.Query = &commonpb.Blob{
Value: queryBytes,
}
st.ResultChannelID = Params.SearchResultChannelNames[0]
st.DbID = 0 // todo
collectionID, err := globalMetaCache.GetCollectionID(collectionName)
if err != nil { // err is not nil if collection not exists
return err
}
st.CollectionID = collectionID
st.PartitionIDs = make([]UniqueID, 0)
for _, partitionName := range st.query.PartitionNames {
partitionID, err := globalMetaCache.GetPartitionID(collectionName, partitionName)
if err != nil {
return err
}
st.PartitionIDs = append(st.PartitionIDs, partitionID)
}
st.Dsl = st.query.Dsl
st.PlaceholderGroup = st.query.PlaceholderGroup
return nil
}
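PreExecute above translates the user-facing names in the request into the IDs carried by the internal SearchRequest, asking globalMetaCache for the collection ID and then for each partition ID. A self-contained sketch of that resolution step follows; metaCache is a stand-in exposing only the two lookups used here, not the real cache.

package main

import (
	"errors"
	"fmt"
)

// metaCache is an illustrative stand-in for globalMetaCache.
type metaCache struct {
	collections map[string]int64
	partitions  map[string]map[string]int64
}

func (c *metaCache) GetCollectionID(coll string) (int64, error) {
	id, ok := c.collections[coll]
	if !ok {
		return 0, errors.New("collection not found: " + coll)
	}
	return id, nil
}

func (c *metaCache) GetPartitionID(coll, part string) (int64, error) {
	id, ok := c.partitions[coll][part]
	if !ok {
		return 0, errors.New("partition not found: " + part)
	}
	return id, nil
}

// resolve mirrors the PreExecute logic above: the request carries names, the
// internal request carries IDs.
func resolve(c *metaCache, coll string, parts []string) (int64, []int64, error) {
	collID, err := c.GetCollectionID(coll)
	if err != nil {
		return 0, nil, err
	}
	partIDs := make([]int64, 0, len(parts))
	for _, p := range parts {
		id, err := c.GetPartitionID(coll, p)
		if err != nil {
			return 0, nil, err
		}
		partIDs = append(partIDs, id)
	}
	return collID, partIDs, nil
}

func main() {
	cache := &metaCache{
		collections: map[string]int64{"c0": 1},
		partitions:  map[string]map[string]int64{"c0": {"p0": 10, "p1": 11}},
	}
	fmt.Println(resolve(cache, "c0", []string{"p0", "p1"}))
}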
func (st *SearchTask) Execute() error {
span, ctx := opentracing.StartSpanFromContext(st.ctx, "SearchTask Execute")
defer span.Finish()
st.ctx = ctx
span.SetTag("hash keys", st.Base.MsgID)
span.SetTag("start time", st.BeginTs())
var tsMsg msgstream.TsMsg = &msgstream.SearchMsg{
SearchRequest: st.SearchRequest,
BaseMsg: msgstream.BaseMsg{
@ -507,24 +530,32 @@ func (st *SearchTask) Execute() error {
EndTs: st.Base.Timestamp,
Msgs: make([]msgstream.TsMsg, 1),
}
tsMsg.SetMsgContext(st.Ctx())
tsMsg.SetMsgContext(ctx)
msgPack.Msgs[0] = tsMsg
err := st.queryMsgStream.Produce(msgPack)
log.Printf("[NodeImpl] length of searchMsg: %v", len(msgPack.Msgs))
if err != nil {
span.LogFields(oplog.Error(err))
span.Finish()
log.Printf("[NodeImpl] send search request failed: %v", err)
}
return err
}
func (st *SearchTask) PostExecute() error {
span, _ := opentracing.StartSpanFromContext(st.ctx, "SearchTask postExecute")
defer span.Finish()
span.SetTag("hash keys", st.Base.MsgID)
span.SetTag("start time", st.BeginTs())
for {
select {
case <-st.Ctx().Done():
case <-st.ctx.Done():
log.Print("SearchTask: wait to finish failed, timeout!, taskID:", st.ID())
span.LogFields(oplog.String("wait to finish failed, timeout", "wait to finish failed, timeout"))
return errors.New("SearchTask:wait to finish failed, timeout:" + strconv.FormatInt(st.ID(), 10))
case searchResults := <-st.resultBuf:
// fmt.Println("searchResults: ", searchResults)
span.LogFields(oplog.String("receive result", "receive result"))
filterSearchResult := make([]*internalpb2.SearchResults, 0)
var filterReason string
for _, partialSearchResult := range searchResults {
@ -553,6 +584,7 @@ func (st *SearchTask) PostExecute() error {
Reason: filterReason,
},
}
span.LogFields(oplog.Error(errors.New(filterReason)))
return errors.New(filterReason)
}
@ -661,6 +693,7 @@ func (st *SearchTask) PostExecute() error {
reducedHitsBs, err := proto.Marshal(reducedHits)
if err != nil {
log.Println("marshal error")
span.LogFields(oplog.Error(err))
return err
}
st.result.Hits = append(st.result.Hits, reducedHitsBs)
@ -675,6 +708,7 @@ type HasCollectionTask struct {
*milvuspb.HasCollectionRequest
masterClient MasterClient
result *milvuspb.BoolResponse
ctx context.Context
}
func (hct *HasCollectionTask) OnEnqueue() error {
@ -731,6 +765,7 @@ type DescribeCollectionTask struct {
*milvuspb.DescribeCollectionRequest
masterClient MasterClient
result *milvuspb.DescribeCollectionResponse
ctx context.Context
}
func (dct *DescribeCollectionTask) OnEnqueue() error {
@ -790,6 +825,7 @@ type GetCollectionsStatisticsTask struct {
*milvuspb.CollectionStatsRequest
dataServiceClient DataServiceClient
result *milvuspb.CollectionStatsResponse
ctx context.Context
}
func (g *GetCollectionsStatisticsTask) ID() UniqueID {
@ -865,6 +901,7 @@ type ShowCollectionsTask struct {
*milvuspb.ShowCollectionRequest
masterClient MasterClient
result *milvuspb.ShowCollectionResponse
ctx context.Context
}
func (sct *ShowCollectionsTask) OnEnqueue() error {
@ -918,6 +955,7 @@ type CreatePartitionTask struct {
*milvuspb.CreatePartitionRequest
masterClient MasterClient
result *commonpb.Status
ctx context.Context
}
func (cpt *CreatePartitionTask) OnEnqueue() error {
@ -980,6 +1018,7 @@ type DropPartitionTask struct {
*milvuspb.DropPartitionRequest
masterClient MasterClient
result *commonpb.Status
ctx context.Context
}
func (dpt *DropPartitionTask) OnEnqueue() error {
@ -1042,6 +1081,7 @@ type HasPartitionTask struct {
*milvuspb.HasPartitionRequest
masterClient MasterClient
result *milvuspb.BoolResponse
ctx context.Context
}
func (hpt *HasPartitionTask) OnEnqueue() error {
@ -1098,11 +1138,66 @@ func (hpt *HasPartitionTask) PostExecute() error {
return nil
}
//type DescribePartitionTask struct {
// Condition
// internalpb.DescribePartitionRequest
// masterClient masterpb.MasterServiceClient
// result *milvuspb.PartitionDescription
// ctx context.Context
//}
//
//func (dpt *DescribePartitionTask) ID() UniqueID {
// return dpt.ReqID
//}
//
//func (dpt *DescribePartitionTask) SetID(uid UniqueID) {
// dpt.ReqID = uid
//}
//
//func (dpt *DescribePartitionTask) Type() commonpb.MsgType {
// return dpt.MsgType
//}
//
//func (dpt *DescribePartitionTask) BeginTs() Timestamp {
// return dpt.Timestamp
//}
//
//func (dpt *DescribePartitionTask) EndTs() Timestamp {
// return dpt.Timestamp
//}
//
//func (dpt *DescribePartitionTask) SetTs(ts Timestamp) {
// dpt.Timestamp = ts
//}
//
//func (dpt *DescribePartitionTask) PreExecute() error {
// collName, partitionTag := dpt.PartitionName.CollectionName, dpt.PartitionName.Tag
//
// if err := ValidateCollectionName(collName); err != nil {
// return err
// }
//
// if err := ValidatePartitionTag(partitionTag, true); err != nil {
// return err
// }
// return nil
//}
//
//func (dpt *DescribePartitionTask) Execute() (err error) {
// dpt.result, err = dpt.masterClient.DescribePartition(dpt.ctx, &dpt.DescribePartitionRequest)
// return err
//}
//
//func (dpt *DescribePartitionTask) PostExecute() error {
// return nil
//}
type ShowPartitionsTask struct {
Condition
*milvuspb.ShowPartitionRequest
masterClient MasterClient
result *milvuspb.ShowPartitionResponse
ctx context.Context
}
func (spt *ShowPartitionsTask) OnEnqueue() error {
@ -1162,6 +1257,7 @@ type CreateIndexTask struct {
*milvuspb.CreateIndexRequest
masterClient MasterClient
result *commonpb.Status
ctx context.Context
}
func (cit *CreateIndexTask) OnEnqueue() error {
@ -1224,6 +1320,7 @@ type DescribeIndexTask struct {
*milvuspb.DescribeIndexRequest
masterClient MasterClient
result *milvuspb.DescribeIndexResponse
ctx context.Context
}
func (dit *DescribeIndexTask) OnEnqueue() error {
@ -1287,6 +1384,7 @@ type GetIndexStateTask struct {
*milvuspb.IndexStateRequest
indexServiceClient IndexServiceClient
result *milvuspb.IndexStateResponse
ctx context.Context
}
func (dipt *GetIndexStateTask) OnEnqueue() error {
@ -1359,6 +1457,7 @@ type FlushTask struct {
*milvuspb.FlushRequest
dataServiceClient DataServiceClient
result *commonpb.Status
ctx context.Context
}
func (ft *FlushTask) OnEnqueue() error {
@ -1397,34 +1496,23 @@ func (ft *FlushTask) PreExecute() error {
}
func (ft *FlushTask) Execute() error {
for _, collName := range ft.CollectionNames {
collID, err := globalMetaCache.GetCollectionID(collName)
if err != nil {
return err
}
flushReq := &datapb.FlushRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kFlush,
MsgID: ft.Base.MsgID,
Timestamp: ft.Base.Timestamp,
SourceID: ft.Base.SourceID,
},
DbID: 0,
CollectionID: collID,
}
var status *commonpb.Status
status, err = ft.dataServiceClient.Flush(flushReq)
if err != nil {
return nil
}
if status.ErrorCode != commonpb.ErrorCode_SUCCESS {
return errors.New(status.Reason)
}
var err error
collID, err := globalMetaCache.GetCollectionID(ft.CollectionName)
if err != nil {
return err
}
ft.result = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
flushReq := &datapb.FlushRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kFlush,
MsgID: ft.Base.MsgID,
Timestamp: ft.Base.Timestamp,
SourceID: ft.Base.SourceID,
},
DbID: 0,
CollectionID: collID,
}
return nil
ft.result, err = ft.dataServiceClient.Flush(flushReq)
return err
}
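The rewritten Execute above resolves the single collection name to an ID and forwards one datapb.FlushRequest to the data service, returning its status and error directly; unlike the removed loop, it no longer converts a non-SUCCESS status into a Go error. If that check is still wanted, a wrapper like the sketch below could restore it; Status and ErrorCode here are local stand-ins for the generated commonpb types.

package main

import (
	"errors"
	"fmt"
)

// Local stand-ins for commonpb.Status and its error codes.
type ErrorCode int32

const (
	ErrorCodeSuccess ErrorCode = iota
	ErrorCodeUnexpectedError
)

type Status struct {
	ErrorCode ErrorCode
	Reason    string
}

// checkFlushStatus surfaces a non-SUCCESS status from the data service as a
// Go error, mirroring the check performed by the removed code above.
func checkFlushStatus(status *Status, err error) (*Status, error) {
	if err != nil {
		return nil, err
	}
	if status.ErrorCode != ErrorCodeSuccess {
		return status, errors.New(status.Reason)
	}
	return status, nil
}

func main() {
	fmt.Println(checkFlushStatus(&Status{ErrorCode: ErrorCodeUnexpectedError, Reason: "segment not sealed"}, nil))
	fmt.Println(checkFlushStatus(&Status{ErrorCode: ErrorCodeSuccess}, nil))
}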
func (ft *FlushTask) PostExecute() error {

View File

@ -23,10 +23,6 @@ type Collection struct {
partitions []*Partition
}
//func (c *Collection) Name() string {
// return c.schema.Name
//}
func (c *Collection) ID() UniqueID {
return c.id
}

View File

@ -43,17 +43,14 @@ type collectionReplica interface {
getVecFieldsByCollectionID(collectionID UniqueID) ([]int64, error)
// partition
// Partition tags in different collections are not unique,
// so partition api should specify the target collection.
// TODO: remove collection ID; add a `map[partitionID]partition` to the replica implementation
getPartitionNum(collectionID UniqueID) (int, error)
addPartition2(collectionID UniqueID, partitionTag string) error
addPartition(collectionID UniqueID, partitionID UniqueID) error
removePartition(collectionID UniqueID, partitionTag string) error
addPartitionsByCollectionMeta(colMeta *etcdpb.CollectionMeta) error
removePartitionsByCollectionMeta(colMeta *etcdpb.CollectionMeta) error
getPartitionByTag(collectionID UniqueID, partitionTag string) (*Partition, error)
removePartition(collectionID UniqueID, partitionID UniqueID) error
addPartitionsByCollectionMeta(colMeta *etcdpb.CollectionInfo) error
removePartitionsByCollectionMeta(colMeta *etcdpb.CollectionInfo) error
getPartitionByID(collectionID UniqueID, partitionID UniqueID) (*Partition, error)
hasPartition(collectionID UniqueID, partitionTag string) bool
hasPartition(collectionID UniqueID, partitionID UniqueID) bool
enablePartitionDM(collectionID UniqueID, partitionID UniqueID) error
disablePartitionDM(collectionID UniqueID, partitionID UniqueID) error
getEnablePartitionDM(collectionID UniqueID, partitionID UniqueID) (bool, error)
@ -61,7 +58,6 @@ type collectionReplica interface {
// segment
getSegmentNum() int
getSegmentStatistics() []*internalpb2.SegmentStats
addSegment2(segmentID UniqueID, partitionTag string, collectionID UniqueID, segType segmentType) error
addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, segType segmentType) error
removeSegment(segmentID UniqueID) error
getSegmentByID(segmentID UniqueID) (*Segment, error)
@ -197,21 +193,6 @@ func (colReplica *collectionReplicaImpl) getPartitionNum(collectionID UniqueID)
return len(collection.partitions), nil
}
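The interface above drops the tag-based partition calls in favour of partition IDs, and the TODO suggests eventually indexing partitions by ID instead of walking each collection. One possible shape for such an index, sketched under that assumption (partitionIndex is not a type in this codebase):

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// UniqueID matches the alias used throughout the query node.
type UniqueID = int64

type Partition struct {
	id UniqueID
}

// partitionIndex keys partitions directly by ID, so lookups no longer need
// the owning collection.
type partitionIndex struct {
	partitions map[UniqueID]*Partition
}

func newPartitionIndex() *partitionIndex {
	return &partitionIndex{partitions: make(map[UniqueID]*Partition)}
}

func (idx *partitionIndex) add(id UniqueID) {
	idx.partitions[id] = &Partition{id: id}
}

func (idx *partitionIndex) get(id UniqueID) (*Partition, error) {
	p, ok := idx.partitions[id]
	if !ok {
		return nil, errors.New("cannot find partition, id = " + strconv.FormatInt(id, 10))
	}
	return p, nil
}

func (idx *partitionIndex) has(id UniqueID) bool {
	_, ok := idx.partitions[id]
	return ok
}

func main() {
	idx := newPartitionIndex()
	idx.add(2021)
	fmt.Println(idx.has(2021), idx.has(2022))
	fmt.Println(idx.get(2021))
}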
func (colReplica *collectionReplicaImpl) addPartition2(collectionID UniqueID, partitionTag string) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
if err != nil {
return err
}
var newPartition = newPartition2(partitionTag)
*collection.Partitions() = append(*collection.Partitions(), newPartition)
return nil
}
func (colReplica *collectionReplicaImpl) addPartition(collectionID UniqueID, partitionID UniqueID) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
@ -227,14 +208,14 @@ func (colReplica *collectionReplicaImpl) addPartition(collectionID UniqueID, par
return nil
}
func (colReplica *collectionReplicaImpl) removePartition(collectionID UniqueID, partitionTag string) error {
func (colReplica *collectionReplicaImpl) removePartition(collectionID UniqueID, partitionID UniqueID) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
return colReplica.removePartitionPrivate(collectionID, partitionTag)
return colReplica.removePartitionPrivate(collectionID, partitionID)
}
func (colReplica *collectionReplicaImpl) removePartitionPrivate(collectionID UniqueID, partitionTag string) error {
func (colReplica *collectionReplicaImpl) removePartitionPrivate(collectionID UniqueID, partitionID UniqueID) error {
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
if err != nil {
return err
@ -242,7 +223,7 @@ func (colReplica *collectionReplicaImpl) removePartitionPrivate(collectionID Uni
var tmpPartitions = make([]*Partition, 0)
for _, p := range *collection.Partitions() {
if p.Tag() == partitionTag {
if p.ID() == partitionID {
for _, s := range *p.Segments() {
deleteSegment(colReplica.segments[s.ID()])
delete(colReplica.segments, s.ID())
@ -257,30 +238,30 @@ func (colReplica *collectionReplicaImpl) removePartitionPrivate(collectionID Uni
}
// deprecated
func (colReplica *collectionReplicaImpl) addPartitionsByCollectionMeta(colMeta *etcdpb.CollectionMeta) error {
func (colReplica *collectionReplicaImpl) addPartitionsByCollectionMeta(colMeta *etcdpb.CollectionInfo) error {
if !colReplica.hasCollection(colMeta.ID) {
err := errors.New("Cannot find collection, id = " + strconv.FormatInt(colMeta.ID, 10))
return err
}
pToAdd := make([]string, 0)
for _, partitionTag := range colMeta.PartitionTags {
if !colReplica.hasPartition(colMeta.ID, partitionTag) {
pToAdd = append(pToAdd, partitionTag)
pToAdd := make([]UniqueID, 0)
for _, partitionID := range colMeta.PartitionIDs {
if !colReplica.hasPartition(colMeta.ID, partitionID) {
pToAdd = append(pToAdd, partitionID)
}
}
for _, tag := range pToAdd {
err := colReplica.addPartition2(colMeta.ID, tag)
for _, id := range pToAdd {
err := colReplica.addPartition(colMeta.ID, id)
if err != nil {
log.Println(err)
}
fmt.Println("add partition: ", tag)
fmt.Println("add partition: ", id)
}
return nil
}
func (colReplica *collectionReplicaImpl) removePartitionsByCollectionMeta(colMeta *etcdpb.CollectionMeta) error {
func (colReplica *collectionReplicaImpl) removePartitionsByCollectionMeta(colMeta *etcdpb.CollectionInfo) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
@ -289,37 +270,30 @@ func (colReplica *collectionReplicaImpl) removePartitionsByCollectionMeta(colMet
return err
}
pToDel := make([]string, 0)
pToDel := make([]UniqueID, 0)
for _, partition := range col.partitions {
hasPartition := false
for _, tag := range colMeta.PartitionTags {
if partition.partitionTag == tag {
for _, id := range colMeta.PartitionIDs {
if partition.ID() == id {
hasPartition = true
}
}
if !hasPartition {
pToDel = append(pToDel, partition.partitionTag)
pToDel = append(pToDel, partition.ID())
}
}
for _, tag := range pToDel {
err := colReplica.removePartitionPrivate(col.ID(), tag)
for _, id := range pToDel {
err := colReplica.removePartitionPrivate(col.ID(), id)
if err != nil {
log.Println(err)
}
fmt.Println("delete partition: ", tag)
fmt.Println("delete partition: ", id)
}
return nil
}
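addPartitionsByCollectionMeta and removePartitionsByCollectionMeta above reconcile the replica against CollectionInfo.PartitionIDs by scanning for IDs to add and IDs to drop. The same reconciliation can be written as set arithmetic; the sketch below is an illustrative condensation, not the replica code itself.

package main

import "fmt"

type UniqueID = int64

// diffPartitions returns the IDs present in the incoming meta but not in the
// replica (to add) and the IDs in the replica that the meta no longer lists
// (to delete).
func diffPartitions(replica, meta []UniqueID) (toAdd, toDel []UniqueID) {
	replicaSet := make(map[UniqueID]struct{}, len(replica))
	for _, id := range replica {
		replicaSet[id] = struct{}{}
	}
	metaSet := make(map[UniqueID]struct{}, len(meta))
	for _, id := range meta {
		metaSet[id] = struct{}{}
		if _, ok := replicaSet[id]; !ok {
			toAdd = append(toAdd, id)
		}
	}
	for _, id := range replica {
		if _, ok := metaSet[id]; !ok {
			toDel = append(toDel, id)
		}
	}
	return toAdd, toDel
}

func main() {
	replica := []UniqueID{2021, 1, 2}
	meta := []UniqueID{2021, 2, 3}
	add, del := diffPartitions(replica, meta)
	fmt.Println("add:", add, "del:", del) // add: [3] del: [1]
}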
func (colReplica *collectionReplicaImpl) getPartitionByTag(collectionID UniqueID, partitionTag string) (*Partition, error) {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
return colReplica.getPartitionByTagPrivate(collectionID, partitionTag)
}
func (colReplica *collectionReplicaImpl) getPartitionByID(collectionID UniqueID, partitionID UniqueID) (*Partition, error) {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
@ -327,21 +301,6 @@ func (colReplica *collectionReplicaImpl) getPartitionByID(collectionID UniqueID,
return colReplica.getPartitionByIDPrivate(collectionID, partitionID)
}
func (colReplica *collectionReplicaImpl) getPartitionByTagPrivate(collectionID UniqueID, partitionTag string) (*Partition, error) {
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
if err != nil {
return nil, err
}
for _, p := range *collection.Partitions() {
if p.Tag() == partitionTag {
return p, nil
}
}
return nil, errors.New("cannot find partition, tag = " + partitionTag)
}
func (colReplica *collectionReplicaImpl) getPartitionByIDPrivate(collectionID UniqueID, partitionID UniqueID) (*Partition, error) {
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
if err != nil {
@ -357,7 +316,7 @@ func (colReplica *collectionReplicaImpl) getPartitionByIDPrivate(collectionID Un
return nil, errors.New("cannot find partition, id = " + strconv.FormatInt(partitionID, 10))
}
func (colReplica *collectionReplicaImpl) hasPartition(collectionID UniqueID, partitionTag string) bool {
func (colReplica *collectionReplicaImpl) hasPartition(collectionID UniqueID, partitionID UniqueID) bool {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
@ -368,7 +327,7 @@ func (colReplica *collectionReplicaImpl) hasPartition(collectionID UniqueID, par
}
for _, p := range *collection.Partitions() {
if p.Tag() == partitionTag {
if p.ID() == partitionID {
return true
}
}
@ -446,28 +405,6 @@ func (colReplica *collectionReplicaImpl) getSegmentStatistics() []*internalpb2.S
return statisticData
}
func (colReplica *collectionReplicaImpl) addSegment2(segmentID UniqueID, partitionTag string, collectionID UniqueID, segType segmentType) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
if err != nil {
return err
}
partition, err2 := colReplica.getPartitionByTagPrivate(collectionID, partitionTag)
if err2 != nil {
return err2
}
var newSegment = newSegment2(collection, segmentID, partitionTag, collectionID, segType)
colReplica.segments[segmentID] = newSegment
*partition.Segments() = append(*partition.Segments(), newSegment)
return nil
}
func (colReplica *collectionReplicaImpl) addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, segType segmentType) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()

View File

@ -61,18 +61,18 @@ func TestCollectionReplica_getPartitionNum(t *testing.T) {
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
partitionTags := []string{"a", "b", "c"}
for _, tag := range partitionTags {
err := node.replica.addPartition2(collectionID, tag)
partitionIDs := []UniqueID{1, 2, 3}
for _, id := range partitionIDs {
err := node.replica.addPartition(collectionID, id)
assert.NoError(t, err)
partition, err := node.replica.getPartitionByTag(collectionID, tag)
partition, err := node.replica.getPartitionByID(collectionID, id)
assert.NoError(t, err)
assert.Equal(t, partition.partitionTag, tag)
assert.Equal(t, partition.ID(), id)
}
partitionNum, err := node.replica.getPartitionNum(collectionID)
assert.NoError(t, err)
assert.Equal(t, partitionNum, len(partitionTags)+1) // _default
assert.Equal(t, partitionNum, len(partitionIDs)+1) // _default
node.Stop()
}
@ -81,13 +81,13 @@ func TestCollectionReplica_addPartition(t *testing.T) {
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
partitionTags := []string{"a", "b", "c"}
for _, tag := range partitionTags {
err := node.replica.addPartition2(collectionID, tag)
partitionIDs := []UniqueID{1, 2, 3}
for _, id := range partitionIDs {
err := node.replica.addPartition(collectionID, id)
assert.NoError(t, err)
partition, err := node.replica.getPartitionByTag(collectionID, tag)
partition, err := node.replica.getPartitionByID(collectionID, id)
assert.NoError(t, err)
assert.Equal(t, partition.partitionTag, tag)
assert.Equal(t, partition.ID(), id)
}
node.Stop()
}
@ -97,15 +97,15 @@ func TestCollectionReplica_removePartition(t *testing.T) {
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
partitionTags := []string{"a", "b", "c"}
partitionIDs := []UniqueID{1, 2, 3}
for _, tag := range partitionTags {
err := node.replica.addPartition2(collectionID, tag)
for _, id := range partitionIDs {
err := node.replica.addPartition(collectionID, id)
assert.NoError(t, err)
partition, err := node.replica.getPartitionByTag(collectionID, tag)
partition, err := node.replica.getPartitionByID(collectionID, id)
assert.NoError(t, err)
assert.Equal(t, partition.partitionTag, tag)
err = node.replica.removePartition(collectionID, tag)
assert.Equal(t, partition.ID(), id)
err = node.replica.removePartition(collectionID, id)
assert.NoError(t, err)
}
node.Stop()
@ -117,18 +117,18 @@ func TestCollectionReplica_addPartitionsByCollectionMeta(t *testing.T) {
initTestMeta(t, node, collectionID, 0)
collectionMeta := genTestCollectionMeta(collectionID, false)
collectionMeta.PartitionTags = []string{"p0", "p1", "p2"}
collectionMeta.PartitionIDs = []UniqueID{0, 1, 2}
err := node.replica.addPartitionsByCollectionMeta(collectionMeta)
assert.NoError(t, err)
partitionNum, err := node.replica.getPartitionNum(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, partitionNum, len(collectionMeta.PartitionTags)+1)
hasPartition := node.replica.hasPartition(UniqueID(0), "p0")
assert.Equal(t, partitionNum, len(collectionMeta.PartitionIDs)+1)
hasPartition := node.replica.hasPartition(UniqueID(0), UniqueID(0))
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
hasPartition = node.replica.hasPartition(UniqueID(0), UniqueID(1))
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
hasPartition = node.replica.hasPartition(UniqueID(0), UniqueID(2))
assert.Equal(t, hasPartition, true)
node.Stop()
@ -140,19 +140,19 @@ func TestCollectionReplica_removePartitionsByCollectionMeta(t *testing.T) {
initTestMeta(t, node, collectionID, 0)
collectionMeta := genTestCollectionMeta(collectionID, false)
collectionMeta.PartitionTags = []string{"p0"}
collectionMeta.PartitionIDs = []UniqueID{0}
err := node.replica.addPartitionsByCollectionMeta(collectionMeta)
assert.NoError(t, err)
partitionNum, err := node.replica.getPartitionNum(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, partitionNum, len(collectionMeta.PartitionTags)+1)
assert.Equal(t, partitionNum, len(collectionMeta.PartitionIDs)+1)
hasPartition := node.replica.hasPartition(UniqueID(0), "p0")
hasPartition := node.replica.hasPartition(UniqueID(0), UniqueID(0))
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
hasPartition = node.replica.hasPartition(UniqueID(0), UniqueID(1))
assert.Equal(t, hasPartition, false)
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
hasPartition = node.replica.hasPartition(UniqueID(0), UniqueID(2))
assert.Equal(t, hasPartition, false)
node.Stop()
@ -165,12 +165,12 @@ func TestCollectionReplica_getPartitionByTag(t *testing.T) {
collectionMeta := genTestCollectionMeta(collectionID, false)
for _, tag := range collectionMeta.PartitionTags {
err := node.replica.addPartition2(collectionID, tag)
for _, id := range collectionMeta.PartitionIDs {
err := node.replica.addPartition(collectionID, id)
assert.NoError(t, err)
partition, err := node.replica.getPartitionByTag(collectionID, tag)
partition, err := node.replica.getPartitionByID(collectionID, id)
assert.NoError(t, err)
assert.Equal(t, partition.partitionTag, tag)
assert.Equal(t, partition.ID(), id)
assert.NotNil(t, partition)
}
node.Stop()
@ -182,11 +182,11 @@ func TestCollectionReplica_hasPartition(t *testing.T) {
initTestMeta(t, node, collectionID, 0)
collectionMeta := genTestCollectionMeta(collectionID, false)
err := node.replica.addPartition2(collectionID, collectionMeta.PartitionTags[0])
err := node.replica.addPartition(collectionID, collectionMeta.PartitionIDs[0])
assert.NoError(t, err)
hasPartition := node.replica.hasPartition(collectionID, "default")
hasPartition := node.replica.hasPartition(collectionID, defaultPartitionID)
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(collectionID, "default1")
hasPartition = node.replica.hasPartition(collectionID, defaultPartitionID+1)
assert.Equal(t, hasPartition, false)
node.Stop()
}
@ -198,9 +198,8 @@ func TestCollectionReplica_addSegment(t *testing.T) {
initTestMeta(t, node, collectionID, 0)
const segmentNum = 3
tag := "default"
for i := 0; i < segmentNum; i++ {
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
err := node.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segTypeGrowing)
assert.NoError(t, err)
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
@ -216,10 +215,9 @@ func TestCollectionReplica_removeSegment(t *testing.T) {
initTestMeta(t, node, collectionID, 0)
const segmentNum = 3
tag := "default"
for i := 0; i < segmentNum; i++ {
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
err := node.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segTypeGrowing)
assert.NoError(t, err)
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
@ -237,10 +235,9 @@ func TestCollectionReplica_getSegmentByID(t *testing.T) {
initTestMeta(t, node, collectionID, 0)
const segmentNum = 3
tag := "default"
for i := 0; i < segmentNum; i++ {
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
err := node.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segTypeGrowing)
assert.NoError(t, err)
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
@ -256,10 +253,9 @@ func TestCollectionReplica_hasSegment(t *testing.T) {
initTestMeta(t, node, collectionID, 0)
const segmentNum = 3
tag := "default"
for i := 0; i < segmentNum; i++ {
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
err := node.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segTypeGrowing)
assert.NoError(t, err)
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)

View File

@ -61,12 +61,12 @@ func TestDataSyncService_Start(t *testing.T) {
Timestamp: uint64(i + 1000),
SourceID: 0,
},
CollectionID: UniqueID(0),
PartitionName: "default",
SegmentID: int64(0),
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000), uint64(i + 1000)},
RowIDs: []int64{int64(i), int64(i)},
CollectionID: UniqueID(0),
PartitionID: defaultPartitionID,
SegmentID: int64(0),
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000), uint64(i + 1000)},
RowIDs: []int64{int64(i), int64(i)},
RowData: []*commonpb.Blob{
{Value: rawData},
{Value: rawData},

View File

@ -38,7 +38,7 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
var ddMsg = ddMsg{
collectionRecords: make(map[UniqueID][]metaOperateRecord),
partitionRecords: make(map[string][]metaOperateRecord),
partitionRecords: make(map[UniqueID][]metaOperateRecord),
timeRange: TimeRange{
timestampMin: msMsg.TimestampMin(),
timestampMax: msMsg.TimestampMax(),
@ -102,7 +102,8 @@ func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
}
// add default partition
err = ddNode.replica.addPartition2(collectionID, Params.DefaultPartitionTag)
// TODO: allocate default partition id in master
err = ddNode.replica.addPartition(collectionID, UniqueID(2021))
if err != nil {
log.Println(err)
return
@ -118,12 +119,6 @@ func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
collectionID := msg.CollectionID
//err := ddNode.replica.removeCollection(collectionID)
//if err != nil {
// log.Println(err)
// return
//}
ddNode.ddMsg.collectionRecords[collectionID] = append(ddNode.ddMsg.collectionRecords[collectionID],
metaOperateRecord{
createOrDrop: false,
@ -135,17 +130,15 @@ func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
collectionID := msg.CollectionID
partitionName := msg.PartitionName
partitionID := msg.PartitionID
err := ddNode.replica.addPartition2(collectionID, partitionName)
// TODO:: add partition by partitionID
//err := ddNode.replica.addPartition(collectionID, msg.PartitionID)
err := ddNode.replica.addPartition(collectionID, partitionID)
if err != nil {
log.Println(err)
return
}
ddNode.ddMsg.partitionRecords[partitionName] = append(ddNode.ddMsg.partitionRecords[partitionName],
ddNode.ddMsg.partitionRecords[partitionID] = append(ddNode.ddMsg.partitionRecords[partitionID],
metaOperateRecord{
createOrDrop: true,
timestamp: msg.Base.Timestamp,
@ -154,22 +147,16 @@ func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
func (ddNode *ddNode) dropPartition(msg *msgstream.DropPartitionMsg) {
collectionID := msg.CollectionID
partitionName := msg.PartitionName
partitionID := msg.PartitionID
//err := ddNode.replica.removePartition(collectionID, partitionTag)
//if err != nil {
// log.Println(err)
// return
//}
ddNode.ddMsg.partitionRecords[partitionName] = append(ddNode.ddMsg.partitionRecords[partitionName],
ddNode.ddMsg.partitionRecords[partitionID] = append(ddNode.ddMsg.partitionRecords[partitionID],
metaOperateRecord{
createOrDrop: false,
timestamp: msg.Base.Timestamp,
})
ddNode.ddMsg.gcRecord.partitions = append(ddNode.ddMsg.gcRecord.partitions, partitionWithID{
partitionTag: partitionName,
partitionID: partitionID,
collectionID: collectionID,
})
}

View File

@ -37,7 +37,7 @@ func (gcNode *gcNode) Operate(in []*Msg) []*Msg {
// drop partitions
for _, partition := range gcMsg.gcRecord.partitions {
err := gcNode.replica.removePartition(partition.collectionID, partition.partitionTag)
err := gcNode.replica.removePartition(partition.collectionID, partition.partitionID)
if err != nil {
log.Println(err)
}

View File

@ -81,7 +81,7 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
// check if segment exists, if not, create this segment
if !iNode.replica.hasSegment(task.SegmentID) {
err := iNode.replica.addSegment2(task.SegmentID, task.PartitionName, task.CollectionID, segTypeGrowing)
err := iNode.replica.addSegment(task.SegmentID, task.PartitionID, task.CollectionID, segTypeGrowing)
if err != nil {
log.Println(err)
continue

View File

@ -15,7 +15,7 @@ type key2SegMsg struct {
type ddMsg struct {
collectionRecords map[UniqueID][]metaOperateRecord
partitionRecords map[string][]metaOperateRecord
partitionRecords map[UniqueID][]metaOperateRecord
gcRecord *gcRecord
timeRange TimeRange
}
@ -63,17 +63,16 @@ type DeletePreprocessData struct {
count int32
}
// TODO: replace partitionWithID by partition id
// TODO: delete collection id
type partitionWithID struct {
partitionTag string
partitionID UniqueID
collectionID UniqueID
}
type gcRecord struct {
// collections and partitions to be dropped
collections []UniqueID
// TODO: use partition id
partitions []partitionWithID
partitions []partitionWithID
}
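partitionRecords and gcRecord are now keyed by partition ID, so a drop-partition event travels from ddNode to gcNode purely through IDs. The sketch below re-creates just enough of those types to show that hand-off; it is a simplified model, not the flow-graph implementation.

package main

import "fmt"

type UniqueID = int64
type Timestamp = uint64

type metaOperateRecord struct {
	createOrDrop bool
	timestamp    Timestamp
}

type partitionWithID struct {
	partitionID  UniqueID
	collectionID UniqueID
}

type gcRecord struct {
	partitions []partitionWithID
}

type ddMsg struct {
	partitionRecords map[UniqueID][]metaOperateRecord
	gcRecord         *gcRecord
}

// recordDropPartition does what ddNode.dropPartition does above: log the
// operation under the partition ID and queue the partition for gcNode.
func recordDropPartition(msg *ddMsg, collID, partID UniqueID, ts Timestamp) {
	msg.partitionRecords[partID] = append(msg.partitionRecords[partID],
		metaOperateRecord{createOrDrop: false, timestamp: ts})
	msg.gcRecord.partitions = append(msg.gcRecord.partitions,
		partitionWithID{partitionID: partID, collectionID: collID})
}

func main() {
	msg := &ddMsg{
		partitionRecords: make(map[UniqueID][]metaOperateRecord),
		gcRecord:         &gcRecord{},
	}
	recordDropPartition(msg, 0, 2021, 1000)
	// gcNode later walks gcRecord.partitions and calls removePartition.
	fmt.Printf("%+v\n", msg.gcRecord.partitions)
}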
func (ksMsg *key2SegMsg) TimeTick() Timestamp {

View File

@ -20,6 +20,7 @@ import (
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
@ -90,7 +91,7 @@ import (
// SourceID: 0,
// },
// CollectionID: UniqueID(collectionID),
// PartitionName: "default",
// PartitionID: defaultPartitionID,
// SegmentID: segmentID,
// ChannelID: "0",
// Timestamps: timestamps,
@ -174,8 +175,6 @@ import (
// log.Print("marshal placeholderGroup failed")
// }
// query := milvuspb.SearchRequest{
// CollectionName: "collection0",
// PartitionNames: []string{"default"},
// Dsl: dslString,
// PlaceholderGroup: placeGroupByte,
// }
@ -426,7 +425,7 @@ import (
// SourceID: 0,
// },
// CollectionID: UniqueID(collectionID),
// PartitionName: "default",
// PartitionID: defaultPartitionID,
// SegmentID: segmentID,
// ChannelID: "0",
// Timestamps: timestamps,
@ -499,8 +498,6 @@ import (
// log.Print("marshal placeholderGroup failed")
// }
// query := milvuspb.SearchRequest{
// CollectionName: "collection0",
// PartitionNames: []string{"default"},
// Dsl: dslString,
// PlaceholderGroup: placeGroupByte,
// }
@ -675,6 +672,72 @@ import (
//}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
func genETCDCollectionMeta(collectionID UniqueID, isBinary bool) *etcdpb.CollectionMeta {
var fieldVec schemapb.FieldSchema
if isBinary {
fieldVec = schemapb.FieldSchema{
FieldID: UniqueID(100),
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_BINARY,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "128",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "metric_type",
Value: "JACCARD",
},
},
}
} else {
fieldVec = schemapb.FieldSchema{
FieldID: UniqueID(100),
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "metric_type",
Value: "L2",
},
},
}
}
fieldInt := schemapb.FieldSchema{
FieldID: UniqueID(101),
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
}
schema := schemapb.CollectionSchema{
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}
collectionMeta := etcdpb.CollectionMeta{
ID: collectionID,
Schema: &schema,
CreateTime: Timestamp(0),
PartitionIDs: []UniqueID{defaultPartitionID},
}
return &collectionMeta
}
func generateInsertBinLog(collectionID UniqueID, partitionID UniqueID, segmentID UniqueID, keyPrefix string) ([]*internalpb2.StringList, []int64, error) {
const (
msgLength = 1000
@ -726,7 +789,7 @@ func generateInsertBinLog(collectionID UniqueID, partitionID UniqueID, segmentID
}
// buffer data to binLogs
collMeta := genTestCollectionMeta(collectionID, false)
collMeta := genETCDCollectionMeta(collectionID, false)
collMeta.Schema.Fields = append(collMeta.Schema.Fields, &schemapb.FieldSchema{
FieldID: 0,
Name: "uid",
@ -871,7 +934,7 @@ func generateIndex(segmentID UniqueID) ([]string, error) {
return indexPaths, nil
}
func doInsert(ctx context.Context, collectionID UniqueID, partitionTag string, segmentID UniqueID) error {
func doInsert(ctx context.Context, collectionID UniqueID, partitionID UniqueID, segmentID UniqueID) error {
const msgLength = 1000
const DIM = 16
@ -907,12 +970,12 @@ func doInsert(ctx context.Context, collectionID UniqueID, partitionTag string, s
Timestamp: uint64(i + 1000),
SourceID: 0,
},
CollectionID: collectionID,
PartitionName: partitionTag,
SegmentID: segmentID,
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
CollectionID: collectionID,
PartitionID: partitionID,
SegmentID: segmentID,
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
RowData: []*commonpb.Blob{
{Value: rawData},
},

View File

@ -6,16 +6,14 @@ import (
"log"
"path"
"reflect"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/mvcc/mvccpb"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"go.etcd.io/etcd/clientv3"
)
const (
@ -91,23 +89,7 @@ func isSegmentObj(key string) bool {
return index == 0
}
func isSegmentChannelRangeInQueryNodeChannelRange(segment *etcdpb.SegmentMeta) bool {
if segment.ChannelStart > segment.ChannelEnd {
log.Printf("Illegal segment channel range")
return false
}
var queryNodeChannelStart = Params.InsertChannelRange[0]
var queryNodeChannelEnd = Params.InsertChannelRange[1]
if segment.ChannelStart >= int32(queryNodeChannelStart) && segment.ChannelEnd <= int32(queryNodeChannelEnd) {
return true
}
return false
}
func printCollectionStruct(obj *etcdpb.CollectionMeta) {
func printCollectionStruct(obj *etcdpb.CollectionInfo) {
v := reflect.ValueOf(obj)
v = reflect.Indirect(v)
typeOfS := v.Type()
@ -120,7 +102,7 @@ func printCollectionStruct(obj *etcdpb.CollectionMeta) {
}
}
func printSegmentStruct(obj *etcdpb.SegmentMeta) {
func printSegmentStruct(obj *datapb.SegmentInfo) {
v := reflect.ValueOf(obj)
v = reflect.Indirect(v)
typeOfS := v.Type()
@ -140,8 +122,8 @@ func (mService *metaService) processCollectionCreate(id string, value string) {
if err != nil {
log.Println(err)
}
for _, partitionTag := range col.PartitionTags {
err = mService.replica.addPartition2(col.ID, partitionTag)
for _, partitionID := range col.PartitionIDs {
err = mService.replica.addPartition(col.ID, partitionID)
if err != nil {
log.Println(err)
}
@ -153,14 +135,11 @@ func (mService *metaService) processSegmentCreate(id string, value string) {
//println("Create Segment: ", id)
seg := mService.segmentUnmarshal(value)
if !isSegmentChannelRangeInQueryNodeChannelRange(seg) {
log.Println("Illegal segment channel range")
return
}
// TODO: what if seg == nil? We need to notify master and fail the RPC request
if seg != nil {
err := mService.replica.addSegment2(seg.SegmentID, seg.PartitionTag, seg.CollectionID, segTypeGrowing)
// TODO: get partition id from segment meta
err := mService.replica.addSegment(seg.SegmentID, seg.PartitionID, seg.CollectionID, segTypeGrowing)
if err != nil {
log.Println(err)
return
@ -181,122 +160,6 @@ func (mService *metaService) processCreate(key string, msg string) {
}
}
func (mService *metaService) processSegmentModify(id string, value string) {
seg := mService.segmentUnmarshal(value)
if !isSegmentChannelRangeInQueryNodeChannelRange(seg) {
return
}
if seg != nil {
targetSegment, err := mService.replica.getSegmentByID(seg.SegmentID)
if err != nil {
log.Println(err)
return
}
// TODO: do modify
fmt.Println(targetSegment)
}
}
func (mService *metaService) processCollectionModify(id string, value string) {
//println("Modify Collection: ", id)
col := mService.collectionUnmarshal(value)
if col != nil {
err := mService.replica.addPartitionsByCollectionMeta(col)
if err != nil {
log.Println(err)
}
err = mService.replica.removePartitionsByCollectionMeta(col)
if err != nil {
log.Println(err)
}
}
}
func (mService *metaService) processModify(key string, msg string) {
if isCollectionObj(key) {
objID := GetCollectionObjID(key)
mService.processCollectionModify(objID, msg)
} else if isSegmentObj(key) {
objID := GetSegmentObjID(key)
mService.processSegmentModify(objID, msg)
} else {
println("can not process modify msg:", key)
}
}
func (mService *metaService) processSegmentDelete(id string) {
//println("Delete segment: ", id)
var segmentID, err = strconv.ParseInt(id, 10, 64)
if err != nil {
log.Println("Cannot parse segment id:" + id)
}
err = mService.replica.removeSegment(segmentID)
if err != nil {
log.Println(err)
return
}
}
func (mService *metaService) processCollectionDelete(id string) {
//println("Delete collection: ", id)
var collectionID, err = strconv.ParseInt(id, 10, 64)
if err != nil {
log.Println("Cannot parse collection id:" + id)
}
err = mService.replica.removeCollection(collectionID)
if err != nil {
log.Println(err)
return
}
}
func (mService *metaService) processDelete(key string) {
//println("process delete")
if isCollectionObj(key) {
objID := GetCollectionObjID(key)
mService.processCollectionDelete(objID)
} else if isSegmentObj(key) {
objID := GetSegmentObjID(key)
mService.processSegmentDelete(objID)
} else {
println("can not process delete msg:", key)
}
}
func (mService *metaService) processResp(resp clientv3.WatchResponse) error {
err := resp.Err()
if err != nil {
return err
}
for _, ev := range resp.Events {
if ev.IsCreate() {
key := string(ev.Kv.Key)
msg := string(ev.Kv.Value)
mService.processCreate(key, msg)
} else if ev.IsModify() {
key := string(ev.Kv.Key)
msg := string(ev.Kv.Value)
mService.processModify(key, msg)
} else if ev.Type == mvccpb.DELETE {
key := string(ev.Kv.Key)
mService.processDelete(key)
} else {
println("Unrecognized etcd msg!")
}
}
return nil
}
func (mService *metaService) loadCollections() error {
keys, values, err := mService.kvBase.LoadWithPrefix(CollectionPrefix)
if err != nil {
@ -326,8 +189,8 @@ func (mService *metaService) loadSegments() error {
}
//----------------------------------------------------------------------- Unmarshal and Marshal
func (mService *metaService) collectionUnmarshal(value string) *etcdpb.CollectionMeta {
col := etcdpb.CollectionMeta{}
func (mService *metaService) collectionUnmarshal(value string) *etcdpb.CollectionInfo {
col := etcdpb.CollectionInfo{}
err := proto.UnmarshalText(value, &col)
if err != nil {
log.Println(err)
@ -336,7 +199,7 @@ func (mService *metaService) collectionUnmarshal(value string) *etcdpb.Collectio
return &col
}
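collectionUnmarshal above and segmentUnmarshal just below rely on the proto text format: etcd stores the meta as text and UnmarshalText rebuilds the generated struct. A short example of that round trip, assuming the in-repo generated datapb package and the github.com/golang/protobuf text helpers:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
)

// parseSegmentInfo mirrors segmentUnmarshal: the stored text is parsed back
// into a datapb.SegmentInfo.
func parseSegmentInfo(value string) (*datapb.SegmentInfo, error) {
	seg := datapb.SegmentInfo{}
	if err := proto.UnmarshalText(value, &seg); err != nil {
		return nil, err
	}
	return &seg, nil
}

func main() {
	// The text literal matches the one used by the meta service tests below.
	seg, err := parseSegmentInfo(`partitionID: 2021`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("partition id:", seg.PartitionID)
}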
func (mService *metaService) collectionMarshal(col *etcdpb.CollectionMeta) string {
func (mService *metaService) collectionMarshal(col *etcdpb.CollectionInfo) string {
value := proto.MarshalTextString(col)
if value == "" {
log.Println("marshal collection failed")
@ -345,8 +208,8 @@ func (mService *metaService) collectionMarshal(col *etcdpb.CollectionMeta) strin
return value
}
func (mService *metaService) segmentUnmarshal(value string) *etcdpb.SegmentMeta {
seg := etcdpb.SegmentMeta{}
func (mService *metaService) segmentUnmarshal(value string) *datapb.SegmentInfo {
seg := datapb.SegmentInfo{}
err := proto.UnmarshalText(value, &seg)
if err != nil {
log.Println(err)

View File

@ -1,12 +1,11 @@
package querynode
import (
"math"
"testing"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
)
func TestMetaService_start(t *testing.T) {
@ -65,36 +64,6 @@ func TestMetaService_isSegmentObj(t *testing.T) {
assert.Equal(t, b2, false)
}
func TestMetaService_isSegmentChannelRangeInQueryNodeChannelRange(t *testing.T) {
var s = etcdpb.SegmentMeta{
SegmentID: UniqueID(0),
CollectionID: UniqueID(0),
PartitionTag: "partition0",
ChannelStart: 0,
ChannelEnd: 1,
OpenTime: Timestamp(0),
CloseTime: Timestamp(math.MaxUint64),
NumRows: UniqueID(0),
}
var b = isSegmentChannelRangeInQueryNodeChannelRange(&s)
assert.Equal(t, b, true)
s = etcdpb.SegmentMeta{
SegmentID: UniqueID(0),
CollectionID: UniqueID(0),
PartitionTag: "partition0",
ChannelStart: 128,
ChannelEnd: 256,
OpenTime: Timestamp(0),
CloseTime: Timestamp(math.MaxUint64),
NumRows: UniqueID(0),
}
b = isSegmentChannelRangeInQueryNodeChannelRange(&s)
assert.Equal(t, b, false)
}
func TestMetaService_printCollectionStruct(t *testing.T) {
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionID, false)
@ -102,14 +71,11 @@ func TestMetaService_printCollectionStruct(t *testing.T) {
}
func TestMetaService_printSegmentStruct(t *testing.T) {
var s = etcdpb.SegmentMeta{
var s = datapb.SegmentInfo{
SegmentID: UniqueID(0),
CollectionID: UniqueID(0),
PartitionTag: "partition0",
ChannelStart: 128,
ChannelEnd: 256,
PartitionID: defaultPartitionID,
OpenTime: Timestamp(0),
CloseTime: Timestamp(math.MaxUint64),
NumRows: UniqueID(0),
}
@ -146,8 +112,7 @@ func TestMetaService_processCollectionCreate(t *testing.T) {
>
>
>
segmentIDs: 0
partition_tags: "default"
partitionIDs: 2021
`
node.metaService.processCollectionCreate(id, value)
@ -168,10 +133,7 @@ func TestMetaService_processSegmentCreate(t *testing.T) {
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
id := "0"
value := `partition_tag: "default"
channel_start: 0
channel_end: 1
close_time: 18446744073709551615
value := `partitionID: 2021
`
(*node.metaService).processSegmentCreate(id, value)
@ -212,8 +174,7 @@ func TestMetaService_processCreate(t *testing.T) {
>
>
>
segmentIDs: 0
partition_tags: "default"
partitionIDs: 2021
`
(*node.metaService).processCreate(key1, msg1)
@ -225,10 +186,7 @@ func TestMetaService_processCreate(t *testing.T) {
assert.Equal(t, collection.ID(), UniqueID(0))
key2 := Params.MetaRootPath + "/segment/0"
msg2 := `partition_tag: "default"
channel_start: 0
channel_end: 1
close_time: 18446744073709551615
msg2 := `partitionID: 2021
`
(*node.metaService).processCreate(key2, msg2)
@ -238,430 +196,6 @@ func TestMetaService_processCreate(t *testing.T) {
node.Stop()
}
func TestMetaService_processSegmentModify(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
segmentID := UniqueID(0)
initTestMeta(t, node, collectionID, segmentID)
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
id := "0"
value := `partition_tag: "default"
channel_start: 0
channel_end: 1
close_time: 18446744073709551615
`
(*node.metaService).processSegmentCreate(id, value)
s, err := node.replica.getSegmentByID(segmentID)
assert.NoError(t, err)
assert.Equal(t, s.segmentID, segmentID)
newValue := `partition_tag: "default"
channel_start: 0
channel_end: 1
close_time: 18446744073709551615
`
// TODO: modify segment for testing processCollectionModify
(*node.metaService).processSegmentModify(id, newValue)
seg, err := node.replica.getSegmentByID(segmentID)
assert.NoError(t, err)
assert.Equal(t, seg.segmentID, segmentID)
node.Stop()
}
func TestMetaService_processCollectionModify(t *testing.T) {
node := newQueryNodeMock()
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
id := "0"
value := `schema: <
name: "test"
fields: <
fieldID:100
name: "vec"
data_type: VECTOR_FLOAT
type_params: <
key: "dim"
value: "16"
>
index_params: <
key: "metric_type"
value: "L2"
>
>
fields: <
fieldID:101
name: "age"
data_type: INT32
type_params: <
key: "dim"
value: "1"
>
>
>
segmentIDs: 0
partition_tags: "p0"
partition_tags: "p1"
partition_tags: "p2"
`
(*node.metaService).processCollectionCreate(id, value)
collectionNum := node.replica.getCollectionNum()
assert.Equal(t, collectionNum, 1)
collection, err := node.replica.getCollectionByID(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, collection.ID(), UniqueID(0))
partitionNum, err := node.replica.getPartitionNum(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, partitionNum, 3)
hasPartition := node.replica.hasPartition(UniqueID(0), "p0")
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
assert.Equal(t, hasPartition, false)
newValue := `schema: <
name: "test"
fields: <
fieldID:100
name: "vec"
data_type: VECTOR_FLOAT
type_params: <
key: "dim"
value: "16"
>
index_params: <
key: "metric_type"
value: "L2"
>
>
fields: <
fieldID:101
name: "age"
data_type: INT32
type_params: <
key: "dim"
value: "1"
>
>
>
segmentIDs: 0
partition_tags: "p1"
partition_tags: "p2"
partition_tags: "p3"
`
(*node.metaService).processCollectionModify(id, newValue)
collection, err = node.replica.getCollectionByID(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, collection.ID(), UniqueID(0))
partitionNum, err = node.replica.getPartitionNum(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, partitionNum, 3)
hasPartition = node.replica.hasPartition(UniqueID(0), "p0")
assert.Equal(t, hasPartition, false)
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
assert.Equal(t, hasPartition, true)
node.Stop()
}
func TestMetaService_processModify(t *testing.T) {
node := newQueryNodeMock()
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
key1 := Params.MetaRootPath + "/collection/0"
msg1 := `schema: <
name: "test"
fields: <
fieldID:100
name: "vec"
data_type: VECTOR_FLOAT
type_params: <
key: "dim"
value: "16"
>
index_params: <
key: "metric_type"
value: "L2"
>
>
fields: <
fieldID:101
name: "age"
data_type: INT32
type_params: <
key: "dim"
value: "1"
>
>
>
segmentIDs: 0
partition_tags: "p0"
partition_tags: "p1"
partition_tags: "p2"
`
(*node.metaService).processCreate(key1, msg1)
collectionNum := node.replica.getCollectionNum()
assert.Equal(t, collectionNum, 1)
collection, err := node.replica.getCollectionByID(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, collection.ID(), UniqueID(0))
partitionNum, err := node.replica.getPartitionNum(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, partitionNum, 3)
hasPartition := node.replica.hasPartition(UniqueID(0), "p0")
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
assert.Equal(t, hasPartition, false)
key2 := Params.MetaRootPath + "/segment/0"
msg2 := `partition_tag: "p1"
channel_start: 0
channel_end: 1
close_time: 18446744073709551615
`
(*node.metaService).processCreate(key2, msg2)
s, err := node.replica.getSegmentByID(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, s.segmentID, UniqueID(0))
// modify
// TODO: use different index for testing processCollectionModify
msg3 := `schema: <
name: "test"
fields: <
fieldID:100
name: "vec"
data_type: VECTOR_FLOAT
type_params: <
key: "dim"
value: "16"
>
index_params: <
key: "metric_type"
value: "L2"
>
>
fields: <
fieldID:101
name: "age"
data_type: INT32
type_params: <
key: "dim"
value: "1"
>
>
>
segmentIDs: 0
partition_tags: "p1"
partition_tags: "p2"
partition_tags: "p3"
`
(*node.metaService).processModify(key1, msg3)
collection, err = node.replica.getCollectionByID(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, collection.ID(), UniqueID(0))
partitionNum, err = node.replica.getPartitionNum(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, partitionNum, 3)
hasPartition = node.replica.hasPartition(UniqueID(0), "p0")
assert.Equal(t, hasPartition, false)
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
assert.Equal(t, hasPartition, true)
hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
assert.Equal(t, hasPartition, true)
msg4 := `partition_tag: "p1"
channel_start: 0
channel_end: 1
close_time: 18446744073709551615
`
(*node.metaService).processModify(key2, msg4)
seg, err := node.replica.getSegmentByID(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, seg.segmentID, UniqueID(0))
node.Stop()
}
func TestMetaService_processSegmentDelete(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
id := "0"
value := `partition_tag: "default"
channel_start: 0
channel_end: 1
close_time: 18446744073709551615
`
(*node.metaService).processSegmentCreate(id, value)
seg, err := node.replica.getSegmentByID(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, seg.segmentID, UniqueID(0))
(*node.metaService).processSegmentDelete("0")
mapSize := node.replica.getSegmentNum()
assert.Equal(t, mapSize, 0)
node.Stop()
}
func TestMetaService_processCollectionDelete(t *testing.T) {
node := newQueryNodeMock()
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
id := "0"
value := `schema: <
name: "test"
fields: <
fieldID:100
name: "vec"
data_type: VECTOR_FLOAT
type_params: <
key: "dim"
value: "16"
>
index_params: <
key: "metric_type"
value: "L2"
>
>
fields: <
fieldID:101
name: "age"
data_type: INT32
type_params: <
key: "dim"
value: "1"
>
>
>
segmentIDs: 0
partition_tags: "default"
`
(*node.metaService).processCollectionCreate(id, value)
collectionNum := node.replica.getCollectionNum()
assert.Equal(t, collectionNum, 1)
collection, err := node.replica.getCollectionByID(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, collection.ID(), UniqueID(0))
(*node.metaService).processCollectionDelete(id)
collectionNum = node.replica.getCollectionNum()
assert.Equal(t, collectionNum, 0)
node.Stop()
}
func TestMetaService_processDelete(t *testing.T) {
node := newQueryNodeMock()
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
key1 := Params.MetaRootPath + "/collection/0"
msg1 := `schema: <
name: "test"
fields: <
fieldID:100
name: "vec"
data_type: VECTOR_FLOAT
type_params: <
key: "dim"
value: "16"
>
index_params: <
key: "metric_type"
value: "L2"
>
>
fields: <
fieldID:101
name: "age"
data_type: INT32
type_params: <
key: "dim"
value: "1"
>
>
>
segmentIDs: 0
partition_tags: "default"
`
(*node.metaService).processCreate(key1, msg1)
collectionNum := node.replica.getCollectionNum()
assert.Equal(t, collectionNum, 1)
collection, err := node.replica.getCollectionByID(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, collection.ID(), UniqueID(0))
key2 := Params.MetaRootPath + "/segment/0"
msg2 := `partition_tag: "default"
channel_start: 0
channel_end: 1
close_time: 18446744073709551615
`
(*node.metaService).processCreate(key2, msg2)
seg, err := node.replica.getSegmentByID(UniqueID(0))
assert.NoError(t, err)
assert.Equal(t, seg.segmentID, UniqueID(0))
(*node.metaService).processDelete(key1)
collectionsSize := node.replica.getCollectionNum()
assert.Equal(t, collectionsSize, 0)
mapSize := node.replica.getSegmentNum()
assert.Equal(t, mapSize, 0)
node.Stop()
}
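The processCreate/processDelete tests above drive the meta service purely through etcd-style keys of the form <MetaRootPath>/collection/<id> and <MetaRootPath>/segment/<id>, with text-format protobuf as the value. A minimal, self-contained sketch of that key layout follows; the helper names (collectionKey, segmentKey, trailingID) are illustrative and not part of the codebase.
package main

import (
	"fmt"
	"path"
	"strconv"
)

// collectionKey/segmentKey build keys in the same shape the tests use:
// <metaRootPath>/collection/<id> and <metaRootPath>/segment/<id>.
func collectionKey(metaRootPath string, id int64) string {
	return path.Join(metaRootPath, "collection", strconv.FormatInt(id, 10))
}

func segmentKey(metaRootPath string, id int64) string {
	return path.Join(metaRootPath, "segment", strconv.FormatInt(id, 10))
}

// trailingID recovers the numeric suffix from such a key.
func trailingID(key string) (int64, error) {
	return strconv.ParseInt(path.Base(key), 10, 64)
}

func main() {
	k := collectionKey("/etcd/test/root/querynode", 0)
	id, _ := trailingID(k)
	fmt.Println(k, id) // /etcd/test/root/querynode/collection/0 0
	fmt.Println(segmentKey("/etcd/test/root/querynode", 0))
}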
func TestMetaService_processResp(t *testing.T) {
node := newQueryNodeMock()
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
metaChan := (*node.metaService).kvBase.WatchWithPrefix("")
select {
case <-node.queryNodeLoopCtx.Done():
return
case resp := <-metaChan:
_ = (*node.metaService).processResp(resp)
}
node.Stop()
}
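TestMetaService_processResp only checks that processResp accepts whatever arrives on the WatchWithPrefix channel; the create and delete handlers it presumably fans out to are exercised by the tests above. A simplified, self-contained sketch of that dispatch under that assumption, using local stand-in event types rather than the real etcd watch response:
package main

import "fmt"

// Stand-ins for the watched meta events; the real service watches
// MetaRootPath with a prefix watcher and receives etcd events.
type eventType int

const (
	eventPut eventType = iota
	eventDelete
)

type metaEvent struct {
	typ   eventType
	key   string
	value string
}

// dispatch mirrors a processResp-style fan-out (illustrative; the real code
// walks the events in the watch response).
func dispatch(ev metaEvent, onCreate func(key, value string), onDelete func(key string)) error {
	switch ev.typ {
	case eventPut:
		onCreate(ev.key, ev.value)
	case eventDelete:
		onDelete(ev.key)
	default:
		return fmt.Errorf("unknown event type: %d", ev.typ)
	}
	return nil
}

func main() {
	_ = dispatch(metaEvent{typ: eventPut, key: "/collection/0", value: "schema: <...>"},
		func(k, v string) { fmt.Println("create", k) },
		func(k string) { fmt.Println("delete", k) })
}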
func TestMetaService_loadCollections(t *testing.T) {
node := newQueryNodeMock()
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)

View File

@ -58,10 +58,9 @@ type ParamTable struct {
StatsChannelName string
StatsReceiveBufSize int64
GracefulTime int64
MsgChannelSubName string
DefaultPartitionTag string
SliceIndex int
GracefulTime int64
MsgChannelSubName string
SliceIndex int
}
var Params ParamTable
@ -133,7 +132,6 @@ func (p *ParamTable) Init() {
p.initGracefulTime()
p.initMsgChannelSubName()
p.initDefaultPartitionTag()
p.initSliceIndex()
p.initFlowGraphMaxQueueLength()
@ -458,15 +456,6 @@ func (p *ParamTable) initDDChannelNames() {
p.DDChannelNames = ret
}
func (p *ParamTable) initDefaultPartitionTag() {
defaultTag, err := p.Load("common.defaultPartitionTag")
if err != nil {
panic(err)
}
p.DefaultPartitionTag = defaultTag
}
func (p *ParamTable) initSliceIndex() {
queryNodeID := p.QueryNodeID
queryNodeIDList := p.QueryNodeIDList()

View File

@ -165,8 +165,3 @@ func TestParamTable_ddChannelName(t *testing.T) {
contains := strings.Contains(names[0], "data-definition-0")
assert.Equal(t, contains, true)
}
func TestParamTable_defaultPartitionTag(t *testing.T) {
tag := Params.DefaultPartitionTag
assert.Equal(t, tag, "_default")
}
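The deleted initDefaultPartitionTag and its test follow ParamTable's usual load-or-panic initialization shape: read a key from the config loader and panic if it is missing. A self-contained sketch of that shape; the loader interface and the key name are placeholders, not the real paramtable API.
package main

import (
	"errors"
	"fmt"
)

// loader is a stand-in for the ParamTable config loader (illustrative).
type loader interface {
	Load(key string) (string, error)
}

type mapLoader map[string]string

func (m mapLoader) Load(key string) (string, error) {
	if v, ok := m[key]; ok {
		return v, nil
	}
	return "", errors.New("key not found: " + key)
}

// mustLoad mirrors the load-or-panic pattern of the removed initializer.
func mustLoad(l loader, key string) string {
	v, err := l.Load(key)
	if err != nil {
		panic(err)
	}
	return v
}

func main() {
	cfg := mapLoader{"common.exampleKey": "value"} // key name is a placeholder
	fmt.Println(mustLoad(cfg, "common.exampleKey"))
}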

View File

@ -13,33 +13,19 @@ package querynode
import "C"
type Partition struct {
partitionTag string
id UniqueID
segments []*Segment
enableDM bool
id UniqueID
segments []*Segment
enableDM bool
}
func (p *Partition) ID() UniqueID {
return p.id
}
func (p *Partition) Tag() string {
return (*p).partitionTag
}
func (p *Partition) Segments() *[]*Segment {
return &(*p).segments
}
func newPartition2(partitionTag string) *Partition {
var newPartition = &Partition{
partitionTag: partitionTag,
enableDM: false,
}
return newPartition
}
func newPartition(partitionID UniqueID) *Partition {
var newPartition = &Partition{
id: partitionID,

View File

@ -19,7 +19,7 @@ func TestPartition_Segments(t *testing.T) {
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := node.replica.addSegment2(UniqueID(i), targetPartition.partitionTag, collection.ID(), segTypeGrowing)
err := node.replica.addSegment(UniqueID(i), targetPartition.ID(), collection.ID(), segTypeGrowing)
assert.NoError(t, err)
}
@ -28,7 +28,7 @@ func TestPartition_Segments(t *testing.T) {
}
func TestPartition_newPartition(t *testing.T) {
partitionTag := "default"
partition := newPartition2(partitionTag)
assert.Equal(t, partition.partitionTag, partitionTag)
partitionID := defaultPartitionID
partition := newPartition(partitionID)
assert.Equal(t, partition.ID(), defaultPartitionID)
}
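After this change a Partition is identified only by its UniqueID; the tag field, the Tag() accessor, and the newPartition2 constructor are gone. A compact, self-contained sketch reconstructed from the hunks above (the Segment stub and the UniqueID alias are simplified stand-ins for the real types):
package main

import "fmt"

type UniqueID = int64

// Segment is reduced to an opaque stub here; the real type lives in segment.go.
type Segment struct{ segmentID UniqueID }

// Partition as it looks after the tag removal: keyed purely by ID.
type Partition struct {
	id       UniqueID
	segments []*Segment
	enableDM bool
}

func (p *Partition) ID() UniqueID { return p.id }

func newPartition(partitionID UniqueID) *Partition {
	return &Partition{id: partitionID}
}

func main() {
	p := newPartition(2021) // matches defaultPartitionID used in the tests
	fmt.Println(p.ID())
}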

View File

@ -20,6 +20,8 @@ import (
const ctxTimeInMillisecond = 5000
const closeWithDeadline = true
const defaultPartitionID = UniqueID(2021)
type queryServiceMock struct{}
func setup() {
@ -27,7 +29,7 @@ func setup() {
Params.MetaRootPath = "/etcd/test/root/querynode"
}
func genTestCollectionMeta(collectionID UniqueID, isBinary bool) *etcdpb.CollectionMeta {
func genTestCollectionMeta(collectionID UniqueID, isBinary bool) *etcdpb.CollectionInfo {
var fieldVec schemapb.FieldSchema
if isBinary {
fieldVec = schemapb.FieldSchema{
@ -76,21 +78,18 @@ func genTestCollectionMeta(collectionID UniqueID, isBinary bool) *etcdpb.Collect
DataType: schemapb.DataType_INT32,
}
collectionName := rand.Int63n(1000000)
schema := schemapb.CollectionSchema{
Name: "collection-" + strconv.FormatInt(collectionName, 10),
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}
collectionMeta := etcdpb.CollectionMeta{
ID: collectionID,
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
collectionMeta := etcdpb.CollectionInfo{
ID: collectionID,
Schema: &schema,
CreateTime: Timestamp(0),
PartitionIDs: []UniqueID{defaultPartitionID},
}
return &collectionMeta
@ -111,10 +110,10 @@ func initTestMeta(t *testing.T, node *QueryNode, collectionID UniqueID, segmentI
assert.Equal(t, collection.ID(), collectionID)
assert.Equal(t, node.replica.getCollectionNum(), 1)
err = node.replica.addPartition2(collection.ID(), collectionMeta.PartitionTags[0])
err = node.replica.addPartition(collection.ID(), collectionMeta.PartitionIDs[0])
assert.NoError(t, err)
err = node.replica.addSegment2(segmentID, collectionMeta.PartitionTags[0], collectionID, segTypeGrowing)
err = node.replica.addSegment(segmentID, collectionMeta.PartitionIDs[0], collectionID, segTypeGrowing)
assert.NoError(t, err)
}
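The test fixture now builds an etcdpb.CollectionInfo carrying PartitionIDs instead of the old CollectionMeta with SegmentIDs and PartitionTags. A self-contained sketch of the new shape with simplified local types (the real schema also carries the field definitions shown above):
package main

import "fmt"

type UniqueID = int64
type Timestamp = uint64

// CollectionSchema is trimmed down here; the real one also holds Fields.
type CollectionSchema struct {
	Name   string
	AutoID bool
}

// CollectionInfo mirrors the field set used in the hunk above.
type CollectionInfo struct {
	ID           UniqueID
	Schema       *CollectionSchema
	CreateTime   Timestamp
	PartitionIDs []UniqueID
}

const defaultPartitionID = UniqueID(2021)

func genTestCollectionInfo(collectionID UniqueID) *CollectionInfo {
	return &CollectionInfo{
		ID:           collectionID,
		Schema:       &CollectionSchema{Name: "collection-test", AutoID: true},
		CreateTime:   Timestamp(0),
		PartitionIDs: []UniqueID{defaultPartitionID},
	}
}

func main() {
	info := genTestCollectionInfo(0)
	fmt.Println(info.PartitionIDs) // [2021]
}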

View File

@ -18,7 +18,7 @@ func TestReduce_AllFunc(t *testing.T) {
collectionMeta := genTestCollectionMeta(collectionID, false)
collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
const DIM = 16
var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}

View File

@ -239,7 +239,6 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
return errors.New("unmarshal query failed")
}
collectionID := searchMsg.CollectionID
partitionTagsInQuery := query.PartitionNames
collection, err := ss.replica.getCollectionByID(collectionID)
if err != nil {
span.LogFields(oplog.Error(err))
@ -263,29 +262,30 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
searchResults := make([]*SearchResult, 0)
matchedSegments := make([]*Segment, 0)
//fmt.Println("search msg's partitionTag = ", partitionTagsInQuery)
//fmt.Println("search msg's partitionID = ", partitionIDsInQuery)
var partitionTagsInCol []string
var partitionIDsInCol []UniqueID
for _, partition := range collection.partitions {
partitionTag := partition.partitionTag
partitionTagsInCol = append(partitionTagsInCol, partitionTag)
partitionID := partition.ID()
partitionIDsInCol = append(partitionIDsInCol, partitionID)
}
var searchPartitionTag []string
if len(partitionTagsInQuery) == 0 {
searchPartitionTag = partitionTagsInCol
var searchPartitionIDs []UniqueID
partitionIDsInQuery := searchMsg.PartitionIDs
if len(partitionIDsInQuery) == 0 {
searchPartitionIDs = partitionIDsInCol
} else {
for _, tag := range partitionTagsInCol {
for _, toMatchTag := range partitionTagsInQuery {
re := regexp.MustCompile("^" + toMatchTag + "$")
if re.MatchString(tag) {
searchPartitionTag = append(searchPartitionTag, tag)
for _, id := range partitionIDsInCol {
for _, toMatchID := range partitionIDsInQuery {
re := regexp.MustCompile("^" + strconv.FormatInt(toMatchID, 10) + "$")
if re.MatchString(strconv.FormatInt(id, 10)) {
searchPartitionIDs = append(searchPartitionIDs, id)
}
}
}
}
for _, partitionTag := range searchPartitionTag {
partition, _ := ss.replica.getPartitionByTag(collectionID, partitionTag)
for _, partitionID := range searchPartitionIDs {
partition, _ := ss.replica.getPartitionByID(collectionID, partitionID)
for _, segment := range partition.segments {
//fmt.Println("dsl = ", dsl)

View File

@ -61,8 +61,6 @@ func TestSearch_Search(t *testing.T) {
}
query := milvuspb.SearchRequest{
CollectionName: "collection0",
PartitionNames: []string{"default"},
Dsl: dslString,
PlaceholderGroup: placeGroupByte,
}
@ -136,12 +134,12 @@ func TestSearch_Search(t *testing.T) {
Timestamp: uint64(10 + 1000),
SourceID: 0,
},
CollectionID: UniqueID(0),
PartitionName: "default",
SegmentID: int64(0),
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
CollectionID: UniqueID(0),
PartitionID: defaultPartitionID,
SegmentID: int64(0),
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
RowData: []*commonpb.Blob{
{Value: rawData},
},
@ -255,8 +253,6 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
}
query := milvuspb.SearchRequest{
CollectionName: "collection0",
PartitionNames: []string{"default"},
Dsl: dslString,
PlaceholderGroup: placeGroupByte,
}
@ -334,12 +330,12 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
Timestamp: uint64(i + 1000),
SourceID: 0,
},
CollectionID: UniqueID(0),
PartitionName: "default",
SegmentID: int64(segmentID),
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
CollectionID: UniqueID(0),
PartitionID: defaultPartitionID,
SegmentID: int64(segmentID),
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
RowData: []*commonpb.Blob{
{Value: rawData},
},

View File

@ -36,7 +36,6 @@ type Segment struct {
segmentPtr C.CSegmentInterface
segmentID UniqueID
partitionTag string // TODO: use partitionID
partitionID UniqueID
collectionID UniqueID
lastMemSize int64
@ -81,25 +80,6 @@ func (s *Segment) getType() segmentType {
return s.segmentType
}
func newSegment2(collection *Collection, segmentID int64, partitionTag string, collectionID UniqueID, segType segmentType) *Segment {
/*
CSegmentInterface
NewSegment(CCollection collection, uint64_t segment_id, SegmentType seg_type);
*/
initIndexParam := make(map[int64]indexParam)
segmentPtr := C.NewSegment(collection.collectionPtr, C.ulong(segmentID), segType)
var newSegment = &Segment{
segmentPtr: segmentPtr,
segmentType: segType,
segmentID: segmentID,
partitionTag: partitionTag,
collectionID: collectionID,
indexParam: initIndexParam,
}
return newSegment
}
func newSegment(collection *Collection, segmentID int64, partitionID UniqueID, collectionID UniqueID, segType segmentType) *Segment {
/*
CSegmentInterface

View File

@ -22,7 +22,7 @@ func TestSegment_newSegment(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
deleteSegment(segment)
deleteCollection(collection)
@ -36,7 +36,7 @@ func TestSegment_deleteSegment(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
deleteSegment(segment)
@ -52,7 +52,7 @@ func TestSegment_getRowCount(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
ids := []int64{1, 2, 3}
@ -99,7 +99,7 @@ func TestSegment_getDeletedCount(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
ids := []int64{1, 2, 3}
@ -152,7 +152,7 @@ func TestSegment_getMemSize(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
ids := []int64{1, 2, 3}
@ -199,7 +199,7 @@ func TestSegment_segmentInsert(t *testing.T) {
collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
ids := []int64{1, 2, 3}
@ -242,7 +242,7 @@ func TestSegment_segmentDelete(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
ids := []int64{1, 2, 3}
@ -291,7 +291,7 @@ func TestSegment_segmentSearch(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
ids := []int64{1, 2, 3}
@ -372,7 +372,7 @@ func TestSegment_segmentPreInsert(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
const DIM = 16
@ -410,7 +410,7 @@ func TestSegment_segmentPreDelete(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
ids := []int64{1, 2, 3}

View File

@ -1,19 +1,13 @@
package rocksmq
var Rmq *RocksMQ
type Consumer struct {
GroupName string
ChannelName string
MsgNum chan int
}
var rmq *RocksMQ
func InitRmq(rocksdbName string, idAllocator IDAllocator) error {
var err error
Rmq, err = NewRocksMQ(rocksdbName, idAllocator)
rmq, err = NewRocksMQ(rocksdbName, idAllocator)
return err
}
func GetRmq() *RocksMQ {
return Rmq
return rmq
}
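The package-level RocksMQ instance is now unexported, so other packages must go through InitRmq and GetRmq rather than touching Rmq directly. A self-contained sketch of that accessor pattern with stub types in place of the real RocksMQ and IDAllocator:
package main

import "fmt"

// Stubs standing in for the real RocksMQ and IDAllocator types.
type IDAllocator interface{}
type RocksMQ struct{ name string }

func NewRocksMQ(name string, _ IDAllocator) (*RocksMQ, error) {
	return &RocksMQ{name: name}, nil
}

// Package-level instance is unexported; callers use the accessors below.
var rmq *RocksMQ

func InitRmq(rocksdbName string, idAllocator IDAllocator) error {
	var err error
	rmq, err = NewRocksMQ(rocksdbName, idAllocator)
	return err
}

func GetRmq() *RocksMQ { return rmq }

func main() {
	_ = InitRmq("/tmp/rocksmq_demo", nil)
	fmt.Println(GetRmq().name)
}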

View File

@ -55,7 +55,7 @@ type ProducerMessage struct {
type ConsumerMessage struct {
msgID UniqueID
Payload []byte
payload []byte
}
type Channel struct {
@ -75,8 +75,6 @@ type RocksMQ struct {
idAllocator IDAllocator
produceMu sync.Mutex
consumeMu sync.Mutex
notify map[string][]Consumer
//ctx context.Context
//serverLoopWg sync.WaitGroup
//serverLoopCtx context.Context
@ -107,16 +105,9 @@ func NewRocksMQ(name string, idAllocator IDAllocator) (*RocksMQ, error) {
idAllocator: idAllocator,
}
rmq.channels = make(map[string]*Channel)
rmq.notify = make(map[string][]Consumer)
return rmq, nil
}
func NewProducerMessage(data []byte) *ProducerMessage {
return &ProducerMessage{
payload: data,
}
}
func (rmq *RocksMQ) checkKeyExist(key string) bool {
val, _ := rmq.kv.Load(key)
return val != ""
@ -237,15 +228,7 @@ func (rmq *RocksMQ) Produce(channelName string, messages []ProducerMessage) erro
kvChannelEndID := channelName + "/end_id"
kvValues[kvChannelEndID] = strconv.FormatInt(idEnd, 10)
err = rmq.kv.MultiSave(kvValues)
if err != nil {
return err
}
for _, consumer := range rmq.notify[channelName] {
consumer.MsgNum <- msgLen
}
return nil
return rmq.kv.MultiSave(kvValues)
}
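With the notify map gone, Produce no longer signals consumers; it just batches the channel bookkeeping keys and returns the result of a single MultiSave. A self-contained sketch of that tail end of Produce, with a toy in-memory KV standing in for the real store (the interface and helper names are illustrative):
package main

import (
	"fmt"
	"strconv"
)

// kvStore stands in for the metadata KV used by RocksMQ; MultiSave is assumed
// to persist all entries in one call.
type kvStore interface {
	MultiSave(kvs map[string]string) error
}

type memKV struct{ data map[string]string }

func (m *memKV) MultiSave(kvs map[string]string) error {
	for k, v := range kvs {
		m.data[k] = v
	}
	return nil
}

// persistChannelEnd mirrors the end of Produce after the notify loop was
// dropped: collect the keys to update, then return MultiSave's error directly.
func persistChannelEnd(kv kvStore, channelName string, idEnd int64) error {
	kvValues := map[string]string{
		channelName + "/end_id": strconv.FormatInt(idEnd, 10),
	}
	return kv.MultiSave(kvValues)
}

func main() {
	kv := &memKV{data: map[string]string{}}
	_ = persistChannelEnd(kv, "ch_0", 42)
	fmt.Println(kv.data["ch_0/end_id"]) // 42
}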
func (rmq *RocksMQ) Consume(groupName string, channelName string, n int) ([]ConsumerMessage, error) {
@ -291,7 +274,7 @@ func (rmq *RocksMQ) Consume(groupName string, channelName string, n int) ([]Cons
}
msg := ConsumerMessage{
msgID: msgID,
Payload: val.Data(),
payload: val.Data(),
}
consumerMessage = append(consumerMessage, msg)
key.Free()

View File

@ -66,13 +66,13 @@ func TestRocksMQ(t *testing.T) {
cMsgs, err := rmq.Consume(groupName, channelName, 1)
assert.Nil(t, err)
assert.Equal(t, len(cMsgs), 1)
assert.Equal(t, string(cMsgs[0].Payload), "a_message")
assert.Equal(t, string(cMsgs[0].payload), "a_message")
cMsgs, err = rmq.Consume(groupName, channelName, 2)
assert.Nil(t, err)
assert.Equal(t, len(cMsgs), 2)
assert.Equal(t, string(cMsgs[0].Payload), "b_message")
assert.Equal(t, string(cMsgs[1].Payload), "c_message")
assert.Equal(t, string(cMsgs[0].payload), "b_message")
assert.Equal(t, string(cMsgs[1].payload), "c_message")
}
func TestRocksMQ_Loop(t *testing.T) {
@ -127,15 +127,15 @@ func TestRocksMQ_Loop(t *testing.T) {
cMsgs, err := rmq.Consume(groupName, channelName, loopNum)
assert.Nil(t, err)
assert.Equal(t, len(cMsgs), loopNum)
assert.Equal(t, string(cMsgs[0].Payload), "message_"+strconv.Itoa(0))
assert.Equal(t, string(cMsgs[loopNum-1].Payload), "message_"+strconv.Itoa(loopNum-1))
assert.Equal(t, string(cMsgs[0].payload), "message_"+strconv.Itoa(0))
assert.Equal(t, string(cMsgs[loopNum-1].payload), "message_"+strconv.Itoa(loopNum-1))
// Consume one message once
for i := 0; i < loopNum; i++ {
oneMsgs, err := rmq.Consume(groupName, channelName, 1)
assert.Nil(t, err)
assert.Equal(t, len(oneMsgs), 1)
assert.Equal(t, string(oneMsgs[0].Payload), "message_"+strconv.Itoa(i+loopNum))
assert.Equal(t, string(oneMsgs[0].payload), "message_"+strconv.Itoa(i+loopNum))
}
cMsgs, err = rmq.Consume(groupName, channelName, 1)