feat: support dropping index without releasing collection (#42941)

issue: #42942

This PR includes the following changes:
1. Added checks to the index checker in querycoord so that it generates
drop-index tasks.
2. Added a drop-index interface to querynode.
3. To avoid search failures after an index is dropped, querynode now loads
raw data even when the index itself contains raw data, using lazy mode
(warmup=disable) so the extra data costs nothing until it is accessed.
4. In segcore, loading an index no longer deletes the raw data; it evicts
it from the cache instead.
5. In expr, indexes are pinned for the duration of evaluation to prevent
concurrent use-after-free errors (see the sketch after this list).
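
A minimal sketch of the pinning idea behind change 5 (an illustrative
stand-in only; the real PinWrapper lives in segcore's caching layer and
carries a cell accessor rather than a bare shared_ptr):

    // Hypothetical stand-in for PinWrapper: the shared owner keeps the
    // cached index cell alive, so a concurrent DropIndex can only release
    // the cell after the last reader drops its pin.
    #include <memory>

    template <typename T>
    class PinWrapper {
     public:
        PinWrapper() = default;
        PinWrapper(std::shared_ptr<void> owner, T ptr)
            : owner_(std::move(owner)), ptr_(ptr) {
        }
        T
        get() const {
            return ptr_;
        }

     private:
        std::shared_ptr<void> owner_;  // refcount on the cached cell
        T ptr_{};                      // raw pointer into that cell
    };

The expression classes below store vectors of such wrappers
(pinned_index_) for the whole evaluation, which is what makes dropping an
index concurrently safe.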

---------

Signed-off-by: sunby <sunbingyi1992@gmail.com>


@@ -39,6 +39,8 @@ struct LoadFieldDataInfo {
     int64_t storage_version = 0;
     milvus::proto::common::LoadPriority load_priority =
         milvus::proto::common::LoadPriority::HIGH;
+    CacheWarmupPolicy warmup_policy =
+        CacheWarmupPolicy::CacheWarmupPolicy_Disable;
 };

 struct LoadDeletedRecordInfo {

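The new warmup_policy defaults to Disable, which is the lazy mode referenced
in change 3: cells are only materialized on first access, so raw data stays
loadable after an index is dropped without paying the cost up front. A rough
sketch of how a loader might branch on the policy (the Sync enumerator and
the eager path are assumed names for illustration; only the Disable default
appears in this diff):

    // Sketch under stated assumptions; not the actual segcore loader.
    enum class CacheWarmupPolicy {
        CacheWarmupPolicy_Disable,  // lazy: load cells on first access
        CacheWarmupPolicy_Sync,     // assumed: materialize cells eagerly
    };

    void
    LoadRawData(CacheWarmupPolicy policy) {
        if (policy == CacheWarmupPolicy::CacheWarmupPolicy_Sync) {
            // eager path: pay the IO cost at load time
        }
        // with Disable, nothing is fetched here; a later read faults the
        // cell in, so the field remains usable once its index is dropped
    }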

@@ -106,7 +106,8 @@ PhyColumnExpr::DoEval(OffsetVector* input) {
             expr_->GetColumn().data_type_,
             expr_->GetColumn().field_id_,
             chunk_id,
-            data_barrier);
+            data_barrier,
+            pinned_index_);
         auto chunk_data_by_offset = chunk_data(chunk_offset);
         if (!chunk_data_by_offset.has_value()) {
             valid_res[processed_rows] = false;

@@ -131,12 +132,12 @@ PhyColumnExpr::DoEval(OffsetVector* input) {
     T* res_value = res_vec->RawAsValues<T>();
     TargetBitmapView valid_res(res_vec->GetValidRawData(), real_batch_size);
     valid_res.set();
-    auto chunk_data = segment_chunk_reader_.GetChunkDataAccessor(
+    auto chunk_data = segment_chunk_reader_.GetMultipleChunkDataAccessor(
         expr_->GetColumn().data_type_,
         expr_->GetColumn().field_id_,
-        is_indexed_,
         current_chunk_id_,
-        current_chunk_pos_);
+        current_chunk_pos_,
+        pinned_index_);
     for (int i = 0; i < real_batch_size; ++i) {
         auto data = chunk_data();
         if (!data.has_value()) {

@@ -173,7 +174,8 @@ PhyColumnExpr::DoEval(OffsetVector* input) {
             expr_->GetColumn().data_type_,
             expr_->GetColumn().field_id_,
             chunk_id,
-            data_barrier);
+            data_barrier,
+            pinned_index_);
         for (int i = chunk_id == current_chunk_id_ ? current_chunk_pos_ : 0;
              i < chunk_size;


@@ -40,21 +40,20 @@ class PhyColumnExpr : public Expr {
           segment_chunk_reader_(segment, active_count),
           batch_size_(batch_size),
           expr_(expr) {
-        is_indexed_ = segment->HasIndex(expr_->GetColumn().field_id_);
+        auto& schema = segment->get_schema();
+        auto& field_meta = schema[expr_->GetColumn().field_id_];
+        pinned_index_ = PinIndex(segment, field_meta);
+        is_indexed_ = pinned_index_.size() > 0;
         if (segment->is_chunked()) {
             num_chunk_ =
                 is_indexed_
-                    ? segment->num_chunk_index(expr_->GetColumn().field_id_)
+                    ? pinned_index_.size()
                 : segment->type() == SegmentType::Growing
                     ? upper_div(segment_chunk_reader_.active_count_,
                                 segment_chunk_reader_.SizePerChunk())
                     : segment->num_chunk_data(expr_->GetColumn().field_id_);
         } else {
-            num_chunk_ =
-                is_indexed_
-                    ? segment->num_chunk_index(expr_->GetColumn().field_id_)
-                    : upper_div(segment_chunk_reader_.active_count_,
-                                segment_chunk_reader_.SizePerChunk());
+            num_chunk_ = is_indexed_
+                             ? pinned_index_.size()
+                             : upper_div(segment_chunk_reader_.active_count_,
+                                         segment_chunk_reader_.SizePerChunk());
         }
         AssertInfo(
             batch_size_ > 0,

@@ -139,6 +138,7 @@ class PhyColumnExpr : public Expr {
     const segcore::SegmentChunkReader segment_chunk_reader_;
     int64_t batch_size_;
     std::shared_ptr<const milvus::expr::ColumnExpr> expr_;
+    std::vector<PinWrapper<const index::IndexBase*>> pinned_index_;
 };

 }  // namespace exec


@@ -86,12 +86,14 @@ PhyCompareFilterExpr::ExecCompareExprDispatcher(OpType op, EvalCtx& context) {
                 expr_->left_data_type_,
                 expr_->left_field_id_,
                 left_chunk_id,
-                left_data_barrier);
+                left_data_barrier,
+                pinned_index_left_);
             auto right = segment_chunk_reader_.GetChunkDataAccessor(
                 expr_->right_data_type_,
                 expr_->right_field_id_,
                 right_chunk_id,
-                right_data_barrier);
+                right_data_barrier,
+                pinned_index_right_);
             auto left_opt = left(left_chunk_offset);
             auto right_opt = right(right_chunk_offset);
             if (!left_opt.has_value() || !right_opt.has_value()) {

@@ -121,18 +123,18 @@ PhyCompareFilterExpr::ExecCompareExprDispatcher(OpType op, EvalCtx& context) {
     TargetBitmapView valid_res(res_vec->GetValidRawData(), real_batch_size);
     valid_res.set();
-    auto left =
-        segment_chunk_reader_.GetChunkDataAccessor(expr_->left_data_type_,
-                                                   expr_->left_field_id_,
-                                                   is_left_indexed_,
-                                                   left_current_chunk_id_,
-                                                   left_current_chunk_pos_);
-    auto right = segment_chunk_reader_.GetChunkDataAccessor(
+    auto left = segment_chunk_reader_.GetMultipleChunkDataAccessor(
+        expr_->left_data_type_,
+        expr_->left_field_id_,
+        left_current_chunk_id_,
+        left_current_chunk_pos_,
+        pinned_index_left_);
+    auto right = segment_chunk_reader_.GetMultipleChunkDataAccessor(
         expr_->right_data_type_,
         expr_->right_field_id_,
-        is_right_indexed_,
         right_current_chunk_id_,
-        right_current_chunk_pos_);
+        right_current_chunk_pos_,
+        pinned_index_right_);
     for (int i = 0; i < real_batch_size; ++i) {
         auto left_value = left(), right_value = right();
         if (!left_value.has_value() || !right_value.has_value()) {

@@ -176,12 +178,14 @@ PhyCompareFilterExpr::ExecCompareExprDispatcher(OpType op, EvalCtx& context) {
                 expr_->left_data_type_,
                 expr_->left_field_id_,
                 chunk_id,
-                left_data_barrier);
+                left_data_barrier,
+                pinned_index_left_);
             auto right = segment_chunk_reader_.GetChunkDataAccessor(
                 expr_->right_data_type_,
                 expr_->right_field_id_,
                 chunk_id,
-                right_data_barrier);
+                right_data_barrier,
+                pinned_index_right_);
             for (int i = chunk_id == current_chunk_id_ ? current_chunk_pos_ : 0;
                  i < chunk_size;


@@ -140,19 +140,22 @@ class PhyCompareFilterExpr : public Expr {
           segment_chunk_reader_(segment, active_count),
           batch_size_(batch_size),
           expr_(expr) {
-        is_left_indexed_ = segment->HasIndex(left_field_);
-        is_right_indexed_ = segment->HasIndex(right_field_);
+        auto& schema = segment->get_schema();
+        auto& left_field_meta = schema[left_field_];
+        auto& right_field_meta = schema[right_field_];
+        pinned_index_left_ = PinIndex(segment, left_field_meta);
+        pinned_index_right_ = PinIndex(segment, right_field_meta);
+        is_left_indexed_ = pinned_index_left_.size() > 0;
+        is_right_indexed_ = pinned_index_right_.size() > 0;
         if (segment->is_chunked()) {
             left_num_chunk_ =
-                is_left_indexed_
-                    ? segment->num_chunk_index(expr_->left_field_id_)
+                is_left_indexed_ ? pinned_index_left_.size()
                 : segment->type() == SegmentType::Growing
                     ? upper_div(segment_chunk_reader_.active_count_,
                                 segment_chunk_reader_.SizePerChunk())
                     : segment->num_chunk_data(left_field_);
             right_num_chunk_ =
-                is_right_indexed_
-                    ? segment->num_chunk_index(expr_->right_field_id_)
+                is_right_indexed_ ? pinned_index_right_.size()
                 : segment->type() == SegmentType::Growing
                     ? upper_div(segment_chunk_reader_.active_count_,
                                 segment_chunk_reader_.SizePerChunk())

@@ -160,7 +163,7 @@ class PhyCompareFilterExpr : public Expr {
             num_chunk_ = left_num_chunk_;
         } else {
             num_chunk_ = is_left_indexed_
-                             ? segment->num_chunk_index(expr_->left_field_id_)
+                             ? pinned_index_left_.size()
                              : upper_div(segment_chunk_reader_.active_count_,
                                          segment_chunk_reader_.SizePerChunk());
         }

@@ -551,6 +554,8 @@ class PhyCompareFilterExpr : public Expr {
     const segcore::SegmentChunkReader segment_chunk_reader_;
     int64_t batch_size_;
     std::shared_ptr<const milvus::expr::CompareExpr> expr_;
+    std::vector<PinWrapper<const index::IndexBase*>> pinned_index_left_;
+    std::vector<PinWrapper<const index::IndexBase*>> pinned_index_right_;
 };

 }  // namespace exec
 }  // namespace milvus


@@ -56,38 +56,41 @@ PhyExistsFilterExpr::EvalJsonExistsForIndex() {
     if (cached_index_chunk_id_ != 0) {
         cached_index_chunk_id_ = 0;
         auto pointer = milvus::Json::pointer(expr_->column_.nested_path_);
-        auto pw = segment_->GetJsonIndex(expr_->column_.field_id_, pointer);
-        auto* index = pw.get();
+        auto* index = pinned_index_[cached_index_chunk_id_].get();
         AssertInfo(index != nullptr,
                    "Cannot find json index with path: " + pointer);
         switch (index->GetCastType().data_type()) {
             case JsonCastType::DataType::DOUBLE: {
                 auto* json_index =
-                    dynamic_cast<index::JsonInvertedIndex<double>*>(index);
+                    const_cast<index::JsonInvertedIndex<double>*>(
+                        dynamic_cast<const index::JsonInvertedIndex<double>*>(
+                            index));
                 cached_index_chunk_res_ = std::make_shared<TargetBitmap>(
                     std::move(json_index->Exists()));
                 break;
             }
             case JsonCastType::DataType::VARCHAR: {
-                auto* json_index =
-                    dynamic_cast<index::JsonInvertedIndex<std::string>*>(index);
+                auto* json_index = const_cast<
+                    index::JsonInvertedIndex<std::string>*>(
+                    dynamic_cast<const index::JsonInvertedIndex<std::string>*>(
+                        index));
                 cached_index_chunk_res_ = std::make_shared<TargetBitmap>(
                     std::move(json_index->Exists()));
                 break;
             }
             case JsonCastType::DataType::BOOL: {
-                auto* json_index =
-                    dynamic_cast<index::JsonInvertedIndex<bool>*>(index);
+                auto* json_index = const_cast<index::JsonInvertedIndex<bool>*>(
+                    dynamic_cast<const index::JsonInvertedIndex<bool>*>(index));
                 cached_index_chunk_res_ = std::make_shared<TargetBitmap>(
                     std::move(json_index->Exists()));
                 break;
             }
             case JsonCastType::DataType::JSON: {
-                auto* json_flat_index =
-                    dynamic_cast<index::JsonFlatIndex*>(index);
+                auto* json_flat_index = const_cast<index::JsonFlatIndex*>(
+                    dynamic_cast<const index::JsonFlatIndex*>(index));
                 auto executor =
                     json_flat_index->create_executor<double>(pointer);
                 cached_index_chunk_res_ = std::make_shared<TargetBitmap>(


@@ -41,6 +41,22 @@ namespace exec {

 enum class FilterType { sequential = 0, random = 1 };

+inline std::vector<PinWrapper<const index::IndexBase*>>
+PinIndex(const segcore::SegmentInternalInterface* segment,
+         const FieldMeta& field_meta,
+         const std::vector<std::string>& path = {},
+         DataType data_type = DataType::NONE,
+         bool any_type = false,
+         bool is_array = false) {
+    if (field_meta.get_data_type() == DataType::JSON) {
+        auto pointer = milvus::Json::pointer(path);
+        return segment->PinJsonIndex(
+            field_meta.get_id(), pointer, data_type, any_type, is_array);
+    } else {
+        return segment->PinIndex(field_meta.get_id());
+    }
+}
+
 class Expr {
  public:
     Expr(DataType type,

@@ -174,20 +190,15 @@ class SegmentExpr : public Expr {
             pk_type_ = field_meta.get_data_type();
         }

-        if (field_meta.get_data_type() == DataType::JSON) {
-            auto pointer = milvus::Json::pointer(nested_path_);
-            if (is_index_mode_ = segment_->HasIndex(field_id_,
-                                                    pointer,
-                                                    value_type_,
-                                                    allow_any_json_cast_type_,
-                                                    is_json_contains_)) {
-                num_index_chunk_ = 1;
-            }
-        } else {
-            is_index_mode_ = segment_->HasIndex(field_id_);
-            if (is_index_mode_) {
-                num_index_chunk_ = segment_->num_chunk_index(field_id_);
-            }
+        pinned_index_ = PinIndex(segment_,
+                                 field_meta,
+                                 nested_path_,
+                                 value_type_,
+                                 allow_any_json_cast_type_,
+                                 is_json_contains_);
+        if (pinned_index_.size() > 0) {
+            is_index_mode_ = true;
+            num_index_chunk_ = pinned_index_.size();
         }
         // if index not include raw data, also need load data
         if (segment_->HasFieldData(field_id_)) {

@@ -410,8 +421,8 @@ class SegmentExpr : public Expr {
         using Index = index::ScalarIndex<IndexInnerType>;
         TargetBitmap valid_res(input->size());
-        auto pw = segment_->chunk_scalar_index<IndexInnerType>(field_id_, 0);
-        auto* index_ptr = const_cast<Index*>(pw.get());
+        auto scalar_index = dynamic_cast<const Index*>(pinned_index_[0].get());
+        auto* index_ptr = const_cast<Index*>(scalar_index);
         auto valid_result = index_ptr->IsNotNull();
         for (auto i = 0; i < input->size(); ++i) {

@@ -439,8 +450,8 @@ class SegmentExpr : public Expr {
         using IndexInnerType = std::
             conditional_t<std::is_same_v<T, std::string_view>, std::string, T>;
         using Index = index::ScalarIndex<IndexInnerType>;
-        auto pw = segment_->chunk_scalar_index<IndexInnerType>(field_id_, 0);
-        auto* index_ptr = const_cast<Index*>(pw.get());
+        auto scalar_index = dynamic_cast<const Index*>(pinned_index_[0].get());
+        auto* index_ptr = const_cast<Index*>(scalar_index);
         auto valid_result = index_ptr->IsNotNull();
         auto batch_size = input->size();

@@ -889,8 +900,8 @@ class SegmentExpr : public Expr {
                 if (field_type_ == DataType::JSON) {
                     auto pointer = milvus::Json::pointer(nested_path_);
-                    json_pw = segment_->chunk_json_index(field_id_, pointer, i);
+                    json_pw = pinned_index_[i];
                     // check if it is a json flat index, if so, create a json flat index query executor
                     auto json_flat_index =
                         dynamic_cast<const index::JsonFlatIndex*>(

@@ -909,9 +920,9 @@ class SegmentExpr : public Expr {
                         index_ptr = dynamic_cast<Index*>(json_index);
                     }
                 } else {
-                    pw = segment_->chunk_scalar_index<IndexInnerType>(field_id_,
-                                                                      i);
-                    index_ptr = const_cast<Index*>(pw.get());
+                    auto scalar_index =
+                        dynamic_cast<const Index*>(pinned_index_[i].get());
+                    index_ptr = const_cast<Index*>(scalar_index);
                 }
                 cached_index_chunk_res_ = std::make_shared<TargetBitmap>(
                     std::move(func(index_ptr, values...)));

@@ -1046,9 +1057,9 @@ class SegmentExpr : public Expr {
                         element_type);
                 }
             }
-            auto pw =
-                segment_->chunk_scalar_index<IndexInnerType>(field_id_, 0);
-            auto* index_ptr = const_cast<Index*>(pw.get());
+            auto scalar_index =
+                dynamic_cast<const Index*>(pinned_index_[0].get());
+            auto* index_ptr = const_cast<Index*>(scalar_index);
             const auto& res = index_ptr->IsNotNull();
             for (auto i = 0; i < batch_size; ++i) {
                 valid_result[i] = res[input[i]];

@@ -1176,9 +1187,9 @@ class SegmentExpr : public Expr {
         // It avoids indexing execute for every batch because indexing
         // executing costs quite much time.
         if (cached_index_chunk_id_ != i) {
-            auto pw =
-                segment_->chunk_scalar_index<IndexInnerType>(field_id_, i);
-            auto* index_ptr = const_cast<Index*>(pw.get());
+            auto scalar_index =
+                dynamic_cast<const Index*>(pinned_index_[i].get());
+            auto* index_ptr = const_cast<Index*>(scalar_index);
             auto execute_sub_batch = [](Index* index_ptr) {
                 TargetBitmap res = index_ptr->IsNotNull();
                 return res;

@@ -1215,9 +1226,9 @@ class SegmentExpr : public Expr {
         using Index = index::ScalarIndex<IndexInnerType>;
         for (size_t i = current_index_chunk_; i < num_index_chunk_; i++) {
-            auto pw =
-                segment_->chunk_scalar_index<IndexInnerType>(field_id_, i);
-            auto* index_ptr = const_cast<Index*>(pw.get());
+            auto scalar_index =
+                dynamic_cast<const Index*>(pinned_index_[i].get());
+            auto* index_ptr = const_cast<Index*>(scalar_index);
             func(index_ptr, values...);
         }
     }

@@ -1235,9 +1246,9 @@ class SegmentExpr : public Expr {
         using Index = index::ScalarIndex<IndexInnerType>;
         if (op == OpType::Match || op == OpType::InnerMatch ||
             op == OpType::PostfixMatch) {
-            auto pw = segment_->chunk_scalar_index<IndexInnerType>(
-                field_id_, current_index_chunk_);
-            auto* index_ptr = const_cast<Index*>(pw.get());
+            auto scalar_index = dynamic_cast<const Index*>(
+                pinned_index_[current_index_chunk_].get());
+            auto* index_ptr = const_cast<Index*>(scalar_index);
             // 1, index support regex query and try use it, then index handles the query;
             // 2, index has raw data, then call index.Reverse_Lookup to handle the query;
             return (index_ptr->TryUseRegexQuery() &&

@@ -1256,9 +1267,9 @@ class SegmentExpr : public Expr {
         using Index = index::ScalarIndex<IndexInnerType>;
         for (size_t i = current_index_chunk_; i < num_index_chunk_; i++) {
-            auto pw =
-                segment_->chunk_scalar_index<IndexInnerType>(field_id_, i);
-            auto* index_ptr = const_cast<Index*>(pw.get());
+            auto scalar_index =
+                dynamic_cast<const Index*>(pinned_index_[i].get());
+            auto* index_ptr = const_cast<Index*>(scalar_index);
             if (!index_ptr->HasRawData()) {
                 return false;
             }

@@ -1272,17 +1283,6 @@ class SegmentExpr : public Expr {
         use_index_ = false;
     }

-    bool
-    CanUseNgramIndex(FieldId field_id) const {
-        return segment_->HasNgramIndex(field_id);
-    }
-
-    bool
-    CanUseNgramIndexForJson(FieldId field_id,
-                            const std::string& nested_path) const {
-        return segment_->HasNgramIndexForJson(field_id, nested_path);
-    }
-
     bool
     PlanUseJsonStats(EvalCtx& context) const {
         return context.get_exec_context()

@@ -1320,6 +1320,7 @@ class SegmentExpr : public Expr {
     // sometimes need to skip index and using raw data
     // default true means use index as much as possible
     bool use_index_{true};
+    std::vector<PinWrapper<const index::IndexBase*>> pinned_index_{};

     int64_t active_count_{0};
     int64_t num_data_chunk_{0};

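The PinIndex helper added above returns one PinWrapper per index chunk, so
SegmentExpr can treat the vector's size as the chunk count and hold the pins
for its own lifetime. A compact, self-contained sketch of that contract
(simplified types; a shared_ptr stands in for the pin):

    #include <cstddef>
    #include <memory>
    #include <utility>
    #include <vector>

    struct IndexBase {
        virtual ~IndexBase() = default;
    };

    // Each element both exposes an index chunk and keeps it alive.
    using PinnedIndexes = std::vector<std::shared_ptr<const IndexBase>>;

    struct SegmentExprSketch {
        PinnedIndexes pinned_index_;
        bool is_index_mode_ = false;
        std::size_t num_index_chunk_ = 0;

        explicit SegmentExprSketch(PinnedIndexes pinned)
            : pinned_index_(std::move(pinned)) {
            if (!pinned_index_.empty()) {
                is_index_mode_ = true;
                // one pinned entry per index chunk
                num_index_chunk_ = pinned_index_.size();
            }
        }
    };  // pins are released when the expression is destroyed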

@@ -37,8 +37,7 @@ PhyUnaryRangeFilterExpr::CanUseIndexForArray() {
     using Index = index::ScalarIndex<IndexInnerType>;

     for (size_t i = current_index_chunk_; i < num_index_chunk_; i++) {
-        auto pw = segment_->chunk_scalar_index<IndexInnerType>(field_id_, i);
-        auto index_ptr = const_cast<Index*>(pw.get());
+        auto index_ptr = dynamic_cast<const Index*>(pinned_index_[i].get());

         if (index_ptr->GetIndexType() ==
             milvus::index::ScalarIndexType::HYBRID ||

@@ -201,8 +200,7 @@ PhyUnaryRangeFilterExpr::Eval(EvalCtx& context, VectorPtr& result) {
         case DataType::JSON: {
             auto val_type = expr_->val_.val_case();
             auto val_type_inner = FromValCase(val_type);
-            if (CanExecNgramMatchForJson(val_type_inner) &&
-                !has_offset_input_) {
+            if (CanExecNgramMatchForJson() && !has_offset_input_) {
                 auto res = ExecNgramMatch();
                 // If nullopt is returned, it means the query cannot be
                 // optimized by ngram index. Forward it to the normal path.

@@ -1250,7 +1248,7 @@ PhyUnaryRangeFilterExpr::ExecRangeVisitorImpl(EvalCtx& context) {
                 fmt::format("match query does not support iterative filter"));
         }
         return ExecTextMatch();
-    } else if (CanExecNgramMatch(expr_->op_type_)) {
+    } else if (CanExecNgramMatch()) {
         auto res = ExecNgramMatch();
         // If nullopt is returned, it means the query cannot be
         // optimized by ngram index. Forward it to the normal path.

@@ -1705,16 +1703,13 @@ PhyUnaryRangeFilterExpr::CanUseIndex() {
         is_index_mode_ && SegmentExpr::CanUseIndex<T>(expr_->op_type_) &&
         // Ngram index should be used in specific execution path (CanExecNgramMatch -> ExecNgramMatch).
         // TODO: if multiple indexes are supported, this logic should be changed
-        !segment_->HasNgramIndex(field_id_);
+        pinned_ngram_index_.get() == nullptr;
     return use_index_;
 }

 bool
 PhyUnaryRangeFilterExpr::CanUseIndexForJson(DataType val_type) {
-    auto has_index =
-        segment_->HasIndex(field_id_,
-                           milvus::Json::pointer(expr_->column_.nested_path_),
-                           val_type);
+    bool has_index = pinned_index_.size() > 0;
     switch (val_type) {
         case DataType::STRING:
         case DataType::VARCHAR:

@@ -1832,24 +1827,13 @@ PhyUnaryRangeFilterExpr::ExecTextMatch() {
 };

 bool
-PhyUnaryRangeFilterExpr::CanExecNgramMatch(proto::plan::OpType op_type) {
-    return (op_type == proto::plan::OpType::InnerMatch ||
-            op_type == proto::plan::OpType::Match ||
-            op_type == proto::plan::OpType::PrefixMatch ||
-            op_type == proto::plan::OpType::PostfixMatch) &&
-           !has_offset_input_ && CanUseNgramIndex(field_id_);
+PhyUnaryRangeFilterExpr::CanExecNgramMatch() {
+    return pinned_ngram_index_.get() != nullptr && !has_offset_input_;
 }

 bool
-PhyUnaryRangeFilterExpr::CanExecNgramMatchForJson(DataType val_type) {
-    return (val_type == DataType::STRING || val_type == DataType::VARCHAR) &&
-           (expr_->op_type_ == proto::plan::OpType::InnerMatch ||
-            expr_->op_type_ == proto::plan::OpType::Match ||
-            expr_->op_type_ == proto::plan::OpType::PrefixMatch ||
-            expr_->op_type_ == proto::plan::OpType::PostfixMatch) &&
-           !has_offset_input_ &&
-           CanUseNgramIndexForJson(
-               field_id_, milvus::Json::pointer(expr_->column_.nested_path_));
+PhyUnaryRangeFilterExpr::CanExecNgramMatchForJson() {
+    return pinned_ngram_index_.get() != nullptr && !has_offset_input_;
 }

 std::optional<VectorPtr>

@@ -1866,14 +1850,7 @@ PhyUnaryRangeFilterExpr::ExecNgramMatch() {
     }

     if (cached_ngram_match_res_ == nullptr) {
-        PinWrapper<index::NgramInvertedIndex*> pinned_index;
-        if (expr_->column_.data_type_ == DataType::JSON) {
-            pinned_index = segment_->GetNgramIndexForJson(
-                field_id_, milvus::Json::pointer(expr_->column_.nested_path_));
-        } else {
-            pinned_index = segment_->GetNgramIndex(field_id_);
-        }
-        index::NgramInvertedIndex* index = pinned_index.get();
+        auto index = pinned_ngram_index_.get();
         AssertInfo(index != nullptr,
                    "ngram index should not be null, field_id: {}",
                    field_id_.get());


@@ -752,6 +752,26 @@ class PhyUnaryRangeFilterExpr : public SegmentExpr {
               batch_size,
               consistency_level),
           expr_(expr) {
+        auto val_type = FromValCase(expr_->val_.val_case());
+        if ((val_type == DataType::STRING || val_type == DataType::VARCHAR) &&
+            (expr_->op_type_ == proto::plan::OpType::InnerMatch ||
+             expr_->op_type_ == proto::plan::OpType::Match ||
+             expr_->op_type_ == proto::plan::OpType::PrefixMatch ||
+             expr_->op_type_ == proto::plan::OpType::PostfixMatch)) {
+            // try to pin ngram index for json
+            auto field_id = expr_->column_.field_id_;
+            auto schema = segment->get_schema();
+            auto field_meta = schema[field_id];
+            if (field_meta.is_json()) {
+                auto pointer =
+                    milvus::Json::pointer(expr_->column_.nested_path_);
+                pinned_ngram_index_ =
+                    segment->GetNgramIndexForJson(field_id, pointer);
+            } else {
+                pinned_ngram_index_ = segment->GetNgramIndex(field_id);
+            }
+        }
     }

     void

@@ -858,10 +878,10 @@ class PhyUnaryRangeFilterExpr : public SegmentExpr {
     ExecTextMatch();

     bool
-    CanExecNgramMatch(proto::plan::OpType op_type);
+    CanExecNgramMatch();

     bool
-    CanExecNgramMatchForJson(DataType val_type);
+    CanExecNgramMatchForJson();

     std::optional<VectorPtr>
     ExecNgramMatch();

@@ -874,6 +894,7 @@ class PhyUnaryRangeFilterExpr : public SegmentExpr {
     int64_t overflow_check_pos_{0};
     bool arg_inited_{false};
     SingleElement value_arg_;
+    PinWrapper<index::NgramInvertedIndex*> pinned_ngram_index_{nullptr};
 };

 }  // namespace exec
 }  // namespace milvus


@@ -21,6 +21,7 @@
 #include "cachinglayer/CacheSlot.h"
 #include "common/QueryInfo.h"
 #include "common/Types.h"
+#include "index/ScalarIndex.h"
 #include "knowhere/index/index_node.h"
 #include "segcore/SegmentInterface.h"
 #include "segcore/SegmentGrowingImpl.h"

@@ -84,18 +85,24 @@ class SealedDataGetter : public DataGetter<T> {
         int64_t,
         PinWrapper<std::pair<std::vector<std::string_view>, FixedVector<bool>>>>
         pw_map_;
+    PinWrapper<const index::IndexBase*> index_ptr_;
     // Getting str_view from segment is cpu-costly, this map is to cache this view for performance

  public:
     SealedDataGetter(const segcore::SegmentSealed& segment, FieldId& field_id)
         : segment_(segment), field_id_(field_id) {
         from_data_ = segment_.HasFieldData(field_id_);
-        if (!from_data_ && !segment_.HasIndex(field_id_)) {
-            ThrowInfo(
-                UnexpectedError,
-                "The segment:{} used to init data getter has no effective "
-                "data source, neither"
-                "index or data",
-                segment_.get_segment_id());
+        if (!from_data_) {
+            auto index = segment_.PinIndex(field_id_);
+            if (index.empty()) {
+                ThrowInfo(
+                    UnexpectedError,
+                    "The segment:{} used to init data getter has no effective "
+                    "data source, neither"
+                    "index or data",
+                    segment_.get_segment_id());
+            }
+            index_ptr_ = std::move(index[0]);
         }
     }

@@ -130,8 +137,10 @@ class SealedDataGetter : public DataGetter<T> {
             }
         } else {
             // null is not supported for indexed fields
-            auto pw = segment_.chunk_scalar_index<T>(field_id_, 0);
-            auto* chunk_index = pw.get();
+            AssertInfo(index_ptr_.get() != nullptr,
+                       "indexed field should have only one index");
+            auto chunk_index =
+                dynamic_cast<const index::ScalarIndex<T>*>(index_ptr_.get());
             auto raw = chunk_index->Reverse_Lookup(idx);
             AssertInfo(raw.has_value(), "field data not found");
             return raw.value();

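The getter change above follows the same pattern as the expressions: pin the
index once at construction time and serve reads through Reverse_Lookup when
the segment has no raw column. A simplified, self-contained sketch of that
fallback (stand-in types, not the real ScalarIndex interface):

    #include <cstdint>
    #include <optional>
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct ScalarIndexSketch {
        std::vector<std::string> stored;  // pretend the index kept raw values
        std::optional<std::string>
        Reverse_Lookup(int64_t off) const {
            if (off < 0 || off >= static_cast<int64_t>(stored.size())) {
                return std::nullopt;
            }
            return stored[off];
        }
    };

    struct SealedDataGetterSketch {
        const ScalarIndexSketch* index_ptr_;  // pinned for object lifetime

        explicit SealedDataGetterSketch(const ScalarIndexSketch* idx)
            : index_ptr_(idx) {
            if (idx == nullptr) {
                throw std::runtime_error(
                    "no effective data source, neither index nor data");
            }
        }

        std::string
        Get(int64_t idx) const {
            auto raw = index_ptr_->Reverse_Lookup(idx);
            if (!raw.has_value()) {
                throw std::runtime_error("field data not found");
            }
            return raw.value();
        }
    };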

@@ -44,6 +44,7 @@
 #include "common/Tracer.h"
 #include "common/Types.h"
 #include "common/resource_c.h"
+#include "folly/Synchronized.h"
 #include "monitor/scope_metric.h"
 #include "google/protobuf/message_lite.h"
 #include "index/Index.h"

@@ -114,7 +115,8 @@ ChunkedSegmentSealedImpl::LoadVecIndex(const LoadIndexInfo& info) {
         !get_bit(index_ready_bitset_, field_id),
         "vector index has been exist at " + std::to_string(field_id.get()));
     LOG_INFO(
-        "Before setting field_bit for field index, fieldID:{}. segmentID:{}, ",
+        "Before setting field_bit for field index, fieldID:{}. "
+        "segmentID:{}, ",
         info.field_id,
         id_);
     auto& field_meta = schema_->operator[](field_id);

@@ -128,8 +130,7 @@ ChunkedSegmentSealedImpl::LoadVecIndex(const LoadIndexInfo& info) {
             info.enable_mmap);
     if (request.has_raw_data && get_bit(field_data_ready_bitset_, field_id)) {
-        fields_.wlock()->erase(field_id);
-        set_bit(field_data_ready_bitset_, field_id, false);
+        fields_.rlock()->at(field_id)->ManualEvictCache();
     }
     if (get_bit(binlog_index_bitset_, field_id)) {
         set_bit(binlog_index_bitset_, field_id, false);

@@ -157,7 +158,8 @@ ChunkedSegmentSealedImpl::LoadScalarIndex(const LoadIndexInfo& info) {
     // if segment is pk sorted, user created indexes bring no performance gain but extra memory usage
     if (is_pk && is_sorted_by_pk_) {
         LOG_INFO(
-            "segment pk sorted, skip user index loading for primary key field");
+            "segment pk sorted, skip user index loading for primary key "
+            "field");
         return;
     }

@@ -171,11 +173,12 @@ ChunkedSegmentSealedImpl::LoadScalarIndex(const LoadIndexInfo& info) {
         if (auto it = info.index_params.find(index::INDEX_TYPE);
             it != info.index_params.end() &&
             it->second == index::NGRAM_INDEX_TYPE) {
-            if (ngram_indexings_.find(field_id) == ngram_indexings_.end()) {
-                ngram_indexings_[field_id] =
+            auto ngram_indexings = ngram_indexings_.wlock();
+            if (ngram_indexings->find(field_id) == ngram_indexings->end()) {
+                (*ngram_indexings)[field_id] =
                     std::unordered_map<std::string, index::CacheIndexBasePtr>();
             }
-            ngram_indexings_[field_id][path] =
+            (*ngram_indexings)[field_id][path] =
                 std::move(const_cast<LoadIndexInfo&>(info).cache_index);
             return;
         } else {

@@ -186,7 +189,7 @@ ChunkedSegmentSealedImpl::LoadScalarIndex(const LoadIndexInfo& info) {
                 std::move(const_cast<LoadIndexInfo&>(info).cache_index);
             index.cast_type =
                 JsonCastType::FromString(info.index_params.at(JSON_CAST_TYPE));
-            json_indices.push_back(std::move(index));
+            json_indices.wlock()->push_back(std::move(index));
             return;
         }
     }

@@ -194,10 +197,17 @@ ChunkedSegmentSealedImpl::LoadScalarIndex(const LoadIndexInfo& info) {
     if (auto it = info.index_params.find(index::INDEX_TYPE);
         it != info.index_params.end() &&
         it->second == index::NGRAM_INDEX_TYPE) {
-        ngram_fields_.insert(field_id);
+        auto [scalar_indexings, ngram_fields] =
+            lock(folly::wlock(scalar_indexings_), folly::wlock(ngram_fields_));
+        ngram_fields->insert(field_id);
+        scalar_indexings->insert(
+            {field_id,
+             std::move(const_cast<LoadIndexInfo&>(info).cache_index)});
+    } else {
+        scalar_indexings_.wlock()->insert(
+            {field_id,
+             std::move(const_cast<LoadIndexInfo&>(info).cache_index)});
     }
-    scalar_indexings_[field_id] =
-        std::move(const_cast<LoadIndexInfo&>(info).cache_index);

     LoadResourceRequest request =
         milvus::index::IndexFactory::GetInstance().ScalarIndexLoadResource(

@@ -215,8 +225,7 @@ ChunkedSegmentSealedImpl::LoadScalarIndex(const LoadIndexInfo& info) {
         !is_pk) {
         // We do not erase the primary key field: if insert record is evicted from memory, when reloading it'll
         // need the pk field again.
-        fields_.wlock()->erase(field_id);
-        set_bit(field_data_ready_bitset_, field_id, false);
+        fields_.rlock()->at(field_id)->ManualEvictCache();
     }
 }

@@ -300,7 +309,8 @@ ChunkedSegmentSealedImpl::load_column_group_data_internal(
             mmap_dir_path,
             merged_in_load_list);
         LOG_INFO(
-            "[StorageV2] segment {} loads column group {} with field ids {} "
+            "[StorageV2] segment {} loads column group {} with field ids "
+            "{} "
            "with "
            "num_rows "
            "{} mmap_dir_path={}",

@@ -497,18 +507,6 @@ ChunkedSegmentSealedImpl::AddFieldDataInfoForSealed(
     field_data_info_ = field_data_info;
 }

-// internal API: support scalar index only
-int64_t
-ChunkedSegmentSealedImpl::num_chunk_index(FieldId field_id) const {
-    auto& field_meta = schema_->operator[](field_id);
-    if (field_meta.is_vector()) {
-        return int64_t(vector_indexings_.is_ready(field_id));
-    }
-
-    return scalar_indexings_.count(field_id);
-}
-
 int64_t
 ChunkedSegmentSealedImpl::num_chunk_data(FieldId field_id) const {
     if (!get_bit(field_data_ready_bitset_, field_id)) {

@@ -654,31 +652,24 @@ ChunkedSegmentSealedImpl::chunk_array_views_by_offsets(
     if (auto column = get_column(field_id)) {
         return column->ArrayViewsByOffsets(chunk_id, offsets);
     }
-    ThrowInfo(
-        ErrorCode::UnexpectedError,
-        "chunk_array_views_by_offsets only used for variable column field ");
-}
-
-PinWrapper<const index::IndexBase*>
-ChunkedSegmentSealedImpl::chunk_index_impl(FieldId field_id,
-                                           int64_t chunk_id) const {
-    std::shared_lock lck(mutex_);
-    AssertInfo(scalar_indexings_.find(field_id) != scalar_indexings_.end(),
-               "Cannot find scalar_indexing with field_id: " +
-                   std::to_string(field_id.get()));
-    auto slot = scalar_indexings_.at(field_id);
-    lck.unlock();
-    auto ca = SemiInlineGet(slot->PinCells({0}));
-    auto index = ca->get_cell_of(0);
-    return PinWrapper<const index::IndexBase*>(ca, index);
+    ThrowInfo(ErrorCode::UnexpectedError,
+              "chunk_array_views_by_offsets only used for variable column "
+              "field ");
 }

 PinWrapper<index::NgramInvertedIndex*>
 ChunkedSegmentSealedImpl::GetNgramIndex(FieldId field_id) const {
     std::shared_lock lck(mutex_);
-    auto iter = scalar_indexings_.find(field_id);
-    if (iter == scalar_indexings_.end()) {
+    auto [scalar_indexings, ngram_fields] =
+        lock(folly::rlock(scalar_indexings_), folly::rlock(ngram_fields_));
+    auto has = ngram_fields->find(field_id);
+    if (has == ngram_fields->end()) {
+        return PinWrapper<index::NgramInvertedIndex*>(nullptr);
+    }
+    auto iter = scalar_indexings->find(field_id);
+    if (iter == scalar_indexings->end()) {
         return PinWrapper<index::NgramInvertedIndex*>(nullptr);
     }
     auto slot = iter->second.get();

@@ -696,23 +687,25 @@ PinWrapper<index::NgramInvertedIndex*>
 ChunkedSegmentSealedImpl::GetNgramIndexForJson(
     FieldId field_id, const std::string& nested_path) const {
     std::shared_lock lck(mutex_);
-    auto iter = ngram_indexings_.find(field_id);
-    if (iter == ngram_indexings_.end() ||
-        iter->second.find(nested_path) == iter->second.end()) {
-        return PinWrapper<index::NgramInvertedIndex*>(nullptr);
-    }
-
-    auto slot = iter->second.at(nested_path).get();
-    lck.unlock();
-
-    auto ca = SemiInlineGet(slot->PinCells({0}));
-    auto index = dynamic_cast<index::NgramInvertedIndex*>(ca->get_cell_of(0));
-    AssertInfo(index != nullptr,
-               "ngram index cache for json is corrupted, field_id: {}, "
-               "nested_path: {}",
-               field_id.get(),
-               nested_path);
-    return PinWrapper<index::NgramInvertedIndex*>(ca, index);
+    return ngram_indexings_.withRLock([&](auto& ngram_indexings) {
+        auto iter = ngram_indexings.find(field_id);
+        if (iter == ngram_indexings.end() ||
+            iter->second.find(nested_path) == iter->second.end()) {
+            return PinWrapper<index::NgramInvertedIndex*>(nullptr);
+        }
+
+        auto slot = iter->second.at(nested_path).get();
+
+        auto ca = SemiInlineGet(slot->PinCells({0}));
+        auto index =
+            dynamic_cast<index::NgramInvertedIndex*>(ca->get_cell_of(0));
+        AssertInfo(index != nullptr,
+                   "ngram index cache for json is corrupted, field_id: {}, "
+                   "nested_path: {}",
+                   field_id.get(),
+                   nested_path);
+        return PinWrapper<index::NgramInvertedIndex*>(ca, index);
+    });
 }

@@ -883,15 +876,42 @@ ChunkedSegmentSealedImpl::DropIndex(const FieldId field_id) {
               "Field id:" + std::to_string(field_id.get()) +
                   " isn't one of system type when drop index");
     auto& field_meta = schema_->operator[](field_id);
-    AssertInfo(field_meta.is_vector(),
-               "Field meta of offset:" + std::to_string(field_id.get()) +
-                   " is not vector type");
+    AssertInfo(!field_meta.is_vector(), "vector field cannot drop index");

     std::unique_lock lck(mutex_);
-    vector_indexings_.drop_field_indexing(field_id);
+    auto [scalar_indexings, ngram_fields] =
+        lock(folly::wlock(scalar_indexings_), folly::wlock(ngram_fields_));
+    scalar_indexings->erase(field_id);
+    ngram_fields->erase(field_id);
     set_bit(index_ready_bitset_, field_id, false);
 }

+void
+ChunkedSegmentSealedImpl::DropJSONIndex(const FieldId field_id,
+                                        const std::string& nested_path) {
+    std::unique_lock lck(mutex_);
+    json_indices.withWLock([&](auto& vec) {
+        vec.erase(std::remove_if(vec.begin(),
+                                 vec.end(),
+                                 [field_id, nested_path](const auto& index) {
+                                     return index.field_id == field_id &&
+                                            index.nested_path == nested_path;
+                                 }),
+                  vec.end());
+    });
+
+    ngram_indexings_.withWLock([&](auto& ngram_indexings) {
+        auto iter = ngram_indexings.find(field_id);
+        if (iter != ngram_indexings.end()) {
+            iter->second.erase(nested_path);
+            if (iter->second.empty()) {
+                ngram_indexings.erase(iter);
+            }
+        }
+    });
+}
+
 void
 ChunkedSegmentSealedImpl::check_search(const query::Plan* plan) const {
     AssertInfo(plan, "Search plan is null");

@@ -1362,8 +1382,9 @@ ChunkedSegmentSealedImpl::ChunkedSegmentSealedImpl(
       field_data_ready_bitset_(schema->size()),
       index_ready_bitset_(schema->size()),
       binlog_index_bitset_(schema->size()),
-      ngram_fields_(schema->size()),
-      scalar_indexings_(schema->size()),
+      ngram_fields_(std::unordered_set<FieldId>(schema->size())),
+      scalar_indexings_(std::unordered_map<FieldId, index::CacheIndexBasePtr>(
+          schema->size())),
       insert_record_(*schema, MAX_ROW_COUNT),
       schema_(schema),
       id_(segment_id),

@@ -1403,9 +1424,9 @@ ChunkedSegmentSealedImpl::bulk_subscript(SystemFieldType system_type,
         id_);
     switch (system_type) {
         case SystemFieldType::Timestamp:
-            AssertInfo(
-                insert_record_.timestamps_.num_chunk() == 1,
-                "num chunk of timestamp not equal to 1 for sealed segment");
+            AssertInfo(insert_record_.timestamps_.num_chunk() == 1,
+                       "num chunk of timestamp not equal to 1 for "
+                       "sealed segment");
             bulk_subscript_impl<Timestamp>(
                 this->insert_record_.timestamps_.get_chunk_data(0),
                 seg_offsets,

@@ -1519,10 +1540,10 @@ ChunkedSegmentSealedImpl::ClearData() {
         index_has_raw_data_.clear();
         system_ready_count_ = 0;
         num_rows_ = std::nullopt;
-        ngram_fields_.clear();
-        scalar_indexings_.clear();
+        ngram_fields_.wlock()->clear();
+        scalar_indexings_.wlock()->clear();
         vector_indexings_.clear();
-        ngram_indexings_.clear();
+        ngram_indexings_.wlock()->clear();
         insert_record_.clear();
         fields_.wlock()->clear();
         variable_fields_avg_size_.clear();

@@ -1575,10 +1596,15 @@ ChunkedSegmentSealedImpl::CreateTextIndex(FieldId field_id) {
                 index->AddTextSealed(std::string(value), is_valid, offset);
             });
     } else {  // fetch raw data from index.
-        auto field_index_iter = scalar_indexings_.find(field_id);
-        AssertInfo(field_index_iter != scalar_indexings_.end(),
-                   "failed to create text index, neither raw data nor "
-                   "index are found");
+        auto field_index_iter =
+            scalar_indexings_.withRLock([&](auto& mapping) {
+                auto iter = mapping.find(field_id);
+                AssertInfo(iter != mapping.end(),
+                           "failed to create text index, neither "
+                           "raw data nor "
+                           "index are found");
+                return iter;
+            });
         auto accessor =
             SemiInlineGet(field_index_iter->second->PinCells({0}));
         auto ptr = accessor->get_cell_of(0);

@@ -1859,14 +1885,19 @@ ChunkedSegmentSealedImpl::bulk_subscript(FieldId field_id,
         return get_raw_data(field_id, field_meta, seg_offsets, count);
     }

+    PinWrapper<const index::IndexBase*> pin_scalar_index_ptr;
+    auto scalar_indexes = PinIndex(field_id);
+    if (!scalar_indexes.empty()) {
+        pin_scalar_index_ptr = std::move(scalar_indexes[0]);
+    }
+
     auto index_has_raw = HasRawData(field_id.get());
     if (!IsVectorDataType(field_meta.get_data_type())) {
         // if field has load scalar index, reverse raw data from index
         if (index_has_raw) {
-            auto index = chunk_index_impl(field_id, 0);
             return ReverseDataFromIndex(
-                index.get(), seg_offsets, count, field_meta);
+                pin_scalar_index_ptr.get(), seg_offsets, count, field_meta);
         }
         return get_raw_data(field_id, field_meta, seg_offsets, count);
     }

@@ -1988,8 +2019,10 @@ ChunkedSegmentSealedImpl::HasRawData(int64_t field_id) const {
     } else if (IsJsonDataType(field_meta.get_data_type())) {
         return get_bit(field_data_ready_bitset_, fieldID);
     } else {
-        auto scalar_index = scalar_indexings_.find(fieldID);
-        if (scalar_index != scalar_indexings_.end()) {
+        auto has_scalar_index = scalar_indexings_.withRLock([&](auto& mapping) {
+            return mapping.find(fieldID) != mapping.end();
+        });
+        if (has_scalar_index) {
             AssertInfo(
                 index_has_raw_data_.find(fieldID) != index_has_raw_data_.end(),
                 "index_has_raw_data_ is not set for fieldID: " +

@@ -2265,7 +2298,8 @@ ChunkedSegmentSealedImpl::generate_interim_index(const FieldId field_id,
             }
             LOG_INFO(
-                "replace binlog with intermin index in segment {}, field {}.",
+                "replace binlog with intermin index in segment {}, "
+                "field {}.",
                 this->get_segment_id(),
                 field_id.get());
         }

@@ -2283,7 +2317,8 @@ void
 ChunkedSegmentSealedImpl::LazyCheckSchema(SchemaPtr sch) {
     if (sch->get_schema_version() > schema_->get_schema_version()) {
         LOG_INFO(
-            "lazy check schema segment {} found newer schema version, current "
+            "lazy check schema segment {} found newer schema version, "
+            "current "
             "schema version {}, new schema version {}",
             id_,
             schema_->get_schema_version(),

@@ -2427,10 +2462,12 @@ ChunkedSegmentSealedImpl::FinishLoad() {
 void
 ChunkedSegmentSealedImpl::fill_empty_field(const FieldMeta& field_meta) {
     auto field_id = field_meta.get_id();
-    LOG_INFO("start fill empty field {} (data type {}) for sealed segment {}",
-             field_meta.get_data_type(),
-             field_id.get(),
-             id_);
+    LOG_INFO(
+        "start fill empty field {} (data type {}) for sealed segment "
+        "{}",
+        field_meta.get_data_type(),
+        field_id.get(),
+        id_);
     int64_t size = num_rows_.value();
     AssertInfo(size > 0, "Chunked Sealed segment must have more than 0 row");
     auto field_data_info = FieldDataInfo(field_id.get(), size, "");

@@ -2470,10 +2507,12 @@ ChunkedSegmentSealedImpl::fill_empty_field(const FieldMeta& field_meta) {
     fields_.wlock()->emplace(field_id, column);
     set_bit(field_data_ready_bitset_, field_id, true);

-    LOG_INFO("fill empty field {} (data type {}) for growing segment {} done",
-             field_meta.get_data_type(),
-             field_id.get(),
-             id_);
+    LOG_INFO(
+        "fill empty field {} (data type {}) for growing segment {} "
+        "done",
+        field_meta.get_data_type(),
+        field_id.get(),
+        id_);
 }

 }  // namespace milvus::segcore
} // namespace milvus::segcore } // namespace milvus::segcore

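The folly::Synchronized pattern used throughout this file is worth calling
out: LoadScalarIndex, DropIndex and PinIndex all touch the same pair of
containers, and folly::lock acquires both guards with a deadlock-avoidance
algorithm instead of relying on a fixed lock order. A standalone sketch of
the pattern (assumes folly is available; the map payloads are simplified):

    #include <folly/Synchronized.h>
    #include <string>
    #include <unordered_map>
    #include <unordered_set>

    struct IndexRegistrySketch {
        folly::Synchronized<std::unordered_map<int, std::string>>
            scalar_indexings;
        folly::Synchronized<std::unordered_set<int>> ngram_fields;

        void
        Drop(int field_id) {
            // both write locks taken atomically, mirroring DropIndex above
            auto [indexings, ngrams] = folly::lock(
                folly::wlock(scalar_indexings), folly::wlock(ngram_fields));
            indexings->erase(field_id);
            ngrams->erase(field_id);
        }  // guards released here

        bool
        Has(int field_id) const {
            return scalar_indexings.withRLock([&](const auto& mapping) {
                return mapping.find(field_id) != mapping.end();
            });
        }
    };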

@@ -27,6 +27,7 @@
 #include "SegmentSealed.h"
 #include "common/EasyAssert.h"
 #include "common/Schema.h"
+#include "folly/Synchronized.h"
 #include "google/protobuf/message_lite.h"
 #include "mmap/Types.h"
 #include "common/Types.h"

@@ -35,6 +36,7 @@
 #include "cachinglayer/CacheSlot.h"
 #include "segcore/IndexConfigGenerator.h"
 #include "segcore/SegcoreConfig.h"
+#include "folly/concurrency/ConcurrentHashMap.h"

 namespace milvus::segcore {

@@ -64,6 +66,9 @@ class ChunkedSegmentSealedImpl : public SegmentSealed {
     void
     DropIndex(const FieldId field_id) override;
     void
+    DropJSONIndex(const FieldId field_id,
+                  const std::string& nested_path) override;
+    void
     DropFieldData(const FieldId field_id) override;
     bool
     HasIndex(FieldId field_id) const override;

@@ -73,6 +78,25 @@ class ChunkedSegmentSealedImpl : public SegmentSealed {
     std::pair<std::shared_ptr<ChunkedColumnInterface>, bool>
     GetFieldDataIfExist(FieldId field_id) const;

+    std::vector<PinWrapper<const index::IndexBase*>>
+    PinIndex(FieldId field_id, bool include_ngram = false) const override {
+        auto [scalar_indexings, ngram_fields] =
+            lock(folly::wlock(scalar_indexings_), folly::wlock(ngram_fields_));
+        if (!include_ngram) {
+            if (ngram_fields->find(field_id) != ngram_fields->end()) {
+                return {};
+            }
+        }
+        auto iter = scalar_indexings->find(field_id);
+        if (iter == scalar_indexings->end()) {
+            return {};
+        }
+        auto ca = SemiInlineGet(iter->second->PinCells({0}));
+        auto index = ca->get_cell_of(0);
+        return {PinWrapper<const index::IndexBase*>(ca, index)};
+    }
+
     bool
     Contain(const PkType& pk) const override {
         return insert_record_.contain(pk);

@@ -126,21 +150,6 @@ class ChunkedSegmentSealedImpl : public SegmentSealed {
         return iter->second.get();
     }

-    bool
-    HasNgramIndex(FieldId field_id) const override {
-        std::shared_lock lck(mutex_);
-        return ngram_fields_.find(field_id) != ngram_fields_.end();
-    }
-
-    bool
-    HasNgramIndexForJson(FieldId field_id,
-                         const std::string& nested_path) const override {
-        std::shared_lock lck(mutex_);
-        return ngram_indexings_.find(field_id) != ngram_indexings_.end() &&
-               ngram_indexings_.at(field_id).find(nested_path) !=
-                   ngram_indexings_.at(field_id).end();
-    }
-
     PinWrapper<index::NgramInvertedIndex*>
     GetNgramIndex(FieldId field_id) const override;

@@ -231,9 +240,6 @@ class ChunkedSegmentSealedImpl : public SegmentSealed {
                  callback) const;

  public:
-    int64_t
-    num_chunk_index(FieldId field_id) const override;
-
     // count of chunk that has raw data
     int64_t
     num_chunk_data(FieldId field_id) const override;

@@ -326,9 +332,6 @@ class ChunkedSegmentSealedImpl : public SegmentSealed {
         int64_t chunk_id,
         const FixedVector<int32_t>& offsets) const override;

-    PinWrapper<const index::IndexBase*>
-    chunk_index_impl(FieldId field_id, int64_t chunk_id) const override;
-
     // Calculate: output[i] = Vec[seg_offset[i]],
     // where Vec is determined from field_offset
     void

@@ -501,17 +504,17 @@ class ChunkedSegmentSealedImpl : public SegmentSealed {
     std::optional<int64_t> num_rows_;

     // ngram indexings for json type
-    std::unordered_map<
-        FieldId,
-        std::unordered_map<std::string, index::CacheIndexBasePtr>>
-        ngram_indexings_;
+    folly::Synchronized<std::unordered_map<
+        FieldId,
+        std::unordered_map<std::string, index::CacheIndexBasePtr>>>
+        ngram_indexings_;

     // fields that has ngram index
-    std::unordered_set<FieldId> ngram_fields_{};
+    folly::Synchronized<std::unordered_set<FieldId>> ngram_fields_;

     // scalar field index
-    std::unordered_map<FieldId, index::CacheIndexBasePtr> scalar_indexings_;
+    folly::Synchronized<std::unordered_map<FieldId, index::CacheIndexBasePtr>>
+        scalar_indexings_;

     // vector field index
     SealedIndexingRecord vector_indexings_;


@@ -18,24 +18,32 @@
 namespace milvus::segcore {
 template <typename T>
 MultipleChunkDataAccessor
-SegmentChunkReader::GetChunkDataAccessor(FieldId field_id,
-                                         bool index,
-                                         int64_t& current_chunk_id,
-                                         int64_t& current_chunk_pos) const {
+SegmentChunkReader::GetMultipleChunkDataAccessor(
+    FieldId field_id,
+    int64_t& current_chunk_id,
+    int64_t& current_chunk_pos,
+    const std::vector<PinWrapper<const index::IndexBase*>>& pinned_index)
+    const {
+    const index::IndexBase* index = nullptr;
+    if (current_chunk_id < pinned_index.size()) {
+        index = pinned_index[current_chunk_id].get();
+    }
     if (index) {
-        auto pw = segment_->chunk_scalar_index<T>(field_id, current_chunk_id);
-        if (pw.get()->HasRawData()) {
-            return [&, pw = std::move(pw)]() -> const data_access_type {
-                auto index = pw.get();
-                if (current_chunk_pos >= active_count_) {
-                    return std::nullopt;
-                }
-                auto raw = index->Reverse_Lookup(current_chunk_pos++);
-                if (!raw.has_value()) {
-                    return std::nullopt;
-                }
-                return raw.value();
-            };
+        auto index_ptr = dynamic_cast<const index::ScalarIndex<T>*>(index);
+        if (index_ptr->HasRawData()) {
+            return
+                [&,
+                 index_ptr = std::move(index_ptr)]() -> const data_access_type {
+                    if (current_chunk_pos >= active_count_) {
+                        return std::nullopt;
+                    }
+                    auto raw = index_ptr->Reverse_Lookup(current_chunk_pos++);
+                    if (!raw.has_value()) {
+                        return std::nullopt;
+                    }
+                    return raw.value();
+                };
         }
     }
     // pw is captured by value, each time we need to access a new chunk, we need to

@@ -69,21 +77,27 @@ SegmentChunkReader::GetChunkDataAccessor(FieldId field_id,

 template <>
 MultipleChunkDataAccessor
-SegmentChunkReader::GetChunkDataAccessor<std::string>(
+SegmentChunkReader::GetMultipleChunkDataAccessor<std::string>(
     FieldId field_id,
-    bool index,
     int64_t& current_chunk_id,
-    int64_t& current_chunk_pos) const {
+    int64_t& current_chunk_pos,
+    const std::vector<PinWrapper<const index::IndexBase*>>& pinned_index)
+    const {
+    const index::IndexBase* index = nullptr;
+    if (current_chunk_id < pinned_index.size()) {
+        index = pinned_index[current_chunk_id].get();
+    }
     if (index) {
-        auto pw = segment_->chunk_scalar_index<std::string>(field_id,
-                                                            current_chunk_id);
-        if (pw.get()->HasRawData()) {
-            return [&, pw = std::move(pw)]() mutable -> const data_access_type {
-                auto index = pw.get();
+        auto index_ptr =
+            dynamic_cast<const index::ScalarIndex<std::string>*>(index);
+        if (index_ptr->HasRawData()) {
+            return [&, index_ptr = std::move(index_ptr)]() mutable
+                       -> const data_access_type {
                 if (current_chunk_pos >= active_count_) {
                     return std::nullopt;
                 }
-                auto raw = index->Reverse_Lookup(current_chunk_pos++);
+                auto raw = index_ptr->Reverse_Lookup(current_chunk_pos++);
                 if (!raw.has_value()) {
                     return std::nullopt;
                 }

@@ -156,37 +170,41 @@ SegmentChunkReader::GetChunkDataAccessor<std::string>(
 }

 MultipleChunkDataAccessor
-SegmentChunkReader::GetChunkDataAccessor(DataType data_type,
-                                         FieldId field_id,
-                                         bool index,
-                                         int64_t& current_chunk_id,
-                                         int64_t& current_chunk_pos) const {
+SegmentChunkReader::GetMultipleChunkDataAccessor(
+    DataType data_type,
+    FieldId field_id,
+    int64_t& current_chunk_id,
+    int64_t& current_chunk_pos,
+    const std::vector<PinWrapper<const index::IndexBase*>>& pinned_index)
+    const {
     switch (data_type) {
         case DataType::BOOL:
-            return GetChunkDataAccessor<bool>(
-                field_id, index, current_chunk_id, current_chunk_pos);
+            return GetMultipleChunkDataAccessor<bool>(
+                field_id, current_chunk_id, current_chunk_pos, pinned_index);
         case DataType::INT8:
-            return GetChunkDataAccessor<int8_t>(
-                field_id, index, current_chunk_id, current_chunk_pos);
+            return GetMultipleChunkDataAccessor<int8_t>(
+                field_id, current_chunk_id, current_chunk_pos, pinned_index);
         case DataType::INT16:
-            return GetChunkDataAccessor<int16_t>(
-                field_id, index, current_chunk_id, current_chunk_pos);
+            return GetMultipleChunkDataAccessor<int16_t>(
+                field_id, current_chunk_id, current_chunk_pos, pinned_index);
         case DataType::INT32:
-            return GetChunkDataAccessor<int32_t>(
-                field_id, index, current_chunk_id, current_chunk_pos);
+            return GetMultipleChunkDataAccessor<int32_t>(
+                field_id, current_chunk_id, current_chunk_pos, pinned_index);
         case DataType::INT64:
+            return GetMultipleChunkDataAccessor<int64_t>(
+                field_id, current_chunk_id, current_chunk_pos, pinned_index);
         case DataType::TIMESTAMPTZ:
-            return GetChunkDataAccessor<int64_t>(
-                field_id, index, current_chunk_id, current_chunk_pos);
+            return GetMultipleChunkDataAccessor<int64_t>(
+                field_id, current_chunk_id, current_chunk_pos, pinned_index);
        case DataType::FLOAT:
-            return GetChunkDataAccessor<float>(
-                field_id, index, current_chunk_id, current_chunk_pos);
+            return GetMultipleChunkDataAccessor<float>(
+                field_id, current_chunk_id, current_chunk_pos, pinned_index);
        case DataType::DOUBLE:
-            return GetChunkDataAccessor<double>(
-                field_id, index, current_chunk_id, current_chunk_pos);
+            return GetMultipleChunkDataAccessor<double>(
+                field_id, current_chunk_id, current_chunk_pos, pinned_index);
        case DataType::VARCHAR: {
-            return GetChunkDataAccessor<std::string>(
-                field_id, index, current_chunk_id, current_chunk_pos);
+            return GetMultipleChunkDataAccessor<std::string>(
+                field_id, current_chunk_id, current_chunk_pos, pinned_index);
        }
        default:
            ThrowInfo(DataTypeInvalid, "unsupported data type: {}", data_type);

@@ -195,21 +213,23 @@ SegmentChunkReader::GetChunkDataAccessor(DataType data_type,
template <typename T> template <typename T>
ChunkDataAccessor ChunkDataAccessor
SegmentChunkReader::GetChunkDataAccessor(FieldId field_id, SegmentChunkReader::GetChunkDataAccessor(
int chunk_id, FieldId field_id,
int data_barrier) const { int chunk_id,
int data_barrier,
const std::vector<PinWrapper<const index::IndexBase*>>& pinned_index)
const {
if (chunk_id >= data_barrier) { if (chunk_id >= data_barrier) {
auto pw = segment_->chunk_scalar_index<T>(field_id, chunk_id); auto index = pinned_index[chunk_id].get();
if (pw.get()->HasRawData()) { auto index_ptr = dynamic_cast<const index::ScalarIndex<T>*>(index);
return if (index->HasRawData()) {
[pw = std::move(pw)](int i) mutable -> const data_access_type { return [index_ptr](int i) mutable -> const data_access_type {
auto index = pw.get(); auto raw = index_ptr->Reverse_Lookup(i);
auto raw = index->Reverse_Lookup(i); if (!raw.has_value()) {
if (!raw.has_value()) { return std::nullopt;
return std::nullopt; }
} return raw.value();
return raw.value(); };
};
} }
} }
auto pw = segment_->chunk_data<T>(field_id, chunk_id); auto pw = segment_->chunk_data<T>(field_id, chunk_id);
@ -226,22 +246,24 @@ SegmentChunkReader::GetChunkDataAccessor(FieldId field_id,
template <> template <>
ChunkDataAccessor ChunkDataAccessor
SegmentChunkReader::GetChunkDataAccessor<std::string>(FieldId field_id, SegmentChunkReader::GetChunkDataAccessor<std::string>(
int chunk_id, FieldId field_id,
int data_barrier) const { int chunk_id,
int data_barrier,
const std::vector<PinWrapper<const index::IndexBase*>>& pinned_index)
const {
if (chunk_id >= data_barrier) { if (chunk_id >= data_barrier) {
auto pw = segment_->chunk_scalar_index<std::string>(field_id, chunk_id); auto index = pinned_index[chunk_id].get();
auto indexing = pw.get(); auto index_ptr =
if (indexing->HasRawData()) { dynamic_cast<const index::ScalarIndex<std::string>*>(index);
return if (index_ptr->HasRawData()) {
[pw = std::move(pw)](int i) mutable -> const data_access_type { return [index_ptr](int i) mutable -> const data_access_type {
auto index = pw.get(); auto raw = index_ptr->Reverse_Lookup(i);
auto raw = index->Reverse_Lookup(i); if (!raw.has_value()) {
if (!raw.has_value()) { return std::nullopt;
return std::nullopt; }
} return raw.value();
return raw.value(); };
};
} }
} }
if (segment_->type() == SegmentType::Growing && if (segment_->type() == SegmentType::Growing &&
@ -271,36 +293,40 @@ SegmentChunkReader::GetChunkDataAccessor<std::string>(FieldId field_id,
} }
ChunkDataAccessor ChunkDataAccessor
SegmentChunkReader::GetChunkDataAccessor(DataType data_type, SegmentChunkReader::GetChunkDataAccessor(
FieldId field_id, DataType data_type,
int chunk_id, FieldId field_id,
int data_barrier) const { int chunk_id,
int data_barrier,
const std::vector<PinWrapper<const index::IndexBase*>>& pinned_index)
const {
switch (data_type) { switch (data_type) {
case DataType::BOOL: case DataType::BOOL:
return GetChunkDataAccessor<bool>(field_id, chunk_id, data_barrier); return GetChunkDataAccessor<bool>(
field_id, chunk_id, data_barrier, pinned_index);
case DataType::INT8: case DataType::INT8:
return GetChunkDataAccessor<int8_t>( return GetChunkDataAccessor<int8_t>(
field_id, chunk_id, data_barrier); field_id, chunk_id, data_barrier, pinned_index);
case DataType::INT16: case DataType::INT16:
return GetChunkDataAccessor<int16_t>( return GetChunkDataAccessor<int16_t>(
field_id, chunk_id, data_barrier); field_id, chunk_id, data_barrier, pinned_index);
case DataType::INT32: case DataType::INT32:
return GetChunkDataAccessor<int32_t>( return GetChunkDataAccessor<int32_t>(
field_id, chunk_id, data_barrier); field_id, chunk_id, data_barrier, pinned_index);
case DataType::TIMESTAMPTZ: case DataType::TIMESTAMPTZ:
case DataType::INT64: case DataType::INT64:
return GetChunkDataAccessor<int64_t>( return GetChunkDataAccessor<int64_t>(
field_id, chunk_id, data_barrier); field_id, chunk_id, data_barrier, pinned_index);
case DataType::FLOAT: case DataType::FLOAT:
return GetChunkDataAccessor<float>( return GetChunkDataAccessor<float>(
field_id, chunk_id, data_barrier); field_id, chunk_id, data_barrier, pinned_index);
case DataType::DOUBLE: case DataType::DOUBLE:
return GetChunkDataAccessor<double>( return GetChunkDataAccessor<double>(
field_id, chunk_id, data_barrier); field_id, chunk_id, data_barrier, pinned_index);
case DataType::VARCHAR: case DataType::VARCHAR:
case DataType::TEXT: { case DataType::TEXT: {
return GetChunkDataAccessor<std::string>( return GetChunkDataAccessor<std::string>(
field_id, chunk_id, data_barrier); field_id, chunk_id, data_barrier, pinned_index);
} }
default: default:
ThrowInfo(DataTypeInvalid, "unsupported data type: {}", data_type); ThrowInfo(DataTypeInvalid, "unsupported data type: {}", data_type);
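With this change the accessors receive indexes pre-pinned by the caller instead of fetching them per chunk; when a pinned index retains raw data, values come back through Reverse_Lookup, otherwise the reader falls back to the raw column. A minimal Go sketch of that accessor shape, with all types hypothetical:

package main

import "fmt"

// pinnedIndex stands in for PinWrapper<const index::IndexBase*>: a nil entry
// means "no index pinned for this chunk"; raw == nil models an index built
// without raw data.
type pinnedIndex struct{ raw []string }

func (p *pinnedIndex) hasRawData() bool { return p != nil && p.raw != nil }

// chunkAccessor mirrors the MultipleChunkDataAccessor shape: a closure that
// yields one value per call, serving it from the pinned index when the index
// carries raw data (the Reverse_Lookup path) and from the raw column
// otherwise, and reporting ok=false once all rows are consumed.
func chunkAccessor(pinned []*pinnedIndex, chunks [][]string) func() (string, bool) {
	chunk, pos := 0, 0
	return func() (string, bool) {
		for chunk < len(chunks) {
			if pos < len(chunks[chunk]) {
				var v string
				if pinned[chunk].hasRawData() {
					v = pinned[chunk].raw[pos] // Reverse_Lookup analog
				} else {
					v = chunks[chunk][pos] // fall back to the raw column
				}
				pos++
				return v, true
			}
			chunk, pos = chunk+1, 0
		}
		return "", false
	}
}

func main() {
	pinned := []*pinnedIndex{{raw: []string{"a", "b"}}, nil} // chunk 1 has no index
	next := chunkAccessor(pinned, [][]string{{"a", "b"}, {"c"}})
	for v, ok := next(); ok; v, ok = next() {
		fmt.Println(v)
	}
}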
View File
@ -46,17 +46,21 @@ class SegmentChunkReader {
} }
MultipleChunkDataAccessor MultipleChunkDataAccessor
GetChunkDataAccessor(DataType data_type, GetMultipleChunkDataAccessor(
FieldId field_id, DataType data_type,
bool index, FieldId field_id,
int64_t& current_chunk_id, int64_t& current_chunk_id,
int64_t& current_chunk_pos) const; int64_t& current_chunk_pos,
const std::vector<PinWrapper<const index::IndexBase*>>& pinned_index)
const;
ChunkDataAccessor ChunkDataAccessor
GetChunkDataAccessor(DataType data_type, GetChunkDataAccessor(DataType data_type,
FieldId field_id, FieldId field_id,
int chunk_id, int chunk_id,
int data_barrier) const; int data_barrier,
const std::vector<PinWrapper<const index::IndexBase*>>&
pinned_index) const;
void void
MoveCursorForMultipleChunk(int64_t& current_chunk_id, MoveCursorForMultipleChunk(int64_t& current_chunk_id,
@ -118,16 +122,20 @@ class SegmentChunkReader {
private: private:
template <typename T> template <typename T>
MultipleChunkDataAccessor MultipleChunkDataAccessor
GetChunkDataAccessor(FieldId field_id, GetMultipleChunkDataAccessor(
bool index, FieldId field_id,
int64_t& current_chunk_id, int64_t& current_chunk_id,
int64_t& current_chunk_pos) const; int64_t& current_chunk_pos,
const std::vector<PinWrapper<const index::IndexBase*>>& pinned_index)
const;
template <typename T> template <typename T>
ChunkDataAccessor ChunkDataAccessor
GetChunkDataAccessor(FieldId field_id, GetChunkDataAccessor(FieldId field_id,
int chunk_id, int chunk_id,
int data_barrier) const; int data_barrier,
const std::vector<PinWrapper<const index::IndexBase*>>&
pinned_index) const;
const int64_t size_per_chunk_; const int64_t size_per_chunk_;
}; };
View File
@ -131,7 +131,7 @@ class SegmentGrowingImpl : public SegmentGrowing {
// return the count of chunks with a built index, i.e., chunks in [0, num_chunk_index) have a built index // return the count of chunks with a built index, i.e., chunks in [0, num_chunk_index) have a built index
int64_t int64_t
num_chunk_index(FieldId field_id) const final { num_chunk_index(FieldId field_id) const {
return indexing_record_.get_finished_ack(); return indexing_record_.get_finished_ack();
} }
@ -144,7 +144,7 @@ class SegmentGrowingImpl : public SegmentGrowing {
// deprecated // deprecated
PinWrapper<const index::IndexBase*> PinWrapper<const index::IndexBase*>
chunk_index_impl(FieldId field_id, int64_t chunk_id) const final { chunk_index_impl(FieldId field_id, int64_t chunk_id) const {
return PinWrapper<const index::IndexBase*>( return PinWrapper<const index::IndexBase*>(
indexing_record_.get_field_indexing(field_id) indexing_record_.get_field_indexing(field_id)
.get_chunk_indexing(chunk_id) .get_chunk_indexing(chunk_id)
@ -344,7 +344,7 @@ class SegmentGrowingImpl : public SegmentGrowing {
search_ids(const IdArray& id_array, Timestamp timestamp) const override; search_ids(const IdArray& id_array, Timestamp timestamp) const override;
bool bool
HasIndex(FieldId field_id) const override { HasIndex(FieldId field_id) const {
auto& field_meta = schema_->operator[](field_id); auto& field_meta = schema_->operator[](field_id);
if (IsVectorDataType(field_meta.get_data_type()) && if (IsVectorDataType(field_meta.get_data_type()) &&
indexing_record_.SyncDataWithIndex(field_id)) { indexing_record_.SyncDataWithIndex(field_id)) {
@ -354,14 +354,18 @@ class SegmentGrowingImpl : public SegmentGrowing {
return false; return false;
} }
bool std::vector<PinWrapper<const index::IndexBase*>>
HasIndex(FieldId field_id, PinIndex(FieldId field_id, bool include_ngram = false) const override {
const std::string& nested_path, if (!HasIndex(field_id)) {
DataType data_type, return {};
bool any_type = false, }
bool is_array = false) const override { auto num_chunk = num_chunk_index(field_id);
return false; std::vector<PinWrapper<const index::IndexBase*>> indexes;
}; for (int64_t i = 0; i < num_chunk; i++) {
indexes.push_back(chunk_index_impl(field_id, i));
}
return indexes;
}
bool bool
HasFieldData(FieldId field_id) const override { HasFieldData(FieldId field_id) const override {
View File
@ -561,17 +561,6 @@ SegmentInternalInterface::GetNgramIndexForJson(
return PinWrapper<index::NgramInvertedIndex*>(nullptr); return PinWrapper<index::NgramInvertedIndex*>(nullptr);
} }
bool
SegmentInternalInterface::HasNgramIndex(FieldId field_id) const {
return false;
}
bool
SegmentInternalInterface::HasNgramIndexForJson(
FieldId field_id, const std::string& nested_path) const {
return false;
}
index::JsonKeyStats* index::JsonKeyStats*
SegmentInternalInterface::GetJsonStats(FieldId field_id) const { SegmentInternalInterface::GetJsonStats(FieldId field_id) const {
std::shared_lock lock(mutex_); std::shared_lock lock(mutex_);
View File
@ -132,6 +132,9 @@ class SegmentInterface {
virtual bool virtual bool
HasRawData(int64_t field_id) const = 0; HasRawData(int64_t field_id) const = 0;
virtual bool
HasFieldData(FieldId field_id) const = 0;
virtual bool virtual bool
is_nullable(FieldId field_id) const = 0; is_nullable(FieldId field_id) const = 0;
@ -141,11 +144,20 @@ class SegmentInterface {
virtual index::TextMatchIndex* virtual index::TextMatchIndex*
GetTextIndex(FieldId field_id) const = 0; GetTextIndex(FieldId field_id) const = 0;
virtual PinWrapper<index::IndexBase*> virtual std::vector<PinWrapper<const index::IndexBase*>>
GetJsonIndex(FieldId field_id, std::string path) const { PinJsonIndex(FieldId field_id,
return nullptr; const std::string& path,
DataType data_type,
bool any_type,
bool is_array) const {
return {};
} }
virtual std::vector<PinWrapper<const index::IndexBase*>>
PinIndex(FieldId field_id, bool include_ngram = false) const {
return {};
};
virtual void virtual void
BulkGetJsonData(FieldId field_id, BulkGetJsonData(FieldId field_id,
std::function<void(milvus::Json, size_t, bool)> fn, std::function<void(milvus::Json, size_t, bool)> fn,
@ -159,13 +171,6 @@ class SegmentInterface {
GetNgramIndexForJson(FieldId field_id, GetNgramIndexForJson(FieldId field_id,
const std::string& nested_path) const = 0; const std::string& nested_path) const = 0;
virtual bool
HasNgramIndex(FieldId field_id) const = 0;
virtual bool
HasNgramIndexForJson(FieldId field_id,
const std::string& nested_path) const = 0;
virtual index::JsonKeyStats* virtual index::JsonKeyStats*
GetJsonStats(FieldId field_id) const = 0; GetJsonStats(FieldId field_id) const = 0;
@ -262,28 +267,6 @@ class SegmentInternalInterface : public SegmentInterface {
} }
} }
template <typename T>
PinWrapper<const index::ScalarIndex<T>*>
chunk_scalar_index(FieldId field_id, int64_t chunk_id) const {
static_assert(IsScalar<T>);
using IndexType = index::ScalarIndex<T>;
auto pw = chunk_index_impl(field_id, chunk_id);
auto ptr = dynamic_cast<const IndexType*>(pw.get());
AssertInfo(ptr, "entry mismatch");
return PinWrapper<const index::ScalarIndex<T>*>(pw, ptr);
}
// We should not expose this interface directly, but access the index through chunk_scalar_index.
// However, chunk_scalar_index requires specifying a template parameter, which makes it impossible to return JsonFlatIndex.
// A better approach would be to have chunk_scalar_index return a pointer to a base class,
// and then use dynamic_cast to convert it. But this would cause a lot of code changes, so for now, we will do it this way.
PinWrapper<const index::IndexBase*>
chunk_json_index(FieldId field_id,
std::string& json_path,
int64_t chunk_id) const {
return chunk_index_impl(field_id, json_path, chunk_id);
}
// union(segment_id, field_id) as unique id // union(segment_id, field_id) as unique id
virtual std::string virtual std::string
GetUniqueFieldId(int64_t field_id) const { GetUniqueFieldId(int64_t field_id) const {
@ -291,18 +274,6 @@ class SegmentInternalInterface : public SegmentInterface {
std::to_string(field_id); std::to_string(field_id);
} }
template <typename T>
PinWrapper<const index::ScalarIndex<T>*>
chunk_scalar_index(FieldId field_id,
std::string path,
int64_t chunk_id) const {
using IndexType = index::ScalarIndex<T>;
auto pw = chunk_index_impl(field_id, path, chunk_id);
auto ptr = dynamic_cast<const IndexType*>(pw.get());
AssertInfo(ptr, "entry mismatch");
return PinWrapper<const index::ScalarIndex<T>*>(pw, ptr);
}
std::unique_ptr<SearchResult> std::unique_ptr<SearchResult>
Search(const query::Plan* Plan, Search(const query::Plan* Plan,
const query::PlaceholderGroup* placeholder_group, const query::PlaceholderGroup* placeholder_group,
@ -336,16 +307,6 @@ class SegmentInternalInterface : public SegmentInterface {
virtual bool virtual bool
HasIndex(FieldId field_id) const = 0; HasIndex(FieldId field_id) const = 0;
virtual bool
HasIndex(FieldId field_id,
const std::string& nested_path,
DataType data_type,
bool any_type = false,
bool is_array = false) const = 0;
virtual bool
HasFieldData(FieldId field_id) const = 0;
virtual std::string virtual std::string
debug() const = 0; debug() const = 0;
@ -387,13 +348,6 @@ class SegmentInternalInterface : public SegmentInterface {
GetNgramIndexForJson(FieldId field_id, GetNgramIndexForJson(FieldId field_id,
const std::string& nested_path) const override; const std::string& nested_path) const override;
virtual bool
HasNgramIndex(FieldId field_id) const override;
virtual bool
HasNgramIndexForJson(FieldId field_id,
const std::string& nested_path) const override;
virtual index::JsonKeyStats* virtual index::JsonKeyStats*
GetJsonStats(FieldId field_id) const override; GetJsonStats(FieldId field_id) const override;
@ -416,10 +370,6 @@ class SegmentInternalInterface : public SegmentInterface {
int64_t ins_barrier, int64_t ins_barrier,
Timestamp timestamp) const = 0; Timestamp timestamp) const = 0;
// count of chunk that has index available
virtual int64_t
num_chunk_index(FieldId field_id) const = 0;
// count of chunk that has raw data // count of chunk that has raw data
virtual int64_t virtual int64_t
num_chunk_data(FieldId field_id) const = 0; num_chunk_data(FieldId field_id) const = 0;
@ -557,9 +507,6 @@ class SegmentInternalInterface : public SegmentInterface {
int64_t chunk_id, int64_t chunk_id,
const FixedVector<int32_t>& offsets) const = 0; const FixedVector<int32_t>& offsets) const = 0;
// internal API: return chunk_index in span, support scalar index only
virtual PinWrapper<const index::IndexBase*>
chunk_index_impl(FieldId field_id, int64_t chunk_id) const = 0;
virtual void virtual void
check_search(const query::Plan* plan) const = 0; check_search(const query::Plan* plan) const = 0;
@ -567,13 +514,6 @@ class SegmentInternalInterface : public SegmentInterface {
get_timestamps() const = 0; get_timestamps() const = 0;
public: public:
virtual PinWrapper<const index::IndexBase*>
chunk_index_impl(FieldId field_id,
const std::string& path,
int64_t chunk_id) const {
ThrowInfo(ErrorCode::NotImplemented, "not implemented");
};
virtual bool virtual bool
is_field_exist(FieldId field_id) const = 0; is_field_exist(FieldId field_id) const = 0;
// calculate output[i] = Vec[seg_offsets[i]], where Vec binds to system_type // calculate output[i] = Vec[seg_offsets[i]], where Vec binds to system_type
View File
@ -39,6 +39,8 @@ class SegmentSealed : public SegmentInternalInterface {
virtual void virtual void
DropIndex(const FieldId field_id) = 0; DropIndex(const FieldId field_id) = 0;
virtual void virtual void
DropJSONIndex(const FieldId field_id, const std::string& nested_path) = 0;
virtual void
DropFieldData(const FieldId field_id) = 0; DropFieldData(const FieldId field_id) = 0;
virtual void virtual void
@ -57,55 +59,70 @@ class SegmentSealed : public SegmentInternalInterface {
virtual InsertRecord<true>& virtual InsertRecord<true>&
get_insert_record() = 0; get_insert_record() = 0;
virtual PinWrapper<index::IndexBase*> virtual std::vector<PinWrapper<const index::IndexBase*>>
GetJsonIndex(FieldId field_id, std::string path) const override { PinJsonIndex(FieldId field_id,
const std::string& path,
DataType data_type,
bool any_type,
bool is_array) const override {
int path_len_diff = std::numeric_limits<int>::max(); int path_len_diff = std::numeric_limits<int>::max();
index::CacheIndexBasePtr best_match = nullptr; index::CacheIndexBasePtr best_match = nullptr;
std::string_view path_view = path; std::string_view path_view = path;
for (const auto& index : json_indices) { auto res = json_indices.withRLock(
if (index.field_id != field_id) { [&](auto vec) -> PinWrapper<const index::IndexBase*> {
continue; for (const auto& index : vec) {
} if (index.field_id != field_id) {
switch (index.cast_type.data_type()) {
case JsonCastType::DataType::JSON:
if (path_view.length() < index.nested_path.length()) {
continue; continue;
} }
if (path_view.substr(0, index.nested_path.length()) == switch (index.cast_type.data_type()) {
index.nested_path) { case JsonCastType::DataType::JSON:
int current_len_diff = if (path_view.length() <
path_view.length() - index.nested_path.length(); index.nested_path.length()) {
if (current_len_diff < path_len_diff) { continue;
path_len_diff = current_len_diff; }
best_match = index.index; if (path_view.substr(0,
} index.nested_path.length()) ==
if (path_len_diff == 0) { index.nested_path) {
int current_len_diff =
path_view.length() -
index.nested_path.length();
if (current_len_diff < path_len_diff) {
path_len_diff = current_len_diff;
best_match = index.index;
}
if (path_len_diff == 0) {
break;
}
}
break; break;
} default:
if (index.nested_path != path) {
continue;
}
if (any_type) {
best_match = index.index;
break;
}
if (milvus::index::json::IsDataTypeSupported(
index.cast_type, data_type, is_array)) {
best_match = index.index;
break;
}
} }
break; }
default: if (best_match == nullptr) {
if (index.nested_path == path) { return nullptr;
best_match = index.index; }
break; auto ca = SemiInlineGet(best_match->PinCells({0}));
} auto index = ca->get_cell_of(0);
} return PinWrapper<const index::IndexBase*>(ca, index);
});
if (res.get() == nullptr) {
return {};
} }
if (best_match == nullptr) { return {res};
return nullptr;
}
auto ca = SemiInlineGet(best_match->PinCells({0}));
auto index = ca->get_cell_of(0);
return PinWrapper<index::IndexBase*>(ca, index);
} }
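For a JSON flat index, the selection above keeps the candidate whose nested_path is a prefix of the queried path with the smallest leftover suffix. The same rule in a small, runnable Go sketch, assuming plain strings for paths:

package main

import "fmt"

// bestPrefix returns the candidate that is a prefix of path and leaves the
// shortest suffix, mirroring the path_len_diff bookkeeping above.
func bestPrefix(path string, candidates []string) (string, bool) {
	best, bestDiff, found := "", 0, false
	for _, np := range candidates {
		if len(path) < len(np) || path[:len(np)] != np {
			continue
		}
		if diff := len(path) - len(np); !found || diff < bestDiff {
			best, bestDiff, found = np, diff, true
			if diff == 0 {
				break // an exact match cannot be beaten
			}
		}
	}
	return best, found
}

func main() {
	got, _ := bestPrefix("/a/b/c", []string{"/a", "/a/b", "/x"})
	fmt.Println(got) // "/a/b"
}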
virtual bool
HasNgramIndex(FieldId field_id) const = 0;
virtual bool
HasNgramIndexForJson(FieldId field_id,
const std::string& nested_path) const = 0;
virtual PinWrapper<index::NgramInvertedIndex*> virtual PinWrapper<index::NgramInvertedIndex*>
GetNgramIndex(FieldId field_id) const override = 0; GetNgramIndex(FieldId field_id) const override = 0;
@ -124,52 +141,7 @@ class SegmentSealed : public SegmentInternalInterface {
return SegmentType::Sealed; return SegmentType::Sealed;
} }
virtual bool
HasIndex(FieldId field_id) const override = 0;
bool
HasIndex(FieldId field_id,
const std::string& path,
DataType data_type,
bool any_type = false,
bool is_json_contain = false) const override {
auto it = std::find_if(
json_indices.begin(),
json_indices.end(),
[field_id, path, data_type, any_type, is_json_contain](
const JsonIndex& index) {
if (index.field_id != field_id) {
return false;
}
if (index.cast_type.data_type() ==
JsonCastType::DataType::JSON) {
// for json flat index, path should be a subpath of nested_path
return path.substr(0, index.nested_path.length()) ==
index.nested_path;
}
if (index.nested_path != path) {
return false;
}
if (any_type) {
return true;
}
return milvus::index::json::IsDataTypeSupported(
index.cast_type, data_type, is_json_contain);
});
return it != json_indices.end();
}
protected: protected:
virtual PinWrapper<const index::IndexBase*>
chunk_index_impl(FieldId field_id, int64_t chunk_id) const override = 0;
PinWrapper<const index::IndexBase*>
chunk_index_impl(FieldId field_id,
const std::string& path,
int64_t chunk_id) const override {
return GetJsonIndex(field_id, path)
.template transform<const index::IndexBase*>(
[](auto&& index) { return index; });
}
struct JsonIndex { struct JsonIndex {
FieldId field_id; FieldId field_id;
std::string nested_path; std::string nested_path;
@ -177,7 +149,7 @@ class SegmentSealed : public SegmentInternalInterface {
index::CacheIndexBasePtr index; index::CacheIndexBasePtr index;
}; };
std::vector<JsonIndex> json_indices; folly::Synchronized<std::vector<JsonIndex>> json_indices;
}; };
using SegmentSealedSPtr = std::shared_ptr<SegmentSealed>; using SegmentSealedSPtr = std::shared_ptr<SegmentSealed>;
View File
@ -95,6 +95,14 @@ AppendLoadFieldDataPath(CLoadFieldDataInfo c_load_field_data_info,
} }
} }
void
AppendWarmupPolicy(CLoadFieldDataInfo c_load_field_data_info,
CacheWarmupPolicy warmup_policy) {
auto load_field_data_info =
static_cast<LoadFieldDataInfo*>(c_load_field_data_info);
load_field_data_info->warmup_policy = warmup_policy;
}
void void
SetStorageVersion(CLoadFieldDataInfo c_load_field_data_info, SetStorageVersion(CLoadFieldDataInfo c_load_field_data_info,
int64_t storage_version) { int64_t storage_version) {
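AppendWarmupPolicy only records the chosen warmup policy on the load info before the segment load starts. A runnable Go stand-in for that plumbing; every type and constant name here is an assumption made for the sketch, and the real types are the C structs above:

package main

import "fmt"

// CacheWarmupPolicy mirrors the segcore enum; the concrete values are assumed.
type CacheWarmupPolicy int

const (
	CacheWarmupPolicyDisable CacheWarmupPolicy = iota
	CacheWarmupPolicySync
)

// loadFieldDataInfo is a Go stand-in for the C LoadFieldDataInfo struct.
type loadFieldDataInfo struct {
	storageVersion int64
	warmupPolicy   CacheWarmupPolicy
}

// appendWarmupPolicy mirrors AppendWarmupPolicy: it records the policy on the
// load info and nothing else.
func appendWarmupPolicy(info *loadFieldDataInfo, p CacheWarmupPolicy) {
	info.warmupPolicy = p
}

func main() {
	info := &loadFieldDataInfo{}
	appendWarmupPolicy(info, CacheWarmupPolicyDisable) // lazy mode: raw data stays loadable
	fmt.Println(info.warmupPolicy == CacheWarmupPolicyDisable)
}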
View File
@ -44,6 +44,10 @@ AppendLoadFieldDataPath(CLoadFieldDataInfo c_load_field_data_info,
int64_t memory_size, int64_t memory_size,
const char* file_path); const char* file_path);
void
AppendWarmupPolicy(CLoadFieldDataInfo c_load_field_data_info,
CacheWarmupPolicy warmup_policy);
void void
SetStorageVersion(CLoadFieldDataInfo c_load_field_data_info, SetStorageVersion(CLoadFieldDataInfo c_load_field_data_info,
int64_t storage_version); int64_t storage_version);
View File
@ -310,6 +310,15 @@ HasRawData(CSegmentInterface c_segment, int64_t field_id) {
return segment->HasRawData(field_id); return segment->HasRawData(field_id);
} }
bool
HasFieldData(CSegmentInterface c_segment, int64_t field_id) {
SCOPE_CGO_CALL_METRIC();
auto segment =
reinterpret_cast<milvus::segcore::SegmentInterface*>(c_segment);
return segment->HasFieldData(milvus::FieldId(field_id));
}
////////////////////////////// interfaces for growing segment ////////////////////////////// ////////////////////////////// interfaces for growing segment //////////////////////////////
CStatus CStatus
Insert(CSegmentInterface c_segment, Insert(CSegmentInterface c_segment,
@ -617,6 +626,25 @@ DropSealedSegmentIndex(CSegmentInterface c_segment, int64_t field_id) {
} }
} }
CStatus
DropSealedSegmentJSONIndex(CSegmentInterface c_segment,
int64_t field_id,
const char* nested_path) {
SCOPE_CGO_CALL_METRIC();
try {
auto segment_interface =
reinterpret_cast<milvus::segcore::SegmentInterface*>(c_segment);
auto segment =
dynamic_cast<milvus::segcore::SegmentSealed*>(segment_interface);
AssertInfo(segment != nullptr, "segment conversion failed");
segment->DropJSONIndex(milvus::FieldId(field_id), nested_path);
return milvus::SuccessCStatus();
} catch (std::exception& e) {
return milvus::FailureCStatus(&e);
}
}
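On the Go side, the CSegment interface gains DropIndex and DropJSONIndex with the signatures mocked later in this diff. A hedged sketch of how a caller might dispatch between the two entry points; the dispatch helper and the fake segment are hypothetical:

package main

import (
	"context"
	"fmt"
)

// cSegment is a stand-in for the CSegment interface extended in this diff.
type cSegment interface {
	DropIndex(ctx context.Context, fieldID int64) error
	DropJSONIndex(ctx context.Context, fieldID int64, nestedPath string) error
}

// dropRedundant routes JSON indexes to the path-aware entry point and plain
// field indexes to DropIndex. Hypothetical glue: the real dispatch lives in
// the querynode task executor.
func dropRedundant(ctx context.Context, seg cSegment, fieldID int64, jsonPaths []string) error {
	if len(jsonPaths) == 0 {
		return seg.DropIndex(ctx, fieldID)
	}
	for _, p := range jsonPaths {
		if err := seg.DropJSONIndex(ctx, fieldID, p); err != nil {
			return err
		}
	}
	return nil
}

type fakeSegment struct{}

func (fakeSegment) DropIndex(ctx context.Context, fieldID int64) error { return nil }
func (fakeSegment) DropJSONIndex(ctx context.Context, fieldID int64, nestedPath string) error {
	fmt.Println("dropped json index", fieldID, nestedPath)
	return nil
}

func main() {
	_ = dropRedundant(context.Background(), fakeSegment{}, 102, []string{"/a/b"})
}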
CStatus CStatus
AddFieldDataInfoForSealed(CSegmentInterface c_segment, AddFieldDataInfoForSealed(CSegmentInterface c_segment,
CLoadFieldDataInfo c_load_field_data_info) { CLoadFieldDataInfo c_load_field_data_info) {
View File
@ -89,6 +89,9 @@ GetRealCount(CSegmentInterface c_segment);
bool bool
HasRawData(CSegmentInterface c_segment, int64_t field_id); HasRawData(CSegmentInterface c_segment, int64_t field_id);
bool
HasFieldData(CSegmentInterface c_segment, int64_t field_id);
////////////////////////////// interfaces for growing segment ////////////////////////////// ////////////////////////////// interfaces for growing segment //////////////////////////////
CStatus CStatus
Insert(CSegmentInterface c_segment, Insert(CSegmentInterface c_segment,
@ -140,6 +143,11 @@ DropFieldData(CSegmentInterface c_segment, int64_t field_id);
CStatus CStatus
DropSealedSegmentIndex(CSegmentInterface c_segment, int64_t field_id); DropSealedSegmentIndex(CSegmentInterface c_segment, int64_t field_id);
CStatus
DropSealedSegmentJSONIndex(CSegmentInterface c_segment,
int64_t field_id,
const char* nested_path);
CStatus CStatus
AddFieldDataInfoForSealed(CSegmentInterface c_segment, AddFieldDataInfoForSealed(CSegmentInterface c_segment,
CLoadFieldDataInfo c_load_field_data_info); CLoadFieldDataInfo c_load_field_data_info);
View File
@ -58,10 +58,11 @@ class V1SealedIndexTranslator : public Translator<milvus::index::IndexBase> {
int64_t index_build_id; int64_t index_build_id;
int64_t index_version; int64_t index_version;
}; };
std::unique_ptr<milvus::index::IndexBase>
index::IndexBasePtr
LoadVecIndex(); LoadVecIndex();
std::unique_ptr<milvus::index::IndexBase> index::IndexBasePtr
LoadScalarIndex(); LoadScalarIndex();
std::string key_; std::string key_;
View File
@ -287,9 +287,6 @@ TEST_P(BinlogIndexTest, AccuracyWithLoadFieldData) {
ASSERT_NO_THROW(segment->LoadIndex(load_info)); ASSERT_NO_THROW(segment->LoadIndex(load_info));
EXPECT_TRUE(segment->HasIndex(vec_field_id)); EXPECT_TRUE(segment->HasIndex(vec_field_id));
EXPECT_EQ(segment->get_row_count(), data_n); EXPECT_EQ(segment->get_row_count(), data_n);
// only INDEX_FAISS_IVFFLAT has raw data, thus it should release the raw field data.
EXPECT_EQ(segment->HasFieldData(vec_field_id),
index_type != knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);
auto ivf_sr = segment->Search(plan.get(), ph_group.get(), 1L << 63, 0); auto ivf_sr = segment->Search(plan.get(), ph_group.get(), 1L << 63, 0);
auto similary = GetKnnSearchRecall(num_queries, auto similary = GetKnnSearchRecall(num_queries,
binlog_index_sr->seg_offsets_.data(), binlog_index_sr->seg_offsets_.data(),
@ -386,8 +383,6 @@ TEST_P(BinlogIndexTest, AccuracyWithMapFieldData) {
ASSERT_NO_THROW(segment->LoadIndex(load_info)); ASSERT_NO_THROW(segment->LoadIndex(load_info));
EXPECT_TRUE(segment->HasIndex(vec_field_id)); EXPECT_TRUE(segment->HasIndex(vec_field_id));
EXPECT_EQ(segment->get_row_count(), data_n); EXPECT_EQ(segment->get_row_count(), data_n);
EXPECT_EQ(segment->HasFieldData(vec_field_id),
index_type != knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);
auto ivf_sr = segment->Search(plan.get(), ph_group.get(), 1L << 63); auto ivf_sr = segment->Search(plan.get(), ph_group.get(), 1L << 63);
auto similary = GetKnnSearchRecall(num_queries, auto similary = GetKnnSearchRecall(num_queries,
binlog_index_sr->seg_offsets_.data(), binlog_index_sr->seg_offsets_.data(),
@ -436,8 +431,6 @@ TEST_P(BinlogIndexTest, DisableInterimIndex) {
ASSERT_NO_THROW(segment->LoadIndex(load_info)); ASSERT_NO_THROW(segment->LoadIndex(load_info));
EXPECT_TRUE(segment->HasIndex(vec_field_id)); EXPECT_TRUE(segment->HasIndex(vec_field_id));
EXPECT_EQ(segment->get_row_count(), data_n); EXPECT_EQ(segment->get_row_count(), data_n);
EXPECT_EQ(segment->HasFieldData(vec_field_id),
index_type != knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);
} }
TEST_P(BinlogIndexTest, LoadBingLogWihIDMAP) { TEST_P(BinlogIndexTest, LoadBingLogWihIDMAP) {
View File
@ -211,6 +211,11 @@ test_ngram_with_data(const boost::container::vector<std::string>& data,
BitsetType final; BitsetType final;
final = ExecuteQueryExpr(parsed, segment.get(), nb, MAX_TIMESTAMP); final = ExecuteQueryExpr(parsed, segment.get(), nb, MAX_TIMESTAMP);
for (size_t i = 0; i < nb; i++) { for (size_t i = 0; i < nb; i++) {
if (final[i] != expected_result[i]) {
std::cout << "final[" << i << "] = " << final[i]
<< ", expected_result[" << i
<< "] = " << expected_result[i] << std::endl;
}
ASSERT_EQ(final[i], expected_result[i]); ASSERT_EQ(final[i], expected_result[i]);
} }
} }
View File
@ -573,8 +573,8 @@ TEST(Sealed, LoadFieldData) {
segment->LoadIndex(vec_info); segment->LoadIndex(vec_info);
ASSERT_EQ(segment->num_chunk(fakevec_id), 1); ASSERT_EQ(segment->num_chunk(fakevec_id), 1);
ASSERT_EQ(segment->num_chunk_index(double_id), 0); ASSERT_EQ(segment->PinIndex(double_id).size(), 0);
ASSERT_EQ(segment->num_chunk_index(str_id), 0); ASSERT_EQ(segment->PinIndex(str_id).size(), 0);
auto chunk_span1 = segment->chunk_data<int64_t>(counter_id, 0); auto chunk_span1 = segment->chunk_data<int64_t>(counter_id, 0);
auto chunk_span2 = segment->chunk_data<double>(double_id, 0); auto chunk_span2 = segment->chunk_data<double>(double_id, 0);
auto chunk_span3 = auto chunk_span3 =
@ -654,9 +654,6 @@ TEST(Sealed, LoadFieldData) {
auto sr = segment->Search(plan.get(), ph_group.get(), timestamp); auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
auto json = SearchResultToJson(*sr); auto json = SearchResultToJson(*sr);
std::cout << json.dump(1); std::cout << json.dump(1);
segment->DropIndex(fakevec_id);
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get(), timestamp));
} }
TEST(Sealed, ClearData) { TEST(Sealed, ClearData) {
@ -737,8 +734,8 @@ TEST(Sealed, ClearData) {
segment->LoadIndex(vec_info); segment->LoadIndex(vec_info);
ASSERT_EQ(segment->num_chunk(fakevec_id), 1); ASSERT_EQ(segment->num_chunk(fakevec_id), 1);
ASSERT_EQ(segment->num_chunk_index(double_id), 0); ASSERT_EQ(segment->PinIndex(double_id).size(), 0);
ASSERT_EQ(segment->num_chunk_index(str_id), 0); ASSERT_EQ(segment->PinIndex(str_id).size(), 0);
auto chunk_span1 = segment->chunk_data<int64_t>(counter_id, 0); auto chunk_span1 = segment->chunk_data<int64_t>(counter_id, 0);
auto chunk_span2 = segment->chunk_data<double>(double_id, 0); auto chunk_span2 = segment->chunk_data<double>(double_id, 0);
auto chunk_span3 = auto chunk_span3 =
@ -842,8 +839,8 @@ TEST(Sealed, LoadFieldDataMmap) {
segment->LoadIndex(vec_info); segment->LoadIndex(vec_info);
ASSERT_EQ(segment->num_chunk(fakevec_id), 1); ASSERT_EQ(segment->num_chunk(fakevec_id), 1);
ASSERT_EQ(segment->num_chunk_index(double_id), 0); ASSERT_EQ(segment->PinIndex(double_id).size(), 0);
ASSERT_EQ(segment->num_chunk_index(str_id), 0); ASSERT_EQ(segment->PinIndex(str_id).size(), 0);
auto chunk_span1 = segment->chunk_data<int64_t>(counter_id, 0); auto chunk_span1 = segment->chunk_data<int64_t>(counter_id, 0);
auto chunk_span2 = segment->chunk_data<double>(double_id, 0); auto chunk_span2 = segment->chunk_data<double>(double_id, 0);
auto chunk_span3 = auto chunk_span3 =
@ -861,9 +858,6 @@ TEST(Sealed, LoadFieldDataMmap) {
auto sr = segment->Search(plan.get(), ph_group.get(), timestamp); auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
auto json = SearchResultToJson(*sr); auto json = SearchResultToJson(*sr);
std::cout << json.dump(1); std::cout << json.dump(1);
segment->DropIndex(fakevec_id);
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get(), timestamp));
} }
TEST(Sealed, LoadPkScalarIndex) { TEST(Sealed, LoadPkScalarIndex) {
View File
@ -29,6 +29,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb" "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/parser/planparserv2" "github.com/milvus-io/milvus/internal/parser/planparserv2"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/indexparamcheck" "github.com/milvus-io/milvus/internal/util/indexparamcheck"
"github.com/milvus-io/milvus/pkg/v2/common" "github.com/milvus-io/milvus/pkg/v2/common"
pkgcommon "github.com/milvus-io/milvus/pkg/v2/common" pkgcommon "github.com/milvus-io/milvus/pkg/v2/common"
@ -37,6 +38,7 @@ import (
"github.com/milvus-io/milvus/pkg/v2/proto/datapb" "github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb" "github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/proto/planpb" "github.com/milvus-io/milvus/pkg/v2/proto/planpb"
"github.com/milvus-io/milvus/pkg/v2/proto/querypb"
"github.com/milvus-io/milvus/pkg/v2/util/funcutil" "github.com/milvus-io/milvus/pkg/v2/util/funcutil"
"github.com/milvus-io/milvus/pkg/v2/util/merr" "github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/metautil" "github.com/milvus-io/milvus/pkg/v2/util/metautil"
@ -872,6 +874,23 @@ func (s *Server) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexSt
}, nil }, nil
} }
func isCollectionLoaded(ctx context.Context, mc types.MixCoord, collID int64) (bool, error) {
// query mixcoord for the loaded collections among the given IDs
resp, err := mc.ShowLoadCollections(ctx, &querypb.ShowCollectionsRequest{
CollectionIDs: []int64{collID},
})
if merr.CheckRPCCall(resp, err) != nil {
return false, err
}
for _, loadedCollID := range resp.GetCollectionIDs() {
if collID == loadedCollID {
return true, nil
}
}
return false, nil
}
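A hedged in-package test sketch for the helper above; the mock setup mirrors the ShowLoadCollections expectations added in the datacoord tests later in this diff:

func TestIsCollectionLoaded(t *testing.T) {
	mc := mocks.NewMixCoord(t)
	mc.EXPECT().ShowLoadCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
		Status:        merr.Success(),
		CollectionIDs: []int64{100},
	}, nil)

	loaded, err := isCollectionLoaded(context.Background(), mc, 100)
	assert.NoError(t, err)
	assert.True(t, loaded) // collection 100 came back in the response, so it is loaded
}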
// DropIndex deletes indexes based on IndexName. One IndexName corresponds to the index of an entire column. A column is // DropIndex deletes indexes based on IndexName. One IndexName corresponds to the index of an entire column. A column is
// divided into many segments, and each segment corresponds to an IndexBuildID. DataCoord uses IndexBuildID to record // divided into many segments, and each segment corresponds to an IndexBuildID. DataCoord uses IndexBuildID to record
// index tasks. // index tasks.
@ -893,6 +912,30 @@ func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (
log.Info(fmt.Sprintf("there is no index on collection: %d with the index name: %s", req.CollectionID, req.IndexName)) log.Info(fmt.Sprintf("there is no index on collection: %d with the index name: %s", req.CollectionID, req.IndexName))
return merr.Success(), nil return merr.Success(), nil
} }
// dropping a vector index on a loaded collection is not supported
loaded, err := isCollectionLoaded(ctx, s.mixCoord, req.GetCollectionID())
if err != nil {
log.Warn("fail to check if collection is loaded", zap.String("indexName", req.IndexName), zap.Int64("collectionID", req.GetCollectionID()), zap.Error(err))
return merr.Status(err), nil
}
if loaded {
schema, err := s.getSchema(ctx, req.GetCollectionID())
if err != nil {
return merr.Status(err), nil
}
// check if there is any vector index to drop
for _, index := range indexes {
field := typeutil.GetField(schema, index.FieldID)
if field == nil {
log.Warn("field not found", zap.String("indexName", req.IndexName), zap.Int64("collectionID", req.GetCollectionID()), zap.Int64("fieldID", index.FieldID))
return merr.Status(merr.WrapErrFieldNotFound(index.FieldID)), nil
}
if typeutil.IsVectorType(field.GetDataType()) {
log.Warn("vector index cannot be dropped on loaded collection", zap.String("indexName", req.IndexName), zap.Int64("collectionID", req.GetCollectionID()), zap.Int64("fieldID", index.FieldID))
return merr.Status(merr.WrapErrParameterInvalidMsg(fmt.Sprintf("vector index cannot be dropped on loaded collection: %d", req.GetCollectionID()))), nil
}
}
}
if !req.GetDropAll() && len(indexes) > 1 { if !req.GetDropAll() && len(indexes) > 1 {
log.Warn(msgAmbiguousIndexName()) log.Warn(msgAmbiguousIndexName())

View File
"github.com/milvus-io/milvus/pkg/v2/common" "github.com/milvus-io/milvus/pkg/v2/common"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb" "github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb" "github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/proto/querypb"
"github.com/milvus-io/milvus/pkg/v2/util/funcutil" "github.com/milvus-io/milvus/pkg/v2/util/funcutil"
"github.com/milvus-io/milvus/pkg/v2/util/merr" "github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil" "github.com/milvus-io/milvus/pkg/v2/util/typeutil"
@ -1449,6 +1450,7 @@ func TestServer_DescribeIndex(t *testing.T) {
segments: NewSegmentsInfo(), segments: NewSegmentsInfo(),
}, },
mixCoord: mocks.NewMixCoord(t),
allocator: mock0Allocator, allocator: mock0Allocator,
notifyIndexChan: make(chan UniqueID, 1), notifyIndexChan: make(chan UniqueID, 1),
} }
@ -1643,6 +1645,15 @@ func TestServer_DescribeIndex(t *testing.T) {
}) })
t.Run("describe after drop index", func(t *testing.T) { t.Run("describe after drop index", func(t *testing.T) {
s.mixCoord.(*mocks.MixCoord).EXPECT().ShowLoadCollections(
mock.Anything,
mock.Anything,
).Return(&querypb.ShowCollectionsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
CollectionIDs: []int64{},
}, nil)
status, err := s.DropIndex(ctx, &indexpb.DropIndexRequest{ status, err := s.DropIndex(ctx, &indexpb.DropIndexRequest{
CollectionID: collID, CollectionID: collID,
PartitionIDs: nil, PartitionIDs: nil,
@ -1973,6 +1984,7 @@ func TestServer_GetIndexStatistics(t *testing.T) {
segments: NewSegmentsInfo(), segments: NewSegmentsInfo(),
}, },
mixCoord: mocks.NewMixCoord(t),
allocator: mock0Allocator, allocator: mock0Allocator,
notifyIndexChan: make(chan UniqueID, 1), notifyIndexChan: make(chan UniqueID, 1),
} }
@ -2084,6 +2096,10 @@ func TestServer_GetIndexStatistics(t *testing.T) {
}) })
t.Run("describe after drop index", func(t *testing.T) { t.Run("describe after drop index", func(t *testing.T) {
s.mixCoord.(*mocks.MixCoord).EXPECT().ShowLoadCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIDs: []int64{},
}, nil)
status, err := s.DropIndex(ctx, &indexpb.DropIndexRequest{ status, err := s.DropIndex(ctx, &indexpb.DropIndexRequest{
CollectionID: collID, CollectionID: collID,
PartitionIDs: nil, PartitionIDs: nil,
@ -2223,6 +2239,13 @@ func TestServer_DropIndex(t *testing.T) {
notifyIndexChan: make(chan UniqueID, 1), notifyIndexChan: make(chan UniqueID, 1),
} }
mixCoord := mocks.NewMixCoord(t)
mixCoord.EXPECT().ShowLoadCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIDs: []int64{},
}, nil)
s.mixCoord = mixCoord
s.meta.segments.SetSegment(segID, &SegmentInfo{ s.meta.segments.SetSegment(segID, &SegmentInfo{
SegmentInfo: &datapb.SegmentInfo{ SegmentInfo: &datapb.SegmentInfo{
ID: segID, ID: segID,
View File
@ -383,3 +383,14 @@ func (c *Client) RunAnalyzer(ctx context.Context, req *querypb.RunAnalyzerReques
return client.RunAnalyzer(ctx, req) return client.RunAnalyzer(ctx, req)
}) })
} }
func (c *Client) DropIndex(ctx context.Context, req *querypb.DropIndexRequest, _ ...grpc.CallOption) (*commonpb.Status, error) {
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(c.nodeID),
)
return wrapGrpcCall(ctx, c, func(client querypb.QueryNodeClient) (*commonpb.Status, error) {
return client.DropIndex(ctx, req)
})
}
View File
@ -402,3 +402,7 @@ func (s *Server) UpdateSchema(ctx context.Context, req *querypb.UpdateSchemaRequ
func (s *Server) RunAnalyzer(ctx context.Context, req *querypb.RunAnalyzerRequest) (*milvuspb.RunAnalyzerResponse, error) { func (s *Server) RunAnalyzer(ctx context.Context, req *querypb.RunAnalyzerRequest) (*milvuspb.RunAnalyzerResponse, error) {
return s.querynode.RunAnalyzer(ctx, req) return s.querynode.RunAnalyzer(ctx, req)
} }
func (s *Server) DropIndex(ctx context.Context, req *querypb.DropIndexRequest) (*commonpb.Status, error) {
return s.querynode.DropIndex(ctx, req)
}
View File
@ -148,6 +148,65 @@ func (_c *MockQueryNode_DeleteBatch_Call) RunAndReturn(run func(context.Context,
return _c return _c
} }
// DropIndex provides a mock function with given fields: _a0, _a1
func (_m *MockQueryNode) DropIndex(_a0 context.Context, _a1 *querypb.DropIndexRequest) (*commonpb.Status, error) {
ret := _m.Called(_a0, _a1)
if len(ret) == 0 {
panic("no return value specified for DropIndex")
}
var r0 *commonpb.Status
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *querypb.DropIndexRequest) (*commonpb.Status, error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *querypb.DropIndexRequest) *commonpb.Status); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*commonpb.Status)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *querypb.DropIndexRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockQueryNode_DropIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropIndex'
type MockQueryNode_DropIndex_Call struct {
*mock.Call
}
// DropIndex is a helper method to define mock.On call
// - _a0 context.Context
// - _a1 *querypb.DropIndexRequest
func (_e *MockQueryNode_Expecter) DropIndex(_a0 interface{}, _a1 interface{}) *MockQueryNode_DropIndex_Call {
return &MockQueryNode_DropIndex_Call{Call: _e.mock.On("DropIndex", _a0, _a1)}
}
func (_c *MockQueryNode_DropIndex_Call) Run(run func(_a0 context.Context, _a1 *querypb.DropIndexRequest)) *MockQueryNode_DropIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*querypb.DropIndexRequest))
})
return _c
}
func (_c *MockQueryNode_DropIndex_Call) Return(_a0 *commonpb.Status, _a1 error) *MockQueryNode_DropIndex_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockQueryNode_DropIndex_Call) RunAndReturn(run func(context.Context, *querypb.DropIndexRequest) (*commonpb.Status, error)) *MockQueryNode_DropIndex_Call {
_c.Call.Return(run)
return _c
}
// GetAddress provides a mock function with no fields // GetAddress provides a mock function with no fields
func (_m *MockQueryNode) GetAddress() string { func (_m *MockQueryNode) GetAddress() string {
ret := _m.Called() ret := _m.Called()
View File
@ -224,6 +224,80 @@ func (_c *MockQueryNodeClient_DeleteBatch_Call) RunAndReturn(run func(context.Co
return _c return _c
} }
// DropIndex provides a mock function with given fields: ctx, in, opts
func (_m *MockQueryNodeClient) DropIndex(ctx context.Context, in *querypb.DropIndexRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
if len(ret) == 0 {
panic("no return value specified for DropIndex")
}
var r0 *commonpb.Status
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *querypb.DropIndexRequest, ...grpc.CallOption) (*commonpb.Status, error)); ok {
return rf(ctx, in, opts...)
}
if rf, ok := ret.Get(0).(func(context.Context, *querypb.DropIndexRequest, ...grpc.CallOption) *commonpb.Status); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*commonpb.Status)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *querypb.DropIndexRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockQueryNodeClient_DropIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropIndex'
type MockQueryNodeClient_DropIndex_Call struct {
*mock.Call
}
// DropIndex is a helper method to define mock.On call
// - ctx context.Context
// - in *querypb.DropIndexRequest
// - opts ...grpc.CallOption
func (_e *MockQueryNodeClient_Expecter) DropIndex(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryNodeClient_DropIndex_Call {
return &MockQueryNodeClient_DropIndex_Call{Call: _e.mock.On("DropIndex",
append([]interface{}{ctx, in}, opts...)...)}
}
func (_c *MockQueryNodeClient_DropIndex_Call) Run(run func(ctx context.Context, in *querypb.DropIndexRequest, opts ...grpc.CallOption)) *MockQueryNodeClient_DropIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
variadicArgs := make([]grpc.CallOption, len(args)-2)
for i, a := range args[2:] {
if a != nil {
variadicArgs[i] = a.(grpc.CallOption)
}
}
run(args[0].(context.Context), args[1].(*querypb.DropIndexRequest), variadicArgs...)
})
return _c
}
func (_c *MockQueryNodeClient_DropIndex_Call) Return(_a0 *commonpb.Status, _a1 error) *MockQueryNodeClient_DropIndex_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockQueryNodeClient_DropIndex_Call) RunAndReturn(run func(context.Context, *querypb.DropIndexRequest, ...grpc.CallOption) (*commonpb.Status, error)) *MockQueryNodeClient_DropIndex_Call {
_c.Call.Return(run)
return _c
}
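A short usage sketch for the generated mock; the constructor name follows the usual mockery convention and, like the merr.Ok success check, is an assumption:

func TestQueryNodeClientDropIndexMock(t *testing.T) {
	qn := mocks.NewMockQueryNodeClient(t) // constructor name assumed
	qn.EXPECT().DropIndex(mock.Anything, mock.Anything).Return(merr.Success(), nil)

	status, err := qn.DropIndex(context.Background(), &querypb.DropIndexRequest{})
	assert.NoError(t, err)
	assert.True(t, merr.Ok(status)) // merr.Ok as the success check is an assumption
}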
// GetComponentStates provides a mock function with given fields: ctx, in, opts // GetComponentStates provides a mock function with given fields: ctx, in, opts
func (_m *MockQueryNodeClient) GetComponentStates(ctx context.Context, in *milvuspb.GetComponentStatesRequest, opts ...grpc.CallOption) (*milvuspb.ComponentStates, error) { func (_m *MockQueryNodeClient) GetComponentStates(ctx context.Context, in *milvuspb.GetComponentStatesRequest, opts ...grpc.CallOption) (*milvuspb.ComponentStates, error) {
_va := make([]interface{}, len(opts)) _va := make([]interface{}, len(opts))

View File
return _c return _c
} }
// DropIndex provides a mock function with given fields: ctx, fieldID
func (_m *MockCSegment) DropIndex(ctx context.Context, fieldID int64) error {
ret := _m.Called(ctx, fieldID)
if len(ret) == 0 {
panic("no return value specified for DropIndex")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
r0 = rf(ctx, fieldID)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockCSegment_DropIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropIndex'
type MockCSegment_DropIndex_Call struct {
*mock.Call
}
// DropIndex is a helper method to define mock.On call
// - ctx context.Context
// - fieldID int64
func (_e *MockCSegment_Expecter) DropIndex(ctx interface{}, fieldID interface{}) *MockCSegment_DropIndex_Call {
return &MockCSegment_DropIndex_Call{Call: _e.mock.On("DropIndex", ctx, fieldID)}
}
func (_c *MockCSegment_DropIndex_Call) Run(run func(ctx context.Context, fieldID int64)) *MockCSegment_DropIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(int64))
})
return _c
}
func (_c *MockCSegment_DropIndex_Call) Return(_a0 error) *MockCSegment_DropIndex_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockCSegment_DropIndex_Call) RunAndReturn(run func(context.Context, int64) error) *MockCSegment_DropIndex_Call {
_c.Call.Return(run)
return _c
}
// DropJSONIndex provides a mock function with given fields: ctx, fieldID, nestedPath
func (_m *MockCSegment) DropJSONIndex(ctx context.Context, fieldID int64, nestedPath string) error {
ret := _m.Called(ctx, fieldID, nestedPath)
if len(ret) == 0 {
panic("no return value specified for DropJSONIndex")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64, string) error); ok {
r0 = rf(ctx, fieldID, nestedPath)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockCSegment_DropJSONIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropJSONIndex'
type MockCSegment_DropJSONIndex_Call struct {
*mock.Call
}
// DropJSONIndex is a helper method to define mock.On call
// - ctx context.Context
// - fieldID int64
// - nestedPath string
func (_e *MockCSegment_Expecter) DropJSONIndex(ctx interface{}, fieldID interface{}, nestedPath interface{}) *MockCSegment_DropJSONIndex_Call {
return &MockCSegment_DropJSONIndex_Call{Call: _e.mock.On("DropJSONIndex", ctx, fieldID, nestedPath)}
}
func (_c *MockCSegment_DropJSONIndex_Call) Run(run func(ctx context.Context, fieldID int64, nestedPath string)) *MockCSegment_DropJSONIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(int64), args[2].(string))
})
return _c
}
func (_c *MockCSegment_DropJSONIndex_Call) Return(_a0 error) *MockCSegment_DropJSONIndex_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockCSegment_DropJSONIndex_Call) RunAndReturn(run func(context.Context, int64, string) error) *MockCSegment_DropJSONIndex_Call {
_c.Call.Return(run)
return _c
}
// FinishLoad provides a mock function with no fields // FinishLoad provides a mock function with no fields
func (_m *MockCSegment) FinishLoad() error { func (_m *MockCSegment) FinishLoad() error {
ret := _m.Called() ret := _m.Called()
@ -185,6 +280,52 @@ func (_c *MockCSegment_FinishLoad_Call) RunAndReturn(run func() error) *MockCSeg
return _c return _c
} }
// HasFieldData provides a mock function with given fields: fieldID
func (_m *MockCSegment) HasFieldData(fieldID int64) bool {
ret := _m.Called(fieldID)
if len(ret) == 0 {
panic("no return value specified for HasFieldData")
}
var r0 bool
if rf, ok := ret.Get(0).(func(int64) bool); ok {
r0 = rf(fieldID)
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// MockCSegment_HasFieldData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HasFieldData'
type MockCSegment_HasFieldData_Call struct {
*mock.Call
}
// HasFieldData is a helper method to define mock.On call
// - fieldID int64
func (_e *MockCSegment_Expecter) HasFieldData(fieldID interface{}) *MockCSegment_HasFieldData_Call {
return &MockCSegment_HasFieldData_Call{Call: _e.mock.On("HasFieldData", fieldID)}
}
func (_c *MockCSegment_HasFieldData_Call) Run(run func(fieldID int64)) *MockCSegment_HasFieldData_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(int64))
})
return _c
}
func (_c *MockCSegment_HasFieldData_Call) Return(_a0 bool) *MockCSegment_HasFieldData_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockCSegment_HasFieldData_Call) RunAndReturn(run func(int64) bool) *MockCSegment_HasFieldData_Call {
_c.Call.Return(run)
return _c
}
// HasRawData provides a mock function with given fields: fieldID // HasRawData provides a mock function with given fields: fieldID
func (_m *MockCSegment) HasRawData(fieldID int64) bool { func (_m *MockCSegment) HasRawData(fieldID int64) bool {
ret := _m.Called(fieldID) ret := _m.Called(fieldID)

View File
} }
dit.collectionID = collID dit.collectionID = collID
loaded, err := isCollectionLoaded(ctx, dit.mixCoord, collID)
if err != nil {
return err
}
if loaded {
return errors.New("index cannot be dropped, collection is loaded, please release it first")
}
return nil return nil
} }
View File
@ -175,64 +175,7 @@ func TestDropIndexTask_PreExecute(t *testing.T) {
dit.mixCoord = qc dit.mixCoord = qc
err := dit.PreExecute(ctx) err := dit.PreExecute(ctx)
assert.Error(t, err) assert.NoError(t, err)
})
t.Run("show collection error", func(t *testing.T) {
qc := getMockQueryCoord()
qc.ExpectedCalls = nil
qc.EXPECT().LoadCollection(mock.Anything, mock.Anything).Return(merr.Success(), nil)
qc.EXPECT().GetShardLeaders(mock.Anything, mock.Anything).Return(&querypb.GetShardLeadersResponse{
Status: merr.Success(),
Shards: []*querypb.ShardLeadersList{
{
ChannelName: "channel-1",
NodeIds: []int64{1, 2, 3},
NodeAddrs: []string{"localhost:9000", "localhost:9001", "localhost:9002"},
Serviceable: []bool{true, true, true},
},
},
}, nil)
qc.EXPECT().ShowLoadCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIDs: []int64{collectionID},
}, nil)
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(nil, errors.New("error"))
dit.mixCoord = qc
err := dit.PreExecute(ctx)
assert.Error(t, err)
})
t.Run("show collection fail", func(t *testing.T) {
qc := getMockQueryCoord()
qc.ExpectedCalls = nil
qc.EXPECT().LoadCollection(mock.Anything, mock.Anything).Return(merr.Success(), nil)
qc.EXPECT().GetShardLeaders(mock.Anything, mock.Anything).Return(&querypb.GetShardLeadersResponse{
Status: merr.Success(),
Shards: []*querypb.ShardLeadersList{
{
ChannelName: "channel-1",
NodeIds: []int64{1, 2, 3},
NodeAddrs: []string{"localhost:9000", "localhost:9001", "localhost:9002"},
Serviceable: []bool{true, true, true},
},
},
}, nil)
qc.EXPECT().ShowLoadCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
Status: merr.Success(),
CollectionIDs: []int64{collectionID},
}, nil)
qc.EXPECT().ShowLoadCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "fail reason",
},
}, nil)
dit.mixCoord = qc
err := dit.PreExecute(ctx)
assert.Error(t, err)
}) })
} }
View File
@ -127,12 +127,13 @@ func (c *IndexChecker) checkReplica(ctx context.Context, collection *meta.Collec
idSegmentsStats := make(map[int64]*meta.Segment) idSegmentsStats := make(map[int64]*meta.Segment)
targetsStats := make(map[int64][]int64) // segmentID => FieldID targetsStats := make(map[int64][]int64) // segmentID => FieldID
redundant := make(map[int64][]int64) // segmentID => indexIDs
redundantSegments := make(map[int64]*meta.Segment)
for _, segment := range segments { for _, segment := range segments {
// skip update index in read only node // skip update index in read only node
if roNodeSet.Contain(segment.Node) { if roNodeSet.Contain(segment.Node) {
continue continue
} }
missing := c.checkSegment(segment, indexInfos) missing := c.checkSegment(segment, indexInfos)
missingStats := c.checkSegmentStats(segment, schema, collection.LoadFields) missingStats := c.checkSegmentStats(segment, schema, collection.LoadFields)
if len(missing) > 0 { if len(missing) > 0 {
@@ -142,6 +143,12 @@ func (c *IndexChecker) checkReplica(ctx context.Context, collection *meta.Collec
targetsStats[segment.GetID()] = missingStats
idSegmentsStats[segment.GetID()] = segment
}
redundantIndices := c.checkRedundantIndices(segment, indexInfos)
if len(redundantIndices) > 0 {
redundant[segment.GetID()] = redundantIndices
redundantSegments[segment.GetID()] = segment
}
}
segmentsToUpdate := typeutil.NewSet[int64]()
@@ -191,6 +198,11 @@ func (c *IndexChecker) checkReplica(ctx context.Context, collection *meta.Collec
})
tasks = append(tasks, tasksStats...)
dropTasks := lo.FilterMap(lo.Values(redundantSegments), func(segment *meta.Segment, _ int) (task.Task, bool) {
return c.createSegmentIndexDropTasks(ctx, replica, segment, redundant[segment.GetID()]), true
})
tasks = append(tasks, dropTasks...)
return tasks
}
@@ -210,6 +222,24 @@ func (c *IndexChecker) checkSegment(segment *meta.Segment, indexInfos []*indexpb
return result
}
// checkRedundantIndices returns redundant indexIDs for each segment
func (c *IndexChecker) checkRedundantIndices(segment *meta.Segment, indexInfos []*indexpb.IndexInfo) []int64 {
var redundant []int64
indexInfoMap := typeutil.NewSet[int64]()
for _, indexInfo := range indexInfos {
indexInfoMap.Insert(indexInfo.IndexID)
}
for indexID := range segment.IndexInfo {
if !indexInfoMap.Contain(indexID) {
redundant = append(redundant, indexID)
}
}
return redundant
}
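
The redundancy check is a plain set difference: any indexID still held by a segment but absent from the coordinator's current index metadata gets scheduled for dropping. A minimal standalone sketch of that logic, using an ordinary map in place of Milvus's typeutil.Set (the function and variable names here are illustrative):

```go
package main

import "fmt"

// redundantIndices returns the index IDs a segment still holds that no
// longer exist in the coordinator's index metadata.
func redundantIndices(segmentIndexIDs, metaIndexIDs []int64) []int64 {
	known := make(map[int64]struct{}, len(metaIndexIDs))
	for _, id := range metaIndexIDs {
		known[id] = struct{}{}
	}
	var redundant []int64
	for _, id := range segmentIndexIDs {
		if _, ok := known[id]; !ok {
			redundant = append(redundant, id)
		}
	}
	return redundant
}

func main() {
	// The segment still holds 1000 and 1001, but 1001 was dropped from meta.
	fmt.Println(redundantIndices([]int64{1000, 1001}, []int64{1000})) // [1001]
}
```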
func (c *IndexChecker) createSegmentUpdateTask(ctx context.Context, segment *meta.Segment, replica *meta.Replica) (task.Task, bool) {
action := task.NewSegmentActionWithScope(segment.Node, task.ActionTypeUpdate, segment.GetInsertChannel(), segment.GetID(), querypb.DataScope_Historical, int(segment.GetNumOfRows()))
t, err := task.NewSegmentTask(
@@ -289,3 +319,14 @@ func (c *IndexChecker) createSegmentStatsUpdateTask(ctx context.Context, segment
t.SetReason("missing json stats")
return t, true
}
func (c *IndexChecker) createSegmentIndexDropTasks(ctx context.Context, replica *meta.Replica, segment *meta.Segment, indexIDs []int64) task.Task {
if len(indexIDs) == 0 {
return nil
}
action := task.NewDropIndexAction(segment.Node, task.ActionTypeDropIndex, segment.GetInsertChannel(), indexIDs)
t := task.NewDropIndexTask(ctx, c.ID(), replica.GetCollectionID(), replica, segment.GetID(), action)
t.SetPriority(task.TaskPriorityLow)
t.SetReason("drop index")
return t
}


@@ -309,7 +309,7 @@ func (suite *IndexCheckerSuite) TestCreateNewIndex() {
// dist
segment := utils.CreateTestSegment(1, 1, 2, 1, 1, "test-insert-channel")
segment.IndexInfo = map[int64]*querypb.FieldIndexInfo{101: {
segment.IndexInfo = map[int64]*querypb.FieldIndexInfo{1000: {
FieldID: 101,
IndexID: 1000,
EnableIndex: true,


@@ -147,6 +147,65 @@ func (_c *MockQueryNodeServer_DeleteBatch_Call) RunAndReturn(run func(context.Co
return _c
}
// DropIndex provides a mock function with given fields: _a0, _a1
func (_m *MockQueryNodeServer) DropIndex(_a0 context.Context, _a1 *querypb.DropIndexRequest) (*commonpb.Status, error) {
ret := _m.Called(_a0, _a1)
if len(ret) == 0 {
panic("no return value specified for DropIndex")
}
var r0 *commonpb.Status
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *querypb.DropIndexRequest) (*commonpb.Status, error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *querypb.DropIndexRequest) *commonpb.Status); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*commonpb.Status)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *querypb.DropIndexRequest) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockQueryNodeServer_DropIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropIndex'
type MockQueryNodeServer_DropIndex_Call struct {
*mock.Call
}
// DropIndex is a helper method to define mock.On call
// - _a0 context.Context
// - _a1 *querypb.DropIndexRequest
func (_e *MockQueryNodeServer_Expecter) DropIndex(_a0 interface{}, _a1 interface{}) *MockQueryNodeServer_DropIndex_Call {
return &MockQueryNodeServer_DropIndex_Call{Call: _e.mock.On("DropIndex", _a0, _a1)}
}
func (_c *MockQueryNodeServer_DropIndex_Call) Run(run func(_a0 context.Context, _a1 *querypb.DropIndexRequest)) *MockQueryNodeServer_DropIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*querypb.DropIndexRequest))
})
return _c
}
func (_c *MockQueryNodeServer_DropIndex_Call) Return(_a0 *commonpb.Status, _a1 error) *MockQueryNodeServer_DropIndex_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockQueryNodeServer_DropIndex_Call) RunAndReturn(run func(context.Context, *querypb.DropIndexRequest) (*commonpb.Status, error)) *MockQueryNodeServer_DropIndex_Call {
_c.Call.Return(run)
return _c
}
// GetComponentStates provides a mock function with given fields: _a0, _a1
func (_m *MockQueryNodeServer) GetComponentStates(_a0 context.Context, _a1 *milvuspb.GetComponentStatesRequest) (*milvuspb.ComponentStates, error) {
ret := _m.Called(_a0, _a1)


@@ -28,6 +28,7 @@ import (
"go.uber.org/zap"
"google.golang.org/grpc"
commonpb "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
"github.com/milvus-io/milvus/internal/util/sessionutil"
@@ -187,3 +188,10 @@ func (node *MockQueryNode) getAllSegments() []*querypb.SegmentVersionInfo {
}
return ret
}
func (node *MockQueryNode) DropIndex(ctx context.Context, req *querypb.DropIndexRequest) (*commonpb.Status, error) {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "success",
}, nil
}


@@ -52,6 +52,7 @@ type Cluster interface {
GetMetrics(ctx context.Context, nodeID int64, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error)
SyncDistribution(ctx context.Context, nodeID int64, req *querypb.SyncDistributionRequest) (*commonpb.Status, error)
GetComponentStates(ctx context.Context, nodeID int64) (*milvuspb.ComponentStates, error)
DropIndex(ctx context.Context, nodeID int64, req *querypb.DropIndexRequest) (*commonpb.Status, error)
Start()
Stop()
}
@@ -268,6 +269,20 @@ func (c *QueryCluster) GetComponentStates(ctx context.Context, nodeID int64) (*m
return resp, err
}
func (c *QueryCluster) DropIndex(ctx context.Context, nodeID int64, req *querypb.DropIndexRequest) (*commonpb.Status, error) {
var (
resp *commonpb.Status
err error
)
err1 := c.send(ctx, nodeID, func(cli types.QueryNodeClient) {
resp, err = cli.DropIndex(ctx, req)
})
if err1 != nil {
return nil, err1
}
return resp, err
}
func (c *QueryCluster) send(ctx context.Context, nodeID int64, fn func(cli types.QueryNodeClient)) error {
node := c.nodeManager.Get(nodeID)
if node == nil {


@@ -27,6 +27,66 @@ func (_m *MockCluster) EXPECT() *MockCluster_Expecter {
return &MockCluster_Expecter{mock: &_m.Mock}
}
// DropIndex provides a mock function with given fields: ctx, nodeID, req
func (_m *MockCluster) DropIndex(ctx context.Context, nodeID int64, req *querypb.DropIndexRequest) (*commonpb.Status, error) {
ret := _m.Called(ctx, nodeID, req)
if len(ret) == 0 {
panic("no return value specified for DropIndex")
}
var r0 *commonpb.Status
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int64, *querypb.DropIndexRequest) (*commonpb.Status, error)); ok {
return rf(ctx, nodeID, req)
}
if rf, ok := ret.Get(0).(func(context.Context, int64, *querypb.DropIndexRequest) *commonpb.Status); ok {
r0 = rf(ctx, nodeID, req)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*commonpb.Status)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int64, *querypb.DropIndexRequest) error); ok {
r1 = rf(ctx, nodeID, req)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockCluster_DropIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropIndex'
type MockCluster_DropIndex_Call struct {
*mock.Call
}
// DropIndex is a helper method to define mock.On call
// - ctx context.Context
// - nodeID int64
// - req *querypb.DropIndexRequest
func (_e *MockCluster_Expecter) DropIndex(ctx interface{}, nodeID interface{}, req interface{}) *MockCluster_DropIndex_Call {
return &MockCluster_DropIndex_Call{Call: _e.mock.On("DropIndex", ctx, nodeID, req)}
}
func (_c *MockCluster_DropIndex_Call) Run(run func(ctx context.Context, nodeID int64, req *querypb.DropIndexRequest)) *MockCluster_DropIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(int64), args[2].(*querypb.DropIndexRequest))
})
return _c
}
func (_c *MockCluster_DropIndex_Call) Return(_a0 *commonpb.Status, _a1 error) *MockCluster_DropIndex_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockCluster_DropIndex_Call) RunAndReturn(run func(context.Context, int64, *querypb.DropIndexRequest) (*commonpb.Status, error)) *MockCluster_DropIndex_Call {
_c.Call.Return(run)
return _c
}
// GetComponentStates provides a mock function with given fields: ctx, nodeID
func (_m *MockCluster) GetComponentStates(ctx context.Context, nodeID int64) (*milvuspb.ComponentStates, error) {
ret := _m.Called(ctx, nodeID)


@@ -34,6 +34,7 @@ const (
ActionTypeReduce
ActionTypeUpdate
ActionTypeStatsUpdate
ActionTypeDropIndex
)
var ActionTypeName = map[ActionType]string{
@@ -250,3 +251,33 @@ func (action *LeaderAction) GetLeaderID() typeutil.UniqueID {
func (action *LeaderAction) IsFinished(distMgr *meta.DistributionManager) bool {
return action.rpcReturned.Load()
}
type DropIndexAction struct {
*BaseAction
indexIDs []int64
rpcReturned atomic.Bool
}
func NewDropIndexAction(nodeID typeutil.UniqueID, typ ActionType, shard string, indexIDs []int64) *DropIndexAction {
return &DropIndexAction{
BaseAction: NewBaseAction(nodeID, typ, shard, 0),
indexIDs: indexIDs,
rpcReturned: *atomic.NewBool(false),
}
}
func (action *DropIndexAction) IndexIDs() []int64 {
return action.indexIDs
}
func (action *DropIndexAction) IsFinished(distMgr *meta.DistributionManager) bool {
return action.rpcReturned.Load()
}
func (action *DropIndexAction) Desc() string {
return fmt.Sprintf("type:%s, node id: %d, index ids: %v", action.Type().String(), action.Node(), action.IndexIDs())
}
func (action *DropIndexAction) String() string {
return action.BaseAction.String() + fmt.Sprintf(`{[indexIDs=%v]}`, action.IndexIDs())
}
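
Like LeaderAction above it, DropIndexAction signals completion purely through an atomic flag that the executor flips when the RPC returns; the distribution manager argument to IsFinished is not consulted. A standalone sketch of the pattern, using the same go.uber.org/atomic package the patch uses (the type name here is illustrative):

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

type dropAction struct {
	rpcReturned atomic.Bool // zero value is false, same as NewBool(false)
}

// isFinished mirrors DropIndexAction.IsFinished: done iff the RPC returned.
func (a *dropAction) isFinished() bool { return a.rpcReturned.Load() }

func main() {
	a := &dropAction{}
	fmt.Println(a.isFinished()) // false: RPC still in flight
	a.rpcReturned.Store(true)   // the executor stores true in a defer
	fmt.Println(a.isFinished()) // true
}
```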


@@ -145,6 +145,9 @@ func (ex *Executor) Execute(task Task, step int) bool {
case *LeaderAction:
ex.executeLeaderAction(task.(*LeaderTask), step)
case *DropIndexAction:
ex.executeDropIndexAction(task.(*DropIndexTask), step)
}
}()
@@ -552,6 +555,63 @@ func (ex *Executor) executeLeaderAction(task *LeaderTask, step int) {
}
}
func (ex *Executor) executeDropIndexAction(task *DropIndexTask, step int) {
action := task.Actions()[step].(*DropIndexAction)
defer action.rpcReturned.Store(true)
ctx := task.Context()
log := log.Ctx(ctx).With(
zap.Int64("taskID", task.ID()),
zap.Int64("collectionID", task.CollectionID()),
zap.Int64("replicaID", task.ReplicaID()),
zap.String("shard", task.Shard()),
zap.Int64("node", action.Node()),
zap.String("source", task.Source().String()),
zap.Int64s("indexIDs", action.indexIDs),
)
var err error
defer func() {
if err != nil {
task.Fail(err)
}
ex.removeTask(task, step)
}()
view := ex.dist.ChannelDistManager.GetShardLeader(task.Shard(), task.replica)
if view == nil {
err = merr.WrapErrChannelNotFound(task.Shard(), "shard delegator not found")
log.Warn("failed to get shard leader", zap.Error(err))
return
}
req := &querypb.DropIndexRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_DropIndex),
commonpbutil.WithMsgID(task.ID()),
),
SegmentID: task.SegmentID(),
IndexIDs: action.indexIDs,
Channel: task.Shard(),
NeedTransfer: true,
}
startTs := time.Now()
log.Info("drop index...")
status, err := ex.cluster.DropIndex(task.Context(), view.Node, req)
if err != nil {
log.Warn("failed to drop index", zap.Error(err))
return
}
if !merr.Ok(status) {
err = merr.Error(status)
log.Warn("failed to drop index", zap.Error(err))
return
}
elapsed := time.Since(startTs)
log.Info("drop index done", zap.Duration("elapsed", elapsed))
}
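
The executor treats a transport failure and a non-success status as the same outcome: both are funneled into the single err that the deferred block hands to task.Fail. A standalone sketch of that two-level check, with plain errors standing in for merr (all names here are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

type status struct {
	code   int32
	reason string
}

func ok(s *status) bool { return s != nil && s.code == 0 }

// callDropIndex mirrors executeDropIndexAction's handling: check the
// transport error first, then the application-level status in the response.
func callDropIndex(rpc func() (*status, error)) error {
	st, err := rpc()
	if err != nil { // network or client-side failure
		return err
	}
	if !ok(st) { // server processed the call but reported failure
		return errors.New(st.reason)
	}
	return nil
}

func main() {
	err := callDropIndex(func() (*status, error) {
		return &status{code: 1, reason: "segment not loaded"}, nil
	})
	fmt.Println(err) // segment not loaded
}
```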
func (ex *Executor) updatePartStatsVersions(task *LeaderTask, step int) error {
action := task.Actions()[step].(*LeaderAction)
defer action.rpcReturned.Store(true)


@@ -50,6 +50,7 @@ const (
TaskTypeMove
TaskTypeUpdate
TaskTypeStatsUpdate
TaskTypeDropIndex
)
var TaskTypeName = map[Type]string{
@@ -89,6 +90,14 @@ func NewReplicaLeaderIndex(task *LeaderTask) replicaSegmentIndex {
}
}
func NewReplicaDropIndex(task *DropIndexTask) replicaSegmentIndex {
return replicaSegmentIndex{
ReplicaID: task.ReplicaID(),
SegmentID: task.SegmentID(),
IsGrowing: false,
}
}
type replicaChannelIndex struct {
ReplicaID int64
Channel string
@@ -556,6 +565,23 @@ func (scheduler *taskScheduler) preAdd(task Task) error {
return nil
}
return merr.WrapErrServiceInternal("task with the same segment exists")
}
case *DropIndexTask:
index := NewReplicaDropIndex(task)
if old, ok := scheduler.segmentTasks.Get(index); ok {
if task.Priority() > old.Priority() {
log.Ctx(scheduler.ctx).Info("replace old task, the new one with higher priority",
zap.Int64("oldID", old.ID()),
zap.String("oldPriority", old.Priority().String()),
zap.Int64("newID", task.ID()),
zap.String("newPriority", task.Priority().String()),
)
old.Cancel(merr.WrapErrServiceInternal("replaced with the other one with higher priority"))
scheduler.remove(old)
return nil
}
return merr.WrapErrServiceInternal("task with the same segment exists")
}
default:
@@ -1071,6 +1097,10 @@ func (scheduler *taskScheduler) checkStale(task Task) error {
return err
}
case *DropIndexTask:
if err := scheduler.checkDropIndexTaskStale(task); err != nil {
return err
}
default:
panic(fmt.Sprintf("checkStale: forget to check task type: %+v", task))
} }
@ -1178,3 +1208,13 @@ func (scheduler *taskScheduler) checkLeaderTaskStale(task *LeaderTask) error {
} }
return nil return nil
} }
func (scheduler *taskScheduler) checkDropIndexTaskStale(task *DropIndexTask) error {
for _, action := range task.Actions() {
if ok, _ := scheduler.nodeMgr.IsStoppingNode(action.Node()); ok {
log.Ctx(task.Context()).Warn("task stale due to node offline", WrapTaskLog(task, zap.String("channel", task.Shard()))...)
return merr.WrapErrNodeOffline(action.Node())
}
}
return nil
}
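
The preAdd branch further up encodes a replace-or-reject rule: a colliding DropIndexTask on the same replica/segment key survives only if it carries strictly higher priority. A standalone sketch of that rule, with a plain map standing in for the scheduler's task index (all names here are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

type task struct {
	id       int64
	priority int
}

// preAdd mirrors the scheduler's rule: replace an existing task only if the
// newcomer has strictly higher priority, otherwise reject the newcomer.
func preAdd(existing map[int64]*task, segmentID int64, t *task) error {
	old, ok := existing[segmentID]
	if !ok {
		existing[segmentID] = t
		return nil
	}
	if t.priority > old.priority {
		existing[segmentID] = t // the real scheduler also cancels the old task
		return nil
	}
	return errors.New("task with the same segment exists")
}

func main() {
	tasks := map[int64]*task{}
	fmt.Println(preAdd(tasks, 7, &task{id: 1, priority: 0})) // <nil>
	fmt.Println(preAdd(tasks, 7, &task{id: 2, priority: 0})) // task with the same segment exists
	fmt.Println(preAdd(tasks, 7, &task{id: 3, priority: 9})) // <nil> (replaces task 1)
}
```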


@@ -523,6 +523,26 @@ func (task *LeaderTask) MarshalJSON() ([]byte, error) {
return marshalJSON(task)
}
type DropIndexTask struct {
*baseTask
segmentID typeutil.UniqueID
}
func NewDropIndexTask(ctx context.Context,
source Source,
collectionID typeutil.UniqueID,
replica *meta.Replica,
segmentID typeutil.UniqueID,
action *DropIndexAction,
) *DropIndexTask {
base := newBaseTask(ctx, source, collectionID, replica, action.Shard, fmt.Sprintf("DropIndexTask-%s", action.Type().String()))
base.actions = []Action{action}
return &DropIndexTask{
baseTask: base,
segmentID: segmentID,
}
}
func marshalJSON(task Task) ([]byte, error) {
return json.Marshal(&metricsinfo.QueryCoordTask{
TaskName: task.Name(),
@@ -538,3 +558,7 @@ func marshalJSON(task Task) ([]byte, error) {
Reason: task.GetReason(),
})
}
func (task *DropIndexTask) SegmentID() typeutil.UniqueID {
return task.segmentID
}


@@ -98,6 +98,8 @@ func GetTaskType(task Task) Type {
return TaskTypeUpdate
case task.Actions()[0].Type() == ActionTypeStatsUpdate:
return TaskTypeStatsUpdate
case task.Actions()[0].Type() == ActionTypeDropIndex:
return TaskTypeDropIndex
}
return 0
}


@@ -31,6 +31,7 @@ import (
// Manager is the interface for worker manager.
type Manager interface {
GetWorker(ctx context.Context, nodeID int64) (Worker, error)
GetAllWorkers() []Worker
}
// WorkerBuilder is function alias to build a worker from NodeID
@@ -74,6 +75,10 @@ func (m *grpcWorkerManager) GetWorker(ctx context.Context, nodeID int64) (Worker
return worker, nil
}
func (m *grpcWorkerManager) GetAllWorkers() []Worker {
return m.workers.Values()
}
func NewWorkerManager(builder WorkerBuilder) Manager {
return &grpcWorkerManager{
workers: typeutil.NewConcurrentMap[int64, Worker](),


@@ -21,6 +21,53 @@ func (_m *MockManager) EXPECT() *MockManager_Expecter {
return &MockManager_Expecter{mock: &_m.Mock}
}
// GetAllWorkers provides a mock function with no fields
func (_m *MockManager) GetAllWorkers() []Worker {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for GetAllWorkers")
}
var r0 []Worker
if rf, ok := ret.Get(0).(func() []Worker); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]Worker)
}
}
return r0
}
// MockManager_GetAllWorkers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllWorkers'
type MockManager_GetAllWorkers_Call struct {
*mock.Call
}
// GetAllWorkers is a helper method to define mock.On call
func (_e *MockManager_Expecter) GetAllWorkers() *MockManager_GetAllWorkers_Call {
return &MockManager_GetAllWorkers_Call{Call: _e.mock.On("GetAllWorkers")}
}
func (_c *MockManager_GetAllWorkers_Call) Run(run func()) *MockManager_GetAllWorkers_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockManager_GetAllWorkers_Call) Return(_a0 []Worker) *MockManager_GetAllWorkers_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockManager_GetAllWorkers_Call) RunAndReturn(run func() []Worker) *MockManager_GetAllWorkers_Call {
_c.Call.Return(run)
return _c
}
// GetWorker provides a mock function with given fields: ctx, nodeID
func (_m *MockManager) GetWorker(ctx context.Context, nodeID int64) (Worker, error) {
ret := _m.Called(ctx, nodeID)


@@ -135,6 +135,53 @@ func (_c *MockWorker_DeleteBatch_Call) RunAndReturn(run func(context.Context, *q
return _c
}
// DropIndex provides a mock function with given fields: ctx, req
func (_m *MockWorker) DropIndex(ctx context.Context, req *querypb.DropIndexRequest) error {
ret := _m.Called(ctx, req)
if len(ret) == 0 {
panic("no return value specified for DropIndex")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *querypb.DropIndexRequest) error); ok {
r0 = rf(ctx, req)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockWorker_DropIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropIndex'
type MockWorker_DropIndex_Call struct {
*mock.Call
}
// DropIndex is a helper method to define mock.On call
// - ctx context.Context
// - req *querypb.DropIndexRequest
func (_e *MockWorker_Expecter) DropIndex(ctx interface{}, req interface{}) *MockWorker_DropIndex_Call {
return &MockWorker_DropIndex_Call{Call: _e.mock.On("DropIndex", ctx, req)}
}
func (_c *MockWorker_DropIndex_Call) Run(run func(ctx context.Context, req *querypb.DropIndexRequest)) *MockWorker_DropIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*querypb.DropIndexRequest))
})
return _c
}
func (_c *MockWorker_DropIndex_Call) Return(_a0 error) *MockWorker_DropIndex_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockWorker_DropIndex_Call) RunAndReturn(run func(context.Context, *querypb.DropIndexRequest) error) *MockWorker_DropIndex_Call {
_c.Call.Return(run)
return _c
}
// GetStatistics provides a mock function with given fields: ctx, req
func (_m *MockWorker) GetStatistics(ctx context.Context, req *querypb.GetStatisticsRequest) (*internalpb.GetStatisticsResponse, error) {
ret := _m.Called(ctx, req)


@@ -46,6 +46,7 @@ type Worker interface {
QueryStreamSegments(ctx context.Context, req *querypb.QueryRequest, srv streamrpc.QueryStreamServer) error
GetStatistics(ctx context.Context, req *querypb.GetStatisticsRequest) (*internalpb.GetStatisticsResponse, error)
UpdateSchema(ctx context.Context, req *querypb.UpdateSchemaRequest) (*commonpb.Status, error)
DropIndex(ctx context.Context, req *querypb.DropIndexRequest) error
IsHealthy() bool
Stop()
@@ -254,6 +255,15 @@ func (w *remoteWorker) UpdateSchema(ctx context.Context, req *querypb.UpdateSche
return client.UpdateSchema(ctx, req)
}
func (w *remoteWorker) DropIndex(ctx context.Context, req *querypb.DropIndexRequest) error {
client := w.getClient()
status, err := client.DropIndex(ctx, req)
if err = merr.CheckRPCCall(status, err); err != nil {
return err
}
return nil
}
func (w *remoteWorker) IsHealthy() bool {
return true
}


@@ -91,6 +91,7 @@ type ShardDelegator interface {
SyncTargetVersion(action *querypb.SyncAction, partitions []int64)
GetChannelQueryView() *channelQueryView
GetDeleteBufferSize() (entryNum int64, memorySize int64)
DropIndex(ctx context.Context, req *querypb.DropIndexRequest) error
// manage exclude segments
AddExcludedSegments(excludeInfo map[int64]uint64)


@@ -991,3 +991,13 @@ func (sd *shardDelegator) buildBM25IDF(req *internalpb.SearchRequest) (float64,
req.PlaceholderGroup = funcutil.SparseVectorDataToPlaceholderGroupBytes(idfSparseVector)
return avgdl, nil
}
func (sd *shardDelegator) DropIndex(ctx context.Context, req *querypb.DropIndexRequest) error {
workers := sd.workerManager.GetAllWorkers()
for _, worker := range workers {
if err := worker.DropIndex(ctx, req); err != nil {
return err
}
}
return nil
}
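
The delegator broadcasts the request to every worker it knows and stops at the first failure, leaving a partial drop for the task framework to retry. A standalone sketch of that sequential fan-out (worker names and types here are illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type worker interface {
	DropIndex(ctx context.Context, indexIDs []int64) error
}

type okWorker struct{ name string }

func (w okWorker) DropIndex(ctx context.Context, ids []int64) error {
	fmt.Printf("%s dropped %v\n", w.name, ids)
	return nil
}

type badWorker struct{}

func (badWorker) DropIndex(context.Context, []int64) error {
	return errors.New("node offline")
}

// dropOnAll mirrors shardDelegator.DropIndex: sequential fan-out,
// returning the first error encountered.
func dropOnAll(ctx context.Context, workers []worker, ids []int64) error {
	for _, w := range workers {
		if err := w.DropIndex(ctx, ids); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := dropOnAll(context.Background(), []worker{okWorker{"qn-1"}, badWorker{}}, []int64{1001})
	fmt.Println(err) // node offline (qn-1 already dropped; the task retries)
}
```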


@@ -140,6 +140,53 @@ func (_c *MockShardDelegator_Collection_Call) RunAndReturn(run func() int64) *Mo
return _c
}
// DropIndex provides a mock function with given fields: ctx, req
func (_m *MockShardDelegator) DropIndex(ctx context.Context, req *querypb.DropIndexRequest) error {
ret := _m.Called(ctx, req)
if len(ret) == 0 {
panic("no return value specified for DropIndex")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *querypb.DropIndexRequest) error); ok {
r0 = rf(ctx, req)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockShardDelegator_DropIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropIndex'
type MockShardDelegator_DropIndex_Call struct {
*mock.Call
}
// DropIndex is a helper method to define mock.On call
// - ctx context.Context
// - req *querypb.DropIndexRequest
func (_e *MockShardDelegator_Expecter) DropIndex(ctx interface{}, req interface{}) *MockShardDelegator_DropIndex_Call {
return &MockShardDelegator_DropIndex_Call{Call: _e.mock.On("DropIndex", ctx, req)}
}
func (_c *MockShardDelegator_DropIndex_Call) Run(run func(ctx context.Context, req *querypb.DropIndexRequest)) *MockShardDelegator_DropIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*querypb.DropIndexRequest))
})
return _c
}
func (_c *MockShardDelegator_DropIndex_Call) Return(_a0 error) *MockShardDelegator_DropIndex_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockShardDelegator_DropIndex_Call) RunAndReturn(run func(context.Context, *querypb.DropIndexRequest) error) *MockShardDelegator_DropIndex_Call {
_c.Call.Return(run)
return _c
}
// GetChannelQueryView provides a mock function with no fields
func (_m *MockShardDelegator) GetChannelQueryView() *channelQueryView {
ret := _m.Called()


@@ -82,5 +82,10 @@ func (w *LocalWorker) IsHealthy() bool {
return true
}
func (w *LocalWorker) DropIndex(ctx context.Context, req *querypb.DropIndexRequest) error {
status, err := w.node.DropIndex(ctx, req)
return merr.CheckRPCCall(status, err)
}
func (w *LocalWorker) Stop() {
}


@@ -270,6 +270,53 @@ func (_c *MockSegment_Delete_Call) RunAndReturn(run func(context.Context, storag
return _c
}
// DropIndex provides a mock function with given fields: ctx, indexID
func (_m *MockSegment) DropIndex(ctx context.Context, indexID int64) error {
ret := _m.Called(ctx, indexID)
if len(ret) == 0 {
panic("no return value specified for DropIndex")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
r0 = rf(ctx, indexID)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockSegment_DropIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropIndex'
type MockSegment_DropIndex_Call struct {
*mock.Call
}
// DropIndex is a helper method to define mock.On call
// - ctx context.Context
// - indexID int64
func (_e *MockSegment_Expecter) DropIndex(ctx interface{}, indexID interface{}) *MockSegment_DropIndex_Call {
return &MockSegment_DropIndex_Call{Call: _e.mock.On("DropIndex", ctx, indexID)}
}
func (_c *MockSegment_DropIndex_Call) Run(run func(ctx context.Context, indexID int64)) *MockSegment_DropIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(int64))
})
return _c
}
func (_c *MockSegment_DropIndex_Call) Return(_a0 error) *MockSegment_DropIndex_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockSegment_DropIndex_Call) RunAndReturn(run func(context.Context, int64) error) *MockSegment_DropIndex_Call {
_c.Call.Return(run)
return _c
}
// ExistIndex provides a mock function with given fields: fieldID
func (_m *MockSegment) ExistIndex(fieldID int64) bool {
ret := _m.Called(fieldID)


@@ -511,6 +511,43 @@ func (s *LocalSegment) HasRawData(fieldID int64) bool {
return s.csegment.HasRawData(fieldID)
}
func (s *LocalSegment) HasFieldData(fieldID int64) bool {
if !s.ptrLock.PinIf(state.IsNotReleased) {
return false
}
defer s.ptrLock.Unpin()
return s.csegment.HasFieldData(fieldID)
}
func (s *LocalSegment) DropIndex(ctx context.Context, indexID int64) error {
if !s.ptrLock.PinIf(state.IsNotReleased) {
return merr.WrapErrSegmentNotLoaded(s.ID(), "segment released")
}
defer s.ptrLock.Unpin()
if indexInfo, ok := s.fieldIndexes.Get(indexID); ok {
field := typeutil.GetField(s.collection.schema.Load(), indexInfo.IndexInfo.FieldID)
if typeutil.IsJSONType(field.GetDataType()) {
nestedPath, err := funcutil.GetAttrByKeyFromRepeatedKV(common.JSONPathKey, indexInfo.IndexInfo.GetIndexParams())
if err != nil {
return err
}
err = s.csegment.DropJSONIndex(ctx, indexInfo.IndexInfo.FieldID, nestedPath)
if err != nil {
return err
}
} else {
err := s.csegment.DropIndex(ctx, indexInfo.IndexInfo.FieldID)
if err != nil {
return err
}
}
s.fieldIndexes.Remove(indexID)
}
return nil
}
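
The only branch in LocalSegment.DropIndex is the JSON case: a JSON index is addressed by field ID plus the nested path recorded in its index params, while every other index is addressed by field ID alone. A standalone sketch of that dispatch (the C function names echo the cgo calls used above; everything else is illustrative):

```go
package main

import "fmt"

type indexInfo struct {
	fieldID    int64
	isJSON     bool
	nestedPath string // JSON-path index param; only meaningful for JSON indexes
}

// dropIndex mirrors LocalSegment.DropIndex's branching: JSON indexes are
// addressed by (fieldID, nestedPath), all others by fieldID alone.
func dropIndex(info indexInfo) string {
	if info.isJSON {
		return fmt.Sprintf("DropSealedSegmentJSONIndex(field=%d, path=%q)", info.fieldID, info.nestedPath)
	}
	return fmt.Sprintf("DropSealedSegmentIndex(field=%d)", info.fieldID)
}

func main() {
	fmt.Println(dropIndex(indexInfo{fieldID: 101}))
	fmt.Println(dropIndex(indexInfo{fieldID: 102, isJSON: true, nestedPath: "/meta/tag"}))
}
```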
func (s *LocalSegment) Indexes() []*IndexedFieldInfo {
var result []*IndexedFieldInfo
s.fieldIndexes.Range(func(key int64, value *IndexedFieldInfo) bool {
@@ -789,7 +826,7 @@ func (s *LocalSegment) LoadMultiFieldData(ctx context.Context) error {
return nil
}
func (s *LocalSegment) LoadFieldData(ctx context.Context, fieldID int64, rowCount int64, field *datapb.FieldBinlog) error {
func (s *LocalSegment) LoadFieldData(ctx context.Context, fieldID int64, rowCount int64, field *datapb.FieldBinlog, warmupPolicy ...string) error {
if !s.ptrLock.PinIf(state.IsNotReleased) {
return merr.WrapErrSegmentNotLoaded(s.ID(), "segment released")
}
@@ -823,6 +860,10 @@ func (s *LocalSegment) LoadFieldData(ctx context.Context, fieldID int64, rowCoun
StorageVersion: s.LoadInfo().GetStorageVersion(),
}
if len(warmupPolicy) > 0 {
req.WarmupPolicy = warmupPolicy[0]
}
GetLoadPool().Submit(func() (any, error) {
start := time.Now()
defer func() {


@@ -77,6 +77,7 @@ type Segment interface {
ExistIndex(fieldID int64) bool
Indexes() []*IndexedFieldInfo
HasRawData(fieldID int64) bool
DropIndex(ctx context.Context, indexID int64) error
// Modification related
Insert(ctx context.Context, rowIDs []int64, timestamps []typeutil.Timestamp, record *segcorepb.InsertRecord) error


@@ -121,6 +121,10 @@ func (s *L0Segment) HasRawData(fieldID int64) bool {
return false
}
func (s *L0Segment) DropIndex(ctx context.Context, indexID int64) error {
return nil
}
func (s *L0Segment) Indexes() []*IndexedFieldInfo {
return nil
}


@@ -898,6 +898,17 @@ func (loader *segmentLoader) loadSealedSegment(ctx context.Context, loadInfo *qu
return err
}
}
if !storagecommon.IsVectorDataType(field.GetDataType()) &&
!segment.HasFieldData(fieldID) &&
loadInfo.GetStorageVersion() != storage.StorageV2 {
// Lazy load raw data to avoid search failure after dropping index.
// storage v2 will load all scalar fields so we don't need to load raw data for them.
if err = segment.LoadFieldData(ctx, fieldID, loadInfo.GetNumOfRows(), info.FieldBinlog, "disable"); err != nil {
log.Warn("load raw data failed", zap.Int64("fieldID", fieldID), zap.Error(err))
return err
}
}
}
complementScalarDataSpan := tr.RecordSpan()
if err := loadSealedSegmentFields(ctx, collection, segment, fieldBinlogs, loadInfo.GetNumOfRows()); err != nil {
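
This condition is the core of the lazy-load change: scalar fields whose raw data used to be skipped once an index covered them are now loaded anyway, but with warmup disabled, so a later DropIndex never leaves search without a raw-data fallback. A standalone sketch of the gate; the storage-version constant is an illustrative stand-in for storage.StorageV2, whose actual value is not shown here:

```go
package main

import "fmt"

const storageV2 = int64(2) // illustrative stand-in for storage.StorageV2

// needLazyRawLoad mirrors the loader's check: only non-vector fields that
// currently lack field data, outside storage v2, get a warmup=disable load.
func needLazyRawLoad(isVector, hasFieldData bool, storageVersion int64) bool {
	return !isVector && !hasFieldData && storageVersion != storageV2
}

func main() {
	fmt.Println(needLazyRawLoad(false, false, 1))         // true: scalar, no raw data
	fmt.Println(needLazyRawLoad(true, false, 1))          // false: vector fields keep the old path
	fmt.Println(needLazyRawLoad(false, false, storageV2)) // false: v2 loads scalars anyway
}
```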


@@ -1627,3 +1627,33 @@ func (node *QueryNode) getDistributionModifyTS() int64 {
defer node.lastModifyLock.RUnlock()
return node.lastModifyTs
}
func (node *QueryNode) DropIndex(ctx context.Context, req *querypb.DropIndexRequest) (*commonpb.Status, error) {
defer node.updateDistributionModifyTS()
if req.GetNeedTransfer() {
shardDelegator, ok := node.delegators.Get(req.GetChannel())
if !ok {
return merr.Status(merr.WrapErrChannelNotFound(req.GetChannel())), nil
}
req.NeedTransfer = false
if err := shardDelegator.DropIndex(ctx, req); err != nil {
return merr.Status(err), nil
}
}
segments, err := node.manager.Segment.GetAndPinBy(segments.WithID(req.GetSegmentID()))
if err != nil {
return merr.Status(err), nil
}
if len(segments) == 0 {
return merr.Success(), nil
}
defer node.manager.Segment.Unpin(segments)
segment := segments[0]
indexIDs := req.GetIndexIDs()
for _, indexID := range indexIDs {
segment.DropIndex(ctx, indexID)
}
return merr.Success(), nil
}
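
The NeedTransfer flag makes this handler double as both entry point and leaf: the first call lands on the node hosting the shard delegator, which re-issues the request to every worker with NeedTransfer=false, and every node (including the first) then drops the indexes on its local copy of the segment. A standalone sketch of that routing (types and names here are illustrative):

```go
package main

import "fmt"

type req struct {
	channel      string
	segmentID    int64
	needTransfer bool
}

// handle mirrors QueryNode.DropIndex: a transfer request is first routed
// through the shard delegator (which re-issues it with needTransfer=false),
// then the local segment drops its indexes either way.
func handle(r req, delegate func(req) error) error {
	if r.needTransfer {
		r.needTransfer = false
		if err := delegate(r); err != nil {
			return err
		}
	}
	fmt.Printf("dropping indexes on local segment %d\n", r.segmentID)
	return nil
}

func main() {
	_ = handle(req{channel: "ch-1", segmentID: 42, needTransfer: true}, func(r req) error {
		fmt.Printf("delegator on %s fans out: %+v\n", r.channel, r)
		return nil
	})
}
```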


@@ -145,3 +145,7 @@ func (m *GrpcQueryNodeClient) RunAnalyzer(ctx context.Context, in *querypb.RunAn
func (m *GrpcQueryNodeClient) Close() error {
return m.Err
}
func (m *GrpcQueryNodeClient) DropIndex(ctx context.Context, in *querypb.DropIndexRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
return &commonpb.Status{}, m.Err
}


@@ -2,6 +2,8 @@ package segcore
/*
#cgo pkg-config: milvus_core
#include "common/type_c.h"
#include "segcore/load_field_data_c.h"
*/
import "C"
@@ -13,6 +15,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/initcore"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/segcorepb"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
@@ -40,6 +43,7 @@ type LoadFieldDataRequest struct {
RowCount int64
StorageVersion int64
LoadPriority commonpb.LoadPriority
WarmupPolicy string
}
type LoadFieldDataInfo struct {
@@ -81,6 +85,13 @@ func (req *LoadFieldDataRequest) getCLoadFieldDataRequest() (result *cLoadFieldD
C.EnableMmap(cLoadFieldDataInfo, cFieldID, C.bool(field.EnableMMap))
}
C.SetLoadPriority(cLoadFieldDataInfo, C.int32_t(req.LoadPriority))
if len(req.WarmupPolicy) > 0 {
warmupPolicy, err := initcore.ConvertCacheWarmupPolicy(req.WarmupPolicy)
if err != nil {
return nil, errors.Wrapf(err, "ConvertCacheWarmupPolicy failed at warmupPolicy, %s", req.WarmupPolicy)
}
C.AppendWarmupPolicy(cLoadFieldDataInfo, C.CacheWarmupPolicy(warmupPolicy))
}
return &cLoadFieldDataRequest{
cLoadFieldDataInfo: cLoadFieldDataInfo,
}, nil
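
Before the warmup policy crosses into segcore, the Go-side string ("disable" in the loader above) has to become the C enum appended to the load request. A standalone sketch of the kind of string-to-enum mapping initcore.ConvertCacheWarmupPolicy performs; the enum names and values here are assumptions, not the real segcore constants:

```go
package main

import "fmt"

type cacheWarmupPolicy int32

const (
	warmupSync    cacheWarmupPolicy = iota // assumed value
	warmupDisable                          // assumed value
)

// convertCacheWarmupPolicy sketches the string-to-enum translation done in
// initcore before the value is handed to segcore via cgo.
func convertCacheWarmupPolicy(s string) (cacheWarmupPolicy, error) {
	switch s {
	case "sync":
		return warmupSync, nil
	case "disable":
		return warmupDisable, nil
	default:
		return 0, fmt.Errorf("unknown warmup policy: %q", s)
	}
}

func main() {
	p, err := convertCacheWarmupPolicy("disable")
	fmt.Println(p, err) // 1 <nil>
}
```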


@@ -101,6 +101,12 @@ func (s *cSegmentImpl) HasRawData(fieldID int64) bool {
return bool(ret)
}
// HasFieldData checks if the segment has field data.
func (s *cSegmentImpl) HasFieldData(fieldID int64) bool {
ret := C.HasFieldData(s.ptr, C.int64_t(fieldID))
return bool(ret)
}
// Search requests a search on the segment.
func (s *cSegmentImpl) Search(ctx context.Context, searchReq *SearchRequest) (*SearchResult, error) {
traceCtx := ParseCTraceContext(ctx)
@@ -291,6 +297,22 @@ func (s *cSegmentImpl) FinishLoad() error {
return nil
}
func (s *cSegmentImpl) DropIndex(ctx context.Context, fieldID int64) error {
status := C.DropSealedSegmentIndex(s.ptr, C.int64_t(fieldID))
if err := ConsumeCStatusIntoError(&status); err != nil {
return errors.Wrap(err, "failed to drop index")
}
return nil
}
func (s *cSegmentImpl) DropJSONIndex(ctx context.Context, fieldID int64, nestedPath string) error {
status := C.DropSealedSegmentJSONIndex(s.ptr, C.int64_t(fieldID), C.CString(nestedPath))
if err := ConsumeCStatusIntoError(&status); err != nil {
return errors.Wrap(err, "failed to drop json index")
}
return nil
}
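
One cgo detail worth noting in DropJSONIndex: C.CString allocates on the C heap and is never garbage-collected, so callers normally pair it with C.free unless the callee takes ownership; whether segcore frees this one is not visible in the hunk. A minimal self-contained example of the usual pattern:

```go
package main

/*
#include <stdlib.h>
#include <string.h>
static size_t pathLen(const char* s) { return strlen(s); }
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	cs := C.CString("/meta/tag") // allocated with C malloc, not garbage-collected
	defer C.free(unsafe.Pointer(cs))
	fmt.Println(C.pathLen(cs)) // 9
}
```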
// Release releases the segment.
func (s *cSegmentImpl) Release() {
C.DeleteSegment(s.ptr)


@@ -37,6 +37,11 @@ type SealedSegment interface {
// AddFieldDataInfo adds field data info into the segment.
AddFieldDataInfo(ctx context.Context, request *AddFieldDataInfoRequest) (*AddFieldDataInfoResult, error)
// DropIndex drops the index of the segment.
DropIndex(ctx context.Context, fieldID int64) error
DropJSONIndex(ctx context.Context, fieldID int64, nestedPath string) error
}
// basicSegmentMethodSet is the basic method set of a segment.
@@ -57,6 +62,9 @@ type basicSegmentMethodSet interface {
// HasRawData checks if the segment has raw data.
HasRawData(fieldID int64) bool
// HasFieldData checks if the segment has field data.
HasFieldData(fieldID int64) bool
// Search requests a search on the segment.
Search(ctx context.Context, searchReq *SearchRequest) (*SearchResult, error)


@@ -160,6 +160,10 @@ func (qn *qnServerWrapper) RunAnalyzer(ctx context.Context, in *querypb.RunAnaly
return qn.QueryNode.RunAnalyzer(ctx, in)
}
func (qn *qnServerWrapper) DropIndex(ctx context.Context, in *querypb.DropIndexRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
return qn.QueryNode.DropIndex(ctx, in)
}
func WrapQueryNodeServerAsClient(qn types.QueryNode) types.QueryNodeClient {
return &qnServerWrapper{
QueryNode: qn,


@@ -171,6 +171,7 @@ service QueryNode {
rpc UpdateSchema(UpdateSchemaRequest) returns (common.Status) {}
rpc RunAnalyzer(RunAnalyzerRequest) returns(milvus.RunAnalyzerResponse){}
rpc DropIndex(DropIndexRequest) returns (common.Status) {}
}
// --------------------QueryCoord grpc request and response proto------------------
@@ -1000,4 +1001,11 @@ message ListLoadedSegmentsRequest {
message ListLoadedSegmentsResponse {
common.Status status = 1;
repeated int64 segmentIDs = 2;
}
message DropIndexRequest {
common.MsgBase base = 1;
int64 segmentID = 2;
repeated int64 indexIDs = 3;
string channel = 4;
bool need_transfer = 5;
}
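
For reference, a request as the executor builds it looks like this; the sketch uses only the generated querypb fields defined in this message (Base omitted, values illustrative):

```go
package main

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/v2/proto/querypb"
)

func main() {
	// Mirrors the request assembled in executeDropIndexAction.
	req := &querypb.DropIndexRequest{
		SegmentID:    42,
		IndexIDs:     []int64{1000, 1001},
		Channel:      "by-dev-rootcoord-dml_0",
		NeedTransfer: true,
	}
	fmt.Println(req.GetSegmentID(), req.GetIndexIDs(), req.GetNeedTransfer())
}
```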


@@ -7731,6 +7731,85 @@ func (x *ListLoadedSegmentsResponse) GetSegmentIDs() []int64 {
return nil
}
type DropIndexRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
SegmentID int64 `protobuf:"varint,2,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
IndexIDs []int64 `protobuf:"varint,3,rep,packed,name=indexIDs,proto3" json:"indexIDs,omitempty"`
Channel string `protobuf:"bytes,4,opt,name=channel,proto3" json:"channel,omitempty"`
NeedTransfer bool `protobuf:"varint,5,opt,name=need_transfer,json=needTransfer,proto3" json:"need_transfer,omitempty"`
}
func (x *DropIndexRequest) Reset() {
*x = DropIndexRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_query_coord_proto_msgTypes[92]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DropIndexRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DropIndexRequest) ProtoMessage() {}
func (x *DropIndexRequest) ProtoReflect() protoreflect.Message {
mi := &file_query_coord_proto_msgTypes[92]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DropIndexRequest.ProtoReflect.Descriptor instead.
func (*DropIndexRequest) Descriptor() ([]byte, []int) {
return file_query_coord_proto_rawDescGZIP(), []int{92}
}
func (x *DropIndexRequest) GetBase() *commonpb.MsgBase {
if x != nil {
return x.Base
}
return nil
}
func (x *DropIndexRequest) GetSegmentID() int64 {
if x != nil {
return x.SegmentID
}
return 0
}
func (x *DropIndexRequest) GetIndexIDs() []int64 {
if x != nil {
return x.IndexIDs
}
return nil
}
func (x *DropIndexRequest) GetChannel() string {
if x != nil {
return x.Channel
}
return ""
}
func (x *DropIndexRequest) GetNeedTransfer() bool {
if x != nil {
return x.NeedTransfer
}
return false
}
var File_query_coord_proto protoreflect.FileDescriptor
var file_query_coord_proto_rawDesc = []byte{
@@ -9293,7 +9372,19 @@ var file_query_coord_proto_rawDesc = []byte{
0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e,
0x0a, 0x0a, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x73, 0x18, 0x02, 0x20, 0x03,
0x28, 0x03, 0x52, 0x0a, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x73, 0x2a, 0x36,
0x28, 0x03, 0x52, 0x0a, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x73, 0x22, 0xbd,
0x01, 0x0a, 0x10, 0x44, 0x72, 0x6f, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x4d, 0x73, 0x67, 0x42, 0x61, 0x73, 0x65, 0x52,
0x04, 0x62, 0x61, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74,
0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e,
0x74, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x49, 0x44, 0x73, 0x18,
0x03, 0x20, 0x03, 0x28, 0x03, 0x52, 0x08, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x49, 0x44, 0x73, 0x12,
0x18, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x65, 0x65,
0x64, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08,
0x52, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2a, 0x36,
0x0a, 0x09, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x46,
0x75, 0x6c, 0x6c, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x10, 0x01,
0x12, 0x09, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x53,
@@ -9568,7 +9659,7 @@ var file_query_coord_proto_rawDesc = []byte{
0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x61, 0x64,
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e,
0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d,
0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x32, 0xee, 0x14, 0x0a,
0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x32, 0xc0, 0x15, 0x0a,
0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x6c, 0x0a, 0x12, 0x47, 0x65,
0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73,
0x12, 0x2e, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
@@ -9735,11 +9826,16 @@ var file_query_coord_proto_rawDesc = []byte{
0x75, 0x6e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x28, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x41, 0x6e, 0x61, 0x6c, 0x79,
0x7a, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x32, 0x5a,
0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x69, 0x6c, 0x76,
0x75, 0x73, 0x2d, 0x69, 0x6f, 0x2f, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2f, 0x70, 0x6b, 0x67,
0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x70,
0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x7a, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a,
0x09, 0x44, 0x72, 0x6f, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x24, 0x2e, 0x6d, 0x69, 0x6c,
0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
0x44, 0x72, 0x6f, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1b, 0x2e, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x42,
0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x69,
0x6c, 0x76, 0x75, 0x73, 0x2d, 0x69, 0x6f, 0x2f, 0x6d, 0x69, 0x6c, 0x76, 0x75, 0x73, 0x2f, 0x70,
0x6b, 0x67, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72,
0x79, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -9755,7 +9851,7 @@ func file_query_coord_proto_rawDescGZIP() []byte {
}
var file_query_coord_proto_enumTypes = make([]protoimpl.EnumInfo, 7)
var file_query_coord_proto_msgTypes = make([]protoimpl.MessageInfo, 113)
var file_query_coord_proto_msgTypes = make([]protoimpl.MessageInfo, 114)
var file_query_coord_proto_goTypes = []interface{}{
(LoadScope)(0), // 0: milvus.proto.query.LoadScope
(DataScope)(0), // 1: milvus.proto.query.DataScope
@@ -9856,384 +9952,388 @@ var file_query_coord_proto_goTypes = []interface{}{
(*RunAnalyzerRequest)(nil), // 96: milvus.proto.query.RunAnalyzerRequest
(*ListLoadedSegmentsRequest)(nil), // 97: milvus.proto.query.ListLoadedSegmentsRequest
(*ListLoadedSegmentsResponse)(nil), // 98: milvus.proto.query.ListLoadedSegmentsResponse
nil, // 99: milvus.proto.query.LoadCollectionRequest.FieldIndexIDEntry
nil, // 100: milvus.proto.query.LoadPartitionsRequest.FieldIndexIDEntry
nil, // 101: milvus.proto.query.UpdateResourceGroupsRequest.ResourceGroupsEntry
nil, // 102: milvus.proto.query.WatchDmChannelsRequest.SegmentInfosEntry
nil, // 103: milvus.proto.query.WatchDmChannelsRequest.SealedSegmentRowCountEntry
nil, // 104: milvus.proto.query.SegmentLoadInfo.TextStatsLogsEntry
nil, // 105: milvus.proto.query.SegmentLoadInfo.JsonKeyStatsLogsEntry
nil, // 106: milvus.proto.query.GetDataDistributionRequest.CheckpointsEntry
nil, // 107: milvus.proto.query.LeaderView.SegmentDistEntry
nil, // 108: milvus.proto.query.LeaderView.GrowingSegmentsEntry
nil, // 109: milvus.proto.query.LeaderView.PartitionStatsVersionsEntry
nil, // 110: milvus.proto.query.SegmentVersionInfo.IndexInfoEntry
nil, // 111: milvus.proto.query.SegmentVersionInfo.JsonStatsInfoEntry
nil, // 112: milvus.proto.query.CollectionLoadInfo.FieldIndexIDEntry
nil, // 113: milvus.proto.query.PartitionLoadInfo.FieldIndexIDEntry
nil, // 114: milvus.proto.query.Replica.ChannelNodeInfosEntry
nil, // 115: milvus.proto.query.SyncAction.PartitionStatsVersionsEntry
nil, // 116: milvus.proto.query.SyncAction.SealedSegmentRowCountEntry
nil, // 117: milvus.proto.query.ResourceGroupInfo.NumLoadedReplicaEntry
nil, // 118: milvus.proto.query.ResourceGroupInfo.NumOutgoingNodeEntry
nil, // 119: milvus.proto.query.ResourceGroupInfo.NumIncomingNodeEntry
(*commonpb.MsgBase)(nil), // 120: milvus.proto.common.MsgBase
(*commonpb.Status)(nil), // 121: milvus.proto.common.Status
(*schemapb.LongArray)(nil), // 122: milvus.proto.schema.LongArray
(*schemapb.CollectionSchema)(nil), // 123: milvus.proto.schema.CollectionSchema
(commonpb.LoadPriority)(0), // 124: milvus.proto.common.LoadPriority
(*internalpb.GetStatisticsRequest)(nil), // 125: milvus.proto.internal.GetStatisticsRequest
(*indexpb.IndexInfo)(nil), // 126: milvus.proto.index.IndexInfo
(*commonpb.KeyValuePair)(nil), // 127: milvus.proto.common.KeyValuePair
(*datapb.VchannelInfo)(nil), // 128: milvus.proto.data.VchannelInfo
(*datapb.SegmentInfo)(nil), // 129: milvus.proto.data.SegmentInfo
(*datapb.FieldBinlog)(nil), // 130: milvus.proto.data.FieldBinlog
(*msgpb.MsgPosition)(nil), // 131: milvus.proto.msg.MsgPosition
(datapb.SegmentLevel)(0), // 132: milvus.proto.data.SegmentLevel
(*internalpb.SearchRequest)(nil), // 133: milvus.proto.internal.SearchRequest
(*internalpb.RetrieveRequest)(nil), // 134: milvus.proto.internal.RetrieveRequest
(commonpb.SegmentState)(0), // 135: milvus.proto.common.SegmentState
(*rgpb.ResourceGroupConfig)(nil), // 136: milvus.proto.rg.ResourceGroupConfig
(*commonpb.NodeInfo)(nil), // 137: milvus.proto.common.NodeInfo
(*schemapb.IDs)(nil), // 138: milvus.proto.schema.IDs
(*datapb.TextIndexStats)(nil), // 139: milvus.proto.data.TextIndexStats
(*datapb.JsonKeyStats)(nil), // 140: milvus.proto.data.JsonKeyStats
(*internalpb.ShowConfigurationsRequest)(nil), // 141: milvus.proto.internal.ShowConfigurationsRequest
(*milvuspb.GetMetricsRequest)(nil), // 142: milvus.proto.milvus.GetMetricsRequest
(*milvuspb.GetReplicasRequest)(nil), // 143: milvus.proto.milvus.GetReplicasRequest
(*milvuspb.CheckHealthRequest)(nil), // 144: milvus.proto.milvus.CheckHealthRequest
(*milvuspb.CreateResourceGroupRequest)(nil), // 145: milvus.proto.milvus.CreateResourceGroupRequest
(*milvuspb.DropResourceGroupRequest)(nil), // 146: milvus.proto.milvus.DropResourceGroupRequest
(*milvuspb.TransferNodeRequest)(nil), // 147: milvus.proto.milvus.TransferNodeRequest
(*milvuspb.ListResourceGroupsRequest)(nil), // 148: milvus.proto.milvus.ListResourceGroupsRequest
(*milvuspb.GetComponentStatesRequest)(nil), // 149: milvus.proto.milvus.GetComponentStatesRequest
(*internalpb.GetTimeTickChannelRequest)(nil), // 150: milvus.proto.internal.GetTimeTickChannelRequest
(*internalpb.GetStatisticsChannelRequest)(nil), // 151: milvus.proto.internal.GetStatisticsChannelRequest
(*internalpb.ShowConfigurationsResponse)(nil), // 152: milvus.proto.internal.ShowConfigurationsResponse
(*milvuspb.GetMetricsResponse)(nil), // 153: milvus.proto.milvus.GetMetricsResponse
(*milvuspb.GetReplicasResponse)(nil), // 154: milvus.proto.milvus.GetReplicasResponse
(*milvuspb.CheckHealthResponse)(nil), // 155: milvus.proto.milvus.CheckHealthResponse
(*milvuspb.ListResourceGroupsResponse)(nil), // 156: milvus.proto.milvus.ListResourceGroupsResponse
(*milvuspb.ComponentStates)(nil), // 157: milvus.proto.milvus.ComponentStates
(*milvuspb.StringResponse)(nil), // 158: milvus.proto.milvus.StringResponse
(*DropIndexRequest)(nil), // 99: milvus.proto.query.DropIndexRequest
nil, // 100: milvus.proto.query.LoadCollectionRequest.FieldIndexIDEntry
nil, // 101: milvus.proto.query.LoadPartitionsRequest.FieldIndexIDEntry
nil, // 102: milvus.proto.query.UpdateResourceGroupsRequest.ResourceGroupsEntry
nil, // 103: milvus.proto.query.WatchDmChannelsRequest.SegmentInfosEntry
nil, // 104: milvus.proto.query.WatchDmChannelsRequest.SealedSegmentRowCountEntry
nil, // 105: milvus.proto.query.SegmentLoadInfo.TextStatsLogsEntry
nil, // 106: milvus.proto.query.SegmentLoadInfo.JsonKeyStatsLogsEntry
nil, // 107: milvus.proto.query.GetDataDistributionRequest.CheckpointsEntry
nil, // 108: milvus.proto.query.LeaderView.SegmentDistEntry
nil, // 109: milvus.proto.query.LeaderView.GrowingSegmentsEntry
nil, // 110: milvus.proto.query.LeaderView.PartitionStatsVersionsEntry
nil, // 111: milvus.proto.query.SegmentVersionInfo.IndexInfoEntry
nil, // 112: milvus.proto.query.SegmentVersionInfo.JsonStatsInfoEntry
nil, // 113: milvus.proto.query.CollectionLoadInfo.FieldIndexIDEntry
nil, // 114: milvus.proto.query.PartitionLoadInfo.FieldIndexIDEntry
nil, // 115: milvus.proto.query.Replica.ChannelNodeInfosEntry
nil, // 116: milvus.proto.query.SyncAction.PartitionStatsVersionsEntry
nil, // 117: milvus.proto.query.SyncAction.SealedSegmentRowCountEntry
nil, // 118: milvus.proto.query.ResourceGroupInfo.NumLoadedReplicaEntry
nil, // 119: milvus.proto.query.ResourceGroupInfo.NumOutgoingNodeEntry
nil, // 120: milvus.proto.query.ResourceGroupInfo.NumIncomingNodeEntry
(*commonpb.MsgBase)(nil), // 121: milvus.proto.common.MsgBase
(*commonpb.Status)(nil), // 122: milvus.proto.common.Status
(*schemapb.LongArray)(nil), // 123: milvus.proto.schema.LongArray
(*schemapb.CollectionSchema)(nil), // 124: milvus.proto.schema.CollectionSchema
(commonpb.LoadPriority)(0), // 125: milvus.proto.common.LoadPriority
(*internalpb.GetStatisticsRequest)(nil), // 126: milvus.proto.internal.GetStatisticsRequest
(*indexpb.IndexInfo)(nil), // 127: milvus.proto.index.IndexInfo
(*commonpb.KeyValuePair)(nil), // 128: milvus.proto.common.KeyValuePair
(*datapb.VchannelInfo)(nil), // 129: milvus.proto.data.VchannelInfo
(*datapb.SegmentInfo)(nil), // 130: milvus.proto.data.SegmentInfo
(*datapb.FieldBinlog)(nil), // 131: milvus.proto.data.FieldBinlog
(*msgpb.MsgPosition)(nil), // 132: milvus.proto.msg.MsgPosition
(datapb.SegmentLevel)(0), // 133: milvus.proto.data.SegmentLevel
(*internalpb.SearchRequest)(nil), // 134: milvus.proto.internal.SearchRequest
(*internalpb.RetrieveRequest)(nil), // 135: milvus.proto.internal.RetrieveRequest
(commonpb.SegmentState)(0), // 136: milvus.proto.common.SegmentState
(*rgpb.ResourceGroupConfig)(nil), // 137: milvus.proto.rg.ResourceGroupConfig
(*commonpb.NodeInfo)(nil), // 138: milvus.proto.common.NodeInfo
(*schemapb.IDs)(nil), // 139: milvus.proto.schema.IDs
(*datapb.TextIndexStats)(nil), // 140: milvus.proto.data.TextIndexStats
(*datapb.JsonKeyStats)(nil), // 141: milvus.proto.data.JsonKeyStats
(*internalpb.ShowConfigurationsRequest)(nil), // 142: milvus.proto.internal.ShowConfigurationsRequest
(*milvuspb.GetMetricsRequest)(nil), // 143: milvus.proto.milvus.GetMetricsRequest
(*milvuspb.GetReplicasRequest)(nil), // 144: milvus.proto.milvus.GetReplicasRequest
(*milvuspb.CheckHealthRequest)(nil), // 145: milvus.proto.milvus.CheckHealthRequest
(*milvuspb.CreateResourceGroupRequest)(nil), // 146: milvus.proto.milvus.CreateResourceGroupRequest
(*milvuspb.DropResourceGroupRequest)(nil), // 147: milvus.proto.milvus.DropResourceGroupRequest
(*milvuspb.TransferNodeRequest)(nil), // 148: milvus.proto.milvus.TransferNodeRequest
(*milvuspb.ListResourceGroupsRequest)(nil), // 149: milvus.proto.milvus.ListResourceGroupsRequest
(*milvuspb.GetComponentStatesRequest)(nil), // 150: milvus.proto.milvus.GetComponentStatesRequest
(*internalpb.GetTimeTickChannelRequest)(nil), // 151: milvus.proto.internal.GetTimeTickChannelRequest
(*internalpb.GetStatisticsChannelRequest)(nil), // 152: milvus.proto.internal.GetStatisticsChannelRequest
(*internalpb.ShowConfigurationsResponse)(nil), // 153: milvus.proto.internal.ShowConfigurationsResponse
(*milvuspb.GetMetricsResponse)(nil), // 154: milvus.proto.milvus.GetMetricsResponse
(*milvuspb.GetReplicasResponse)(nil), // 155: milvus.proto.milvus.GetReplicasResponse
(*milvuspb.CheckHealthResponse)(nil), // 156: milvus.proto.milvus.CheckHealthResponse
(*milvuspb.ListResourceGroupsResponse)(nil), // 157: milvus.proto.milvus.ListResourceGroupsResponse
(*milvuspb.ComponentStates)(nil), // 158: milvus.proto.milvus.ComponentStates
(*internalpb.GetStatisticsResponse)(nil), // 159: milvus.proto.internal.GetStatisticsResponse (*milvuspb.StringResponse)(nil), // 159: milvus.proto.milvus.StringResponse
(*internalpb.SearchResults)(nil), // 160: milvus.proto.internal.SearchResults (*internalpb.GetStatisticsResponse)(nil), // 160: milvus.proto.internal.GetStatisticsResponse
(*internalpb.RetrieveResults)(nil), // 161: milvus.proto.internal.RetrieveResults (*internalpb.SearchResults)(nil), // 161: milvus.proto.internal.SearchResults
(*milvuspb.RunAnalyzerResponse)(nil), // 162: milvus.proto.milvus.RunAnalyzerResponse (*internalpb.RetrieveResults)(nil), // 162: milvus.proto.internal.RetrieveResults
(*milvuspb.RunAnalyzerResponse)(nil), // 163: milvus.proto.milvus.RunAnalyzerResponse
} }
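// Editorial note (not part of the generated file): file_query_coord_proto_depIdxs
// maps each cross-type reference in query_coord.proto (message fields resolved via
// type_name, plus RPC input and output types) to an index into the
// file_query_coord_proto_goTypes table above. This regeneration registers the new
// DropIndexRequest message as type 99, so every previously existing type from old
// index 99 upward shifts up by one (e.g. milvus.proto.common.MsgBase moves from
// 120 to 121), which is why nearly every entry below changed.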
var file_query_coord_proto_depIdxs = []int32{
121, // 0: milvus.proto.query.ShowCollectionsRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 1: milvus.proto.query.ShowCollectionsResponse.status:type_name -> milvus.proto.common.Status
123, // 2: milvus.proto.query.ShowCollectionsResponse.load_fields:type_name -> milvus.proto.schema.LongArray
121, // 3: milvus.proto.query.ShowPartitionsRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 4: milvus.proto.query.ShowPartitionsResponse.status:type_name -> milvus.proto.common.Status
121, // 5: milvus.proto.query.LoadCollectionRequest.base:type_name -> milvus.proto.common.MsgBase
124, // 6: milvus.proto.query.LoadCollectionRequest.schema:type_name -> milvus.proto.schema.CollectionSchema
100, // 7: milvus.proto.query.LoadCollectionRequest.field_indexID:type_name -> milvus.proto.query.LoadCollectionRequest.FieldIndexIDEntry
125, // 8: milvus.proto.query.LoadCollectionRequest.priority:type_name -> milvus.proto.common.LoadPriority
121, // 9: milvus.proto.query.ReleaseCollectionRequest.base:type_name -> milvus.proto.common.MsgBase
126, // 10: milvus.proto.query.GetStatisticsRequest.req:type_name -> milvus.proto.internal.GetStatisticsRequest
1, // 11: milvus.proto.query.GetStatisticsRequest.scope:type_name -> milvus.proto.query.DataScope
121, // 12: milvus.proto.query.LoadPartitionsRequest.base:type_name -> milvus.proto.common.MsgBase
124, // 13: milvus.proto.query.LoadPartitionsRequest.schema:type_name -> milvus.proto.schema.CollectionSchema
101, // 14: milvus.proto.query.LoadPartitionsRequest.field_indexID:type_name -> milvus.proto.query.LoadPartitionsRequest.FieldIndexIDEntry
127, // 15: milvus.proto.query.LoadPartitionsRequest.index_info_list:type_name -> milvus.proto.index.IndexInfo
125, // 16: milvus.proto.query.LoadPartitionsRequest.priority:type_name -> milvus.proto.common.LoadPriority
121, // 17: milvus.proto.query.ReleasePartitionsRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 18: milvus.proto.query.GetPartitionStatesRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 19: milvus.proto.query.GetPartitionStatesResponse.status:type_name -> milvus.proto.common.Status
43, // 20: milvus.proto.query.GetPartitionStatesResponse.partition_descriptions:type_name -> milvus.proto.query.PartitionStates
121, // 21: milvus.proto.query.GetSegmentInfoRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 22: milvus.proto.query.GetSegmentInfoResponse.status:type_name -> milvus.proto.common.Status
44, // 23: milvus.proto.query.GetSegmentInfoResponse.infos:type_name -> milvus.proto.query.SegmentInfo
121, // 24: milvus.proto.query.GetShardLeadersRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 25: milvus.proto.query.GetShardLeadersResponse.status:type_name -> milvus.proto.common.Status
23, // 26: milvus.proto.query.GetShardLeadersResponse.shards:type_name -> milvus.proto.query.ShardLeadersList
121, // 27: milvus.proto.query.UpdateResourceGroupsRequest.base:type_name -> milvus.proto.common.MsgBase
102, // 28: milvus.proto.query.UpdateResourceGroupsRequest.resource_groups:type_name -> milvus.proto.query.UpdateResourceGroupsRequest.ResourceGroupsEntry
121, // 29: milvus.proto.query.SyncNewCreatedPartitionRequest.base:type_name -> milvus.proto.common.MsgBase
4, // 30: milvus.proto.query.LoadMetaInfo.load_type:type_name -> milvus.proto.query.LoadType
128, // 31: milvus.proto.query.LoadMetaInfo.db_properties:type_name -> milvus.proto.common.KeyValuePair
121, // 32: milvus.proto.query.WatchDmChannelsRequest.base:type_name -> milvus.proto.common.MsgBase
129, // 33: milvus.proto.query.WatchDmChannelsRequest.infos:type_name -> milvus.proto.data.VchannelInfo
124, // 34: milvus.proto.query.WatchDmChannelsRequest.schema:type_name -> milvus.proto.schema.CollectionSchema
130, // 35: milvus.proto.query.WatchDmChannelsRequest.exclude_infos:type_name -> milvus.proto.data.SegmentInfo
25, // 36: milvus.proto.query.WatchDmChannelsRequest.load_meta:type_name -> milvus.proto.query.LoadMetaInfo
103, // 37: milvus.proto.query.WatchDmChannelsRequest.segment_infos:type_name -> milvus.proto.query.WatchDmChannelsRequest.SegmentInfosEntry
127, // 38: milvus.proto.query.WatchDmChannelsRequest.index_info_list:type_name -> milvus.proto.index.IndexInfo
104, // 39: milvus.proto.query.WatchDmChannelsRequest.sealed_segment_row_count:type_name -> milvus.proto.query.WatchDmChannelsRequest.SealedSegmentRowCountEntry
121, // 40: milvus.proto.query.UnsubDmChannelRequest.base:type_name -> milvus.proto.common.MsgBase
131, // 41: milvus.proto.query.SegmentLoadInfo.binlog_paths:type_name -> milvus.proto.data.FieldBinlog
131, // 42: milvus.proto.query.SegmentLoadInfo.statslogs:type_name -> milvus.proto.data.FieldBinlog
131, // 43: milvus.proto.query.SegmentLoadInfo.deltalogs:type_name -> milvus.proto.data.FieldBinlog
29, // 44: milvus.proto.query.SegmentLoadInfo.index_infos:type_name -> milvus.proto.query.FieldIndexInfo
132, // 45: milvus.proto.query.SegmentLoadInfo.start_position:type_name -> milvus.proto.msg.MsgPosition
132, // 46: milvus.proto.query.SegmentLoadInfo.delta_position:type_name -> milvus.proto.msg.MsgPosition
133, // 47: milvus.proto.query.SegmentLoadInfo.level:type_name -> milvus.proto.data.SegmentLevel
105, // 48: milvus.proto.query.SegmentLoadInfo.textStatsLogs:type_name -> milvus.proto.query.SegmentLoadInfo.TextStatsLogsEntry
131, // 49: milvus.proto.query.SegmentLoadInfo.bm25logs:type_name -> milvus.proto.data.FieldBinlog
106, // 50: milvus.proto.query.SegmentLoadInfo.jsonKeyStatsLogs:type_name -> milvus.proto.query.SegmentLoadInfo.JsonKeyStatsLogsEntry
125, // 51: milvus.proto.query.SegmentLoadInfo.priority:type_name -> milvus.proto.common.LoadPriority
128, // 52: milvus.proto.query.FieldIndexInfo.index_params:type_name -> milvus.proto.common.KeyValuePair
121, // 53: milvus.proto.query.LoadSegmentsRequest.base:type_name -> milvus.proto.common.MsgBase
28, // 54: milvus.proto.query.LoadSegmentsRequest.infos:type_name -> milvus.proto.query.SegmentLoadInfo
124, // 55: milvus.proto.query.LoadSegmentsRequest.schema:type_name -> milvus.proto.schema.CollectionSchema
25, // 56: milvus.proto.query.LoadSegmentsRequest.load_meta:type_name -> milvus.proto.query.LoadMetaInfo
132, // 57: milvus.proto.query.LoadSegmentsRequest.delta_positions:type_name -> milvus.proto.msg.MsgPosition
0, // 58: milvus.proto.query.LoadSegmentsRequest.load_scope:type_name -> milvus.proto.query.LoadScope
127, // 59: milvus.proto.query.LoadSegmentsRequest.index_info_list:type_name -> milvus.proto.index.IndexInfo
121, // 60: milvus.proto.query.ReleaseSegmentsRequest.base:type_name -> milvus.proto.common.MsgBase
1, // 61: milvus.proto.query.ReleaseSegmentsRequest.scope:type_name -> milvus.proto.query.DataScope
132, // 62: milvus.proto.query.ReleaseSegmentsRequest.checkpoint:type_name -> milvus.proto.msg.MsgPosition
134, // 63: milvus.proto.query.SearchRequest.req:type_name -> milvus.proto.internal.SearchRequest
1, // 64: milvus.proto.query.SearchRequest.scope:type_name -> milvus.proto.query.DataScope
135, // 65: milvus.proto.query.QueryRequest.req:type_name -> milvus.proto.internal.RetrieveRequest
1, // 66: milvus.proto.query.QueryRequest.scope:type_name -> milvus.proto.query.DataScope
121, // 67: milvus.proto.query.SyncReplicaSegmentsRequest.base:type_name -> milvus.proto.common.MsgBase
36, // 68: milvus.proto.query.SyncReplicaSegmentsRequest.replica_segments:type_name -> milvus.proto.query.ReplicaSegmentsInfo
121, // 69: milvus.proto.query.GetLoadInfoRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 70: milvus.proto.query.GetLoadInfoResponse.status:type_name -> milvus.proto.common.Status
124, // 71: milvus.proto.query.GetLoadInfoResponse.schema:type_name -> milvus.proto.schema.CollectionSchema
4, // 72: milvus.proto.query.GetLoadInfoResponse.load_type:type_name -> milvus.proto.query.LoadType
121, // 73: milvus.proto.query.HandoffSegmentsRequest.base:type_name -> milvus.proto.common.MsgBase
44, // 74: milvus.proto.query.HandoffSegmentsRequest.segmentInfos:type_name -> milvus.proto.query.SegmentInfo
121, // 75: milvus.proto.query.LoadBalanceRequest.base:type_name -> milvus.proto.common.MsgBase
3, // 76: milvus.proto.query.LoadBalanceRequest.balance_reason:type_name -> milvus.proto.query.TriggerCondition
44, // 77: milvus.proto.query.QueryChannelInfo.global_sealed_segments:type_name -> milvus.proto.query.SegmentInfo
132, // 78: milvus.proto.query.QueryChannelInfo.seek_position:type_name -> milvus.proto.msg.MsgPosition
2, // 79: milvus.proto.query.PartitionStates.state:type_name -> milvus.proto.query.PartitionState
136, // 80: milvus.proto.query.SegmentInfo.segment_state:type_name -> milvus.proto.common.SegmentState
29, // 81: milvus.proto.query.SegmentInfo.index_infos:type_name -> milvus.proto.query.FieldIndexInfo
133, // 82: milvus.proto.query.SegmentInfo.level:type_name -> milvus.proto.data.SegmentLevel
43, // 83: milvus.proto.query.CollectionInfo.partition_states:type_name -> milvus.proto.query.PartitionStates
4, // 84: milvus.proto.query.CollectionInfo.load_type:type_name -> milvus.proto.query.LoadType
124, // 85: milvus.proto.query.CollectionInfo.schema:type_name -> milvus.proto.schema.CollectionSchema
46, // 86: milvus.proto.query.UnsubscribeChannelInfo.collection_channels:type_name -> milvus.proto.query.UnsubscribeChannels
44, // 87: milvus.proto.query.SegmentChangeInfo.online_segments:type_name -> milvus.proto.query.SegmentInfo
44, // 88: milvus.proto.query.SegmentChangeInfo.offline_segments:type_name -> milvus.proto.query.SegmentInfo
121, // 89: milvus.proto.query.SealedSegmentsChangeInfo.base:type_name -> milvus.proto.common.MsgBase
48, // 90: milvus.proto.query.SealedSegmentsChangeInfo.infos:type_name -> milvus.proto.query.SegmentChangeInfo
121, // 91: milvus.proto.query.GetDataDistributionRequest.base:type_name -> milvus.proto.common.MsgBase
107, // 92: milvus.proto.query.GetDataDistributionRequest.checkpoints:type_name -> milvus.proto.query.GetDataDistributionRequest.CheckpointsEntry
122, // 93: milvus.proto.query.GetDataDistributionResponse.status:type_name -> milvus.proto.common.Status
55, // 94: milvus.proto.query.GetDataDistributionResponse.segments:type_name -> milvus.proto.query.SegmentVersionInfo
56, // 95: milvus.proto.query.GetDataDistributionResponse.channels:type_name -> milvus.proto.query.ChannelVersionInfo
52, // 96: milvus.proto.query.GetDataDistributionResponse.leader_views:type_name -> milvus.proto.query.LeaderView
108, // 97: milvus.proto.query.LeaderView.segment_dist:type_name -> milvus.proto.query.LeaderView.SegmentDistEntry
109, // 98: milvus.proto.query.LeaderView.growing_segments:type_name -> milvus.proto.query.LeaderView.GrowingSegmentsEntry
110, // 99: milvus.proto.query.LeaderView.partition_stats_versions:type_name -> milvus.proto.query.LeaderView.PartitionStatsVersionsEntry
53, // 100: milvus.proto.query.LeaderView.status:type_name -> milvus.proto.query.LeaderViewStatus
111, // 101: milvus.proto.query.SegmentVersionInfo.index_info:type_name -> milvus.proto.query.SegmentVersionInfo.IndexInfoEntry
133, // 102: milvus.proto.query.SegmentVersionInfo.level:type_name -> milvus.proto.data.SegmentLevel
112, // 103: milvus.proto.query.SegmentVersionInfo.json_stats_info:type_name -> milvus.proto.query.SegmentVersionInfo.JsonStatsInfoEntry
5, // 104: milvus.proto.query.CollectionLoadInfo.status:type_name -> milvus.proto.query.LoadStatus
113, // 105: milvus.proto.query.CollectionLoadInfo.field_indexID:type_name -> milvus.proto.query.CollectionLoadInfo.FieldIndexIDEntry
4, // 106: milvus.proto.query.CollectionLoadInfo.load_type:type_name -> milvus.proto.query.LoadType
5, // 107: milvus.proto.query.PartitionLoadInfo.status:type_name -> milvus.proto.query.LoadStatus
114, // 108: milvus.proto.query.PartitionLoadInfo.field_indexID:type_name -> milvus.proto.query.PartitionLoadInfo.FieldIndexIDEntry
115, // 109: milvus.proto.query.Replica.channel_node_infos:type_name -> milvus.proto.query.Replica.ChannelNodeInfosEntry
6, // 110: milvus.proto.query.SyncAction.type:type_name -> milvus.proto.query.SyncType
28, // 111: milvus.proto.query.SyncAction.info:type_name -> milvus.proto.query.SegmentLoadInfo
132, // 112: milvus.proto.query.SyncAction.checkpoint:type_name -> milvus.proto.msg.MsgPosition
116, // 113: milvus.proto.query.SyncAction.partition_stats_versions:type_name -> milvus.proto.query.SyncAction.PartitionStatsVersionsEntry
132, // 114: milvus.proto.query.SyncAction.deleteCP:type_name -> milvus.proto.msg.MsgPosition
117, // 115: milvus.proto.query.SyncAction.sealed_segment_row_count:type_name -> milvus.proto.query.SyncAction.SealedSegmentRowCountEntry
121, // 116: milvus.proto.query.SyncDistributionRequest.base:type_name -> milvus.proto.common.MsgBase
61, // 117: milvus.proto.query.SyncDistributionRequest.actions:type_name -> milvus.proto.query.SyncAction
124, // 118: milvus.proto.query.SyncDistributionRequest.schema:type_name -> milvus.proto.schema.CollectionSchema
25, // 119: milvus.proto.query.SyncDistributionRequest.load_meta:type_name -> milvus.proto.query.LoadMetaInfo
127, // 120: milvus.proto.query.SyncDistributionRequest.index_info_list:type_name -> milvus.proto.index.IndexInfo
137, // 121: milvus.proto.query.ResourceGroup.config:type_name -> milvus.proto.rg.ResourceGroupConfig
121, // 122: milvus.proto.query.TransferReplicaRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 123: milvus.proto.query.DescribeResourceGroupRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 124: milvus.proto.query.DescribeResourceGroupResponse.status:type_name -> milvus.proto.common.Status
67, // 125: milvus.proto.query.DescribeResourceGroupResponse.resource_group:type_name -> milvus.proto.query.ResourceGroupInfo
118, // 126: milvus.proto.query.ResourceGroupInfo.num_loaded_replica:type_name -> milvus.proto.query.ResourceGroupInfo.NumLoadedReplicaEntry
119, // 127: milvus.proto.query.ResourceGroupInfo.num_outgoing_node:type_name -> milvus.proto.query.ResourceGroupInfo.NumOutgoingNodeEntry
120, // 128: milvus.proto.query.ResourceGroupInfo.num_incoming_node:type_name -> milvus.proto.query.ResourceGroupInfo.NumIncomingNodeEntry
137, // 129: milvus.proto.query.ResourceGroupInfo.config:type_name -> milvus.proto.rg.ResourceGroupConfig
138, // 130: milvus.proto.query.ResourceGroupInfo.nodes:type_name -> milvus.proto.common.NodeInfo
121, // 131: milvus.proto.query.DeleteRequest.base:type_name -> milvus.proto.common.MsgBase
139, // 132: milvus.proto.query.DeleteRequest.primary_keys:type_name -> milvus.proto.schema.IDs
1, // 133: milvus.proto.query.DeleteRequest.scope:type_name -> milvus.proto.query.DataScope
121, // 134: milvus.proto.query.DeleteBatchRequest.base:type_name -> milvus.proto.common.MsgBase
139, // 135: milvus.proto.query.DeleteBatchRequest.primary_keys:type_name -> milvus.proto.schema.IDs
1, // 136: milvus.proto.query.DeleteBatchRequest.scope:type_name -> milvus.proto.query.DataScope
122, // 137: milvus.proto.query.DeleteBatchResponse.status:type_name -> milvus.proto.common.Status
121, // 138: milvus.proto.query.ActivateCheckerRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 139: milvus.proto.query.DeactivateCheckerRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 140: milvus.proto.query.ListCheckersRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 141: milvus.proto.query.ListCheckersResponse.status:type_name -> milvus.proto.common.Status
75, // 142: milvus.proto.query.ListCheckersResponse.checkerInfos:type_name -> milvus.proto.query.CheckerInfo
133, // 143: milvus.proto.query.SegmentTarget.level:type_name -> milvus.proto.data.SegmentLevel
76, // 144: milvus.proto.query.PartitionTarget.segments:type_name -> milvus.proto.query.SegmentTarget
77, // 145: milvus.proto.query.ChannelTarget.partition_targets:type_name -> milvus.proto.query.PartitionTarget
132, // 146: milvus.proto.query.ChannelTarget.seek_position:type_name -> milvus.proto.msg.MsgPosition
132, // 147: milvus.proto.query.ChannelTarget.delete_checkpoint:type_name -> milvus.proto.msg.MsgPosition
78, // 148: milvus.proto.query.CollectionTarget.Channel_targets:type_name -> milvus.proto.query.ChannelTarget
121, // 149: milvus.proto.query.ListQueryNodeRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 150: milvus.proto.query.ListQueryNodeResponse.status:type_name -> milvus.proto.common.Status
80, // 151: milvus.proto.query.ListQueryNodeResponse.nodeInfos:type_name -> milvus.proto.query.NodeInfo
121, // 152: milvus.proto.query.GetQueryNodeDistributionRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 153: milvus.proto.query.GetQueryNodeDistributionResponse.status:type_name -> milvus.proto.common.Status
121, // 154: milvus.proto.query.SuspendBalanceRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 155: milvus.proto.query.ResumeBalanceRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 156: milvus.proto.query.CheckBalanceStatusRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 157: milvus.proto.query.CheckBalanceStatusResponse.status:type_name -> milvus.proto.common.Status
121, // 158: milvus.proto.query.SuspendNodeRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 159: milvus.proto.query.ResumeNodeRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 160: milvus.proto.query.TransferSegmentRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 161: milvus.proto.query.TransferChannelRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 162: milvus.proto.query.CheckQueryNodeDistributionRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 163: milvus.proto.query.UpdateLoadConfigRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 164: milvus.proto.query.UpdateSchemaRequest.base:type_name -> milvus.proto.common.MsgBase
124, // 165: milvus.proto.query.UpdateSchemaRequest.schema:type_name -> milvus.proto.schema.CollectionSchema
121, // 166: milvus.proto.query.RunAnalyzerRequest.base:type_name -> milvus.proto.common.MsgBase
121, // 167: milvus.proto.query.ListLoadedSegmentsRequest.base:type_name -> milvus.proto.common.MsgBase
122, // 168: milvus.proto.query.ListLoadedSegmentsResponse.status:type_name -> milvus.proto.common.Status
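// New in this change: the base field of DropIndexRequest, the request message
// added so an index can be dropped without releasing the collection.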
121, // 169: milvus.proto.query.DropIndexRequest.base:type_name -> milvus.proto.common.MsgBase
137, // 170: milvus.proto.query.UpdateResourceGroupsRequest.ResourceGroupsEntry.value:type_name -> milvus.proto.rg.ResourceGroupConfig
130, // 171: milvus.proto.query.WatchDmChannelsRequest.SegmentInfosEntry.value:type_name -> milvus.proto.data.SegmentInfo
140, // 172: milvus.proto.query.SegmentLoadInfo.TextStatsLogsEntry.value:type_name -> milvus.proto.data.TextIndexStats
141, // 173: milvus.proto.query.SegmentLoadInfo.JsonKeyStatsLogsEntry.value:type_name -> milvus.proto.data.JsonKeyStats
132, // 174: milvus.proto.query.GetDataDistributionRequest.CheckpointsEntry.value:type_name -> milvus.proto.msg.MsgPosition
54, // 175: milvus.proto.query.LeaderView.SegmentDistEntry.value:type_name -> milvus.proto.query.SegmentDist
132, // 176: milvus.proto.query.LeaderView.GrowingSegmentsEntry.value:type_name -> milvus.proto.msg.MsgPosition
29, // 177: milvus.proto.query.SegmentVersionInfo.IndexInfoEntry.value:type_name -> milvus.proto.query.FieldIndexInfo
30, // 178: milvus.proto.query.SegmentVersionInfo.JsonStatsInfoEntry.value:type_name -> milvus.proto.query.JsonStatsInfo
59, // 179: milvus.proto.query.Replica.ChannelNodeInfosEntry.value:type_name -> milvus.proto.query.ChannelNodeInfo
7, // 180: milvus.proto.query.QueryCoord.ShowLoadCollections:input_type -> milvus.proto.query.ShowCollectionsRequest
9, // 181: milvus.proto.query.QueryCoord.ShowLoadPartitions:input_type -> milvus.proto.query.ShowPartitionsRequest
14, // 182: milvus.proto.query.QueryCoord.LoadPartitions:input_type -> milvus.proto.query.LoadPartitionsRequest
15, // 183: milvus.proto.query.QueryCoord.ReleasePartitions:input_type -> milvus.proto.query.ReleasePartitionsRequest
11, // 184: milvus.proto.query.QueryCoord.LoadCollection:input_type -> milvus.proto.query.LoadCollectionRequest
12, // 185: milvus.proto.query.QueryCoord.ReleaseCollection:input_type -> milvus.proto.query.ReleaseCollectionRequest
24, // 186: milvus.proto.query.QueryCoord.SyncNewCreatedPartition:input_type -> milvus.proto.query.SyncNewCreatedPartitionRequest
16, // 187: milvus.proto.query.QueryCoord.GetPartitionStates:input_type -> milvus.proto.query.GetPartitionStatesRequest
18, // 188: milvus.proto.query.QueryCoord.GetLoadSegmentInfo:input_type -> milvus.proto.query.GetSegmentInfoRequest
40, // 189: milvus.proto.query.QueryCoord.LoadBalance:input_type -> milvus.proto.query.LoadBalanceRequest
142, // 190: milvus.proto.query.QueryCoord.ShowConfigurations:input_type -> milvus.proto.internal.ShowConfigurationsRequest
143, // 191: milvus.proto.query.QueryCoord.GetMetrics:input_type -> milvus.proto.milvus.GetMetricsRequest
144, // 192: milvus.proto.query.QueryCoord.GetReplicas:input_type -> milvus.proto.milvus.GetReplicasRequest
20, // 193: milvus.proto.query.QueryCoord.GetShardLeaders:input_type -> milvus.proto.query.GetShardLeadersRequest
145, // 194: milvus.proto.query.QueryCoord.CheckHealth:input_type -> milvus.proto.milvus.CheckHealthRequest
146, // 195: milvus.proto.query.QueryCoord.CreateResourceGroup:input_type -> milvus.proto.milvus.CreateResourceGroupRequest
22, // 196: milvus.proto.query.QueryCoord.UpdateResourceGroups:input_type -> milvus.proto.query.UpdateResourceGroupsRequest
147, // 197: milvus.proto.query.QueryCoord.DropResourceGroup:input_type -> milvus.proto.milvus.DropResourceGroupRequest
148, // 198: milvus.proto.query.QueryCoord.TransferNode:input_type -> milvus.proto.milvus.TransferNodeRequest
64, // 199: milvus.proto.query.QueryCoord.TransferReplica:input_type -> milvus.proto.query.TransferReplicaRequest
149, // 200: milvus.proto.query.QueryCoord.ListResourceGroups:input_type -> milvus.proto.milvus.ListResourceGroupsRequest
65, // 201: milvus.proto.query.QueryCoord.DescribeResourceGroup:input_type -> milvus.proto.query.DescribeResourceGroupRequest
97, // 202: milvus.proto.query.QueryCoord.ListLoadedSegments:input_type -> milvus.proto.query.ListLoadedSegmentsRequest
73, // 203: milvus.proto.query.QueryCoord.ListCheckers:input_type -> milvus.proto.query.ListCheckersRequest
71, // 204: milvus.proto.query.QueryCoord.ActivateChecker:input_type -> milvus.proto.query.ActivateCheckerRequest
72, // 205: milvus.proto.query.QueryCoord.DeactivateChecker:input_type -> milvus.proto.query.DeactivateCheckerRequest
81, // 206: milvus.proto.query.QueryCoord.ListQueryNode:input_type -> milvus.proto.query.ListQueryNodeRequest
83, // 207: milvus.proto.query.QueryCoord.GetQueryNodeDistribution:input_type -> milvus.proto.query.GetQueryNodeDistributionRequest
85, // 208: milvus.proto.query.QueryCoord.SuspendBalance:input_type -> milvus.proto.query.SuspendBalanceRequest
86, // 209: milvus.proto.query.QueryCoord.ResumeBalance:input_type -> milvus.proto.query.ResumeBalanceRequest
87, // 210: milvus.proto.query.QueryCoord.CheckBalanceStatus:input_type -> milvus.proto.query.CheckBalanceStatusRequest
89, // 211: milvus.proto.query.QueryCoord.SuspendNode:input_type -> milvus.proto.query.SuspendNodeRequest
90, // 212: milvus.proto.query.QueryCoord.ResumeNode:input_type -> milvus.proto.query.ResumeNodeRequest
91, // 213: milvus.proto.query.QueryCoord.TransferSegment:input_type -> milvus.proto.query.TransferSegmentRequest
92, // 214: milvus.proto.query.QueryCoord.TransferChannel:input_type -> milvus.proto.query.TransferChannelRequest
93, // 215: milvus.proto.query.QueryCoord.CheckQueryNodeDistribution:input_type -> milvus.proto.query.CheckQueryNodeDistributionRequest
94, // 216: milvus.proto.query.QueryCoord.UpdateLoadConfig:input_type -> milvus.proto.query.UpdateLoadConfigRequest
150, // 217: milvus.proto.query.QueryNode.GetComponentStates:input_type -> milvus.proto.milvus.GetComponentStatesRequest
151, // 218: milvus.proto.query.QueryNode.GetTimeTickChannel:input_type -> milvus.proto.internal.GetTimeTickChannelRequest
152, // 219: milvus.proto.query.QueryNode.GetStatisticsChannel:input_type -> milvus.proto.internal.GetStatisticsChannelRequest
26, // 220: milvus.proto.query.QueryNode.WatchDmChannels:input_type -> milvus.proto.query.WatchDmChannelsRequest
27, // 221: milvus.proto.query.QueryNode.UnsubDmChannel:input_type -> milvus.proto.query.UnsubDmChannelRequest
31, // 222: milvus.proto.query.QueryNode.LoadSegments:input_type -> milvus.proto.query.LoadSegmentsRequest
12, // 223: milvus.proto.query.QueryNode.ReleaseCollection:input_type -> milvus.proto.query.ReleaseCollectionRequest
14, // 224: milvus.proto.query.QueryNode.LoadPartitions:input_type -> milvus.proto.query.LoadPartitionsRequest
15, // 225: milvus.proto.query.QueryNode.ReleasePartitions:input_type -> milvus.proto.query.ReleasePartitionsRequest
32, // 226: milvus.proto.query.QueryNode.ReleaseSegments:input_type -> milvus.proto.query.ReleaseSegmentsRequest
18, // 227: milvus.proto.query.QueryNode.GetSegmentInfo:input_type -> milvus.proto.query.GetSegmentInfoRequest
35, // 228: milvus.proto.query.QueryNode.SyncReplicaSegments:input_type -> milvus.proto.query.SyncReplicaSegmentsRequest
13, // 229: milvus.proto.query.QueryNode.GetStatistics:input_type -> milvus.proto.query.GetStatisticsRequest
33, // 230: milvus.proto.query.QueryNode.Search:input_type -> milvus.proto.query.SearchRequest
33, // 231: milvus.proto.query.QueryNode.SearchSegments:input_type -> milvus.proto.query.SearchRequest
34, // 232: milvus.proto.query.QueryNode.Query:input_type -> milvus.proto.query.QueryRequest
34, // 233: milvus.proto.query.QueryNode.QueryStream:input_type -> milvus.proto.query.QueryRequest
34, // 234: milvus.proto.query.QueryNode.QuerySegments:input_type -> milvus.proto.query.QueryRequest
34, // 235: milvus.proto.query.QueryNode.QueryStreamSegments:input_type -> milvus.proto.query.QueryRequest
142, // 236: milvus.proto.query.QueryNode.ShowConfigurations:input_type -> milvus.proto.internal.ShowConfigurationsRequest
143, // 237: milvus.proto.query.QueryNode.GetMetrics:input_type -> milvus.proto.milvus.GetMetricsRequest
50, // 238: milvus.proto.query.QueryNode.GetDataDistribution:input_type -> milvus.proto.query.GetDataDistributionRequest
62, // 239: milvus.proto.query.QueryNode.SyncDistribution:input_type -> milvus.proto.query.SyncDistributionRequest
68, // 240: milvus.proto.query.QueryNode.Delete:input_type -> milvus.proto.query.DeleteRequest
69, // 241: milvus.proto.query.QueryNode.DeleteBatch:input_type -> milvus.proto.query.DeleteBatchRequest
95, // 242: milvus.proto.query.QueryNode.UpdateSchema:input_type -> milvus.proto.query.UpdateSchemaRequest
96, // 243: milvus.proto.query.QueryNode.RunAnalyzer:input_type -> milvus.proto.query.RunAnalyzerRequest
10, // 244: milvus.proto.query.QueryCoord.ShowLoadPartitions:output_type -> milvus.proto.query.ShowPartitionsResponse 99, // 244: milvus.proto.query.QueryNode.DropIndex:input_type -> milvus.proto.query.DropIndexRequest
121, // 245: milvus.proto.query.QueryCoord.LoadPartitions:output_type -> milvus.proto.common.Status 8, // 245: milvus.proto.query.QueryCoord.ShowLoadCollections:output_type -> milvus.proto.query.ShowCollectionsResponse
121, // 246: milvus.proto.query.QueryCoord.ReleasePartitions:output_type -> milvus.proto.common.Status 10, // 246: milvus.proto.query.QueryCoord.ShowLoadPartitions:output_type -> milvus.proto.query.ShowPartitionsResponse
121, // 247: milvus.proto.query.QueryCoord.LoadCollection:output_type -> milvus.proto.common.Status 122, // 247: milvus.proto.query.QueryCoord.LoadPartitions:output_type -> milvus.proto.common.Status
121, // 248: milvus.proto.query.QueryCoord.ReleaseCollection:output_type -> milvus.proto.common.Status 122, // 248: milvus.proto.query.QueryCoord.ReleasePartitions:output_type -> milvus.proto.common.Status
121, // 249: milvus.proto.query.QueryCoord.SyncNewCreatedPartition:output_type -> milvus.proto.common.Status 122, // 249: milvus.proto.query.QueryCoord.LoadCollection:output_type -> milvus.proto.common.Status
17, // 250: milvus.proto.query.QueryCoord.GetPartitionStates:output_type -> milvus.proto.query.GetPartitionStatesResponse 122, // 250: milvus.proto.query.QueryCoord.ReleaseCollection:output_type -> milvus.proto.common.Status
19, // 251: milvus.proto.query.QueryCoord.GetLoadSegmentInfo:output_type -> milvus.proto.query.GetSegmentInfoResponse 122, // 251: milvus.proto.query.QueryCoord.SyncNewCreatedPartition:output_type -> milvus.proto.common.Status
121, // 252: milvus.proto.query.QueryCoord.LoadBalance:output_type -> milvus.proto.common.Status 17, // 252: milvus.proto.query.QueryCoord.GetPartitionStates:output_type -> milvus.proto.query.GetPartitionStatesResponse
152, // 253: milvus.proto.query.QueryCoord.ShowConfigurations:output_type -> milvus.proto.internal.ShowConfigurationsResponse 19, // 253: milvus.proto.query.QueryCoord.GetLoadSegmentInfo:output_type -> milvus.proto.query.GetSegmentInfoResponse
153, // 254: milvus.proto.query.QueryCoord.GetMetrics:output_type -> milvus.proto.milvus.GetMetricsResponse 122, // 254: milvus.proto.query.QueryCoord.LoadBalance:output_type -> milvus.proto.common.Status
154, // 255: milvus.proto.query.QueryCoord.GetReplicas:output_type -> milvus.proto.milvus.GetReplicasResponse 153, // 255: milvus.proto.query.QueryCoord.ShowConfigurations:output_type -> milvus.proto.internal.ShowConfigurationsResponse
21, // 256: milvus.proto.query.QueryCoord.GetShardLeaders:output_type -> milvus.proto.query.GetShardLeadersResponse 154, // 256: milvus.proto.query.QueryCoord.GetMetrics:output_type -> milvus.proto.milvus.GetMetricsResponse
155, // 257: milvus.proto.query.QueryCoord.CheckHealth:output_type -> milvus.proto.milvus.CheckHealthResponse 155, // 257: milvus.proto.query.QueryCoord.GetReplicas:output_type -> milvus.proto.milvus.GetReplicasResponse
121, // 258: milvus.proto.query.QueryCoord.CreateResourceGroup:output_type -> milvus.proto.common.Status 21, // 258: milvus.proto.query.QueryCoord.GetShardLeaders:output_type -> milvus.proto.query.GetShardLeadersResponse
121, // 259: milvus.proto.query.QueryCoord.UpdateResourceGroups:output_type -> milvus.proto.common.Status 156, // 259: milvus.proto.query.QueryCoord.CheckHealth:output_type -> milvus.proto.milvus.CheckHealthResponse
121, // 260: milvus.proto.query.QueryCoord.DropResourceGroup:output_type -> milvus.proto.common.Status 122, // 260: milvus.proto.query.QueryCoord.CreateResourceGroup:output_type -> milvus.proto.common.Status
121, // 261: milvus.proto.query.QueryCoord.TransferNode:output_type -> milvus.proto.common.Status 122, // 261: milvus.proto.query.QueryCoord.UpdateResourceGroups:output_type -> milvus.proto.common.Status
121, // 262: milvus.proto.query.QueryCoord.TransferReplica:output_type -> milvus.proto.common.Status 122, // 262: milvus.proto.query.QueryCoord.DropResourceGroup:output_type -> milvus.proto.common.Status
156, // 263: milvus.proto.query.QueryCoord.ListResourceGroups:output_type -> milvus.proto.milvus.ListResourceGroupsResponse 122, // 263: milvus.proto.query.QueryCoord.TransferNode:output_type -> milvus.proto.common.Status
66, // 264: milvus.proto.query.QueryCoord.DescribeResourceGroup:output_type -> milvus.proto.query.DescribeResourceGroupResponse 122, // 264: milvus.proto.query.QueryCoord.TransferReplica:output_type -> milvus.proto.common.Status
98, // 265: milvus.proto.query.QueryCoord.ListLoadedSegments:output_type -> milvus.proto.query.ListLoadedSegmentsResponse 157, // 265: milvus.proto.query.QueryCoord.ListResourceGroups:output_type -> milvus.proto.milvus.ListResourceGroupsResponse
74, // 266: milvus.proto.query.QueryCoord.ListCheckers:output_type -> milvus.proto.query.ListCheckersResponse 66, // 266: milvus.proto.query.QueryCoord.DescribeResourceGroup:output_type -> milvus.proto.query.DescribeResourceGroupResponse
121, // 267: milvus.proto.query.QueryCoord.ActivateChecker:output_type -> milvus.proto.common.Status 98, // 267: milvus.proto.query.QueryCoord.ListLoadedSegments:output_type -> milvus.proto.query.ListLoadedSegmentsResponse
121, // 268: milvus.proto.query.QueryCoord.DeactivateChecker:output_type -> milvus.proto.common.Status 74, // 268: milvus.proto.query.QueryCoord.ListCheckers:output_type -> milvus.proto.query.ListCheckersResponse
82, // 269: milvus.proto.query.QueryCoord.ListQueryNode:output_type -> milvus.proto.query.ListQueryNodeResponse 122, // 269: milvus.proto.query.QueryCoord.ActivateChecker:output_type -> milvus.proto.common.Status
84, // 270: milvus.proto.query.QueryCoord.GetQueryNodeDistribution:output_type -> milvus.proto.query.GetQueryNodeDistributionResponse 122, // 270: milvus.proto.query.QueryCoord.DeactivateChecker:output_type -> milvus.proto.common.Status
121, // 271: milvus.proto.query.QueryCoord.SuspendBalance:output_type -> milvus.proto.common.Status 82, // 271: milvus.proto.query.QueryCoord.ListQueryNode:output_type -> milvus.proto.query.ListQueryNodeResponse
121, // 272: milvus.proto.query.QueryCoord.ResumeBalance:output_type -> milvus.proto.common.Status 84, // 272: milvus.proto.query.QueryCoord.GetQueryNodeDistribution:output_type -> milvus.proto.query.GetQueryNodeDistributionResponse
88, // 273: milvus.proto.query.QueryCoord.CheckBalanceStatus:output_type -> milvus.proto.query.CheckBalanceStatusResponse 122, // 273: milvus.proto.query.QueryCoord.SuspendBalance:output_type -> milvus.proto.common.Status
121, // 274: milvus.proto.query.QueryCoord.SuspendNode:output_type -> milvus.proto.common.Status 122, // 274: milvus.proto.query.QueryCoord.ResumeBalance:output_type -> milvus.proto.common.Status
121, // 275: milvus.proto.query.QueryCoord.ResumeNode:output_type -> milvus.proto.common.Status 88, // 275: milvus.proto.query.QueryCoord.CheckBalanceStatus:output_type -> milvus.proto.query.CheckBalanceStatusResponse
121, // 276: milvus.proto.query.QueryCoord.TransferSegment:output_type -> milvus.proto.common.Status 122, // 276: milvus.proto.query.QueryCoord.SuspendNode:output_type -> milvus.proto.common.Status
121, // 277: milvus.proto.query.QueryCoord.TransferChannel:output_type -> milvus.proto.common.Status 122, // 277: milvus.proto.query.QueryCoord.ResumeNode:output_type -> milvus.proto.common.Status
121, // 278: milvus.proto.query.QueryCoord.CheckQueryNodeDistribution:output_type -> milvus.proto.common.Status 122, // 278: milvus.proto.query.QueryCoord.TransferSegment:output_type -> milvus.proto.common.Status
121, // 279: milvus.proto.query.QueryCoord.UpdateLoadConfig:output_type -> milvus.proto.common.Status 122, // 279: milvus.proto.query.QueryCoord.TransferChannel:output_type -> milvus.proto.common.Status
157, // 280: milvus.proto.query.QueryNode.GetComponentStates:output_type -> milvus.proto.milvus.ComponentStates 122, // 280: milvus.proto.query.QueryCoord.CheckQueryNodeDistribution:output_type -> milvus.proto.common.Status
158, // 281: milvus.proto.query.QueryNode.GetTimeTickChannel:output_type -> milvus.proto.milvus.StringResponse 122, // 281: milvus.proto.query.QueryCoord.UpdateLoadConfig:output_type -> milvus.proto.common.Status
158, // 282: milvus.proto.query.QueryNode.GetStatisticsChannel:output_type -> milvus.proto.milvus.StringResponse 158, // 282: milvus.proto.query.QueryNode.GetComponentStates:output_type -> milvus.proto.milvus.ComponentStates
121, // 283: milvus.proto.query.QueryNode.WatchDmChannels:output_type -> milvus.proto.common.Status 159, // 283: milvus.proto.query.QueryNode.GetTimeTickChannel:output_type -> milvus.proto.milvus.StringResponse
121, // 284: milvus.proto.query.QueryNode.UnsubDmChannel:output_type -> milvus.proto.common.Status 159, // 284: milvus.proto.query.QueryNode.GetStatisticsChannel:output_type -> milvus.proto.milvus.StringResponse
121, // 285: milvus.proto.query.QueryNode.LoadSegments:output_type -> milvus.proto.common.Status 122, // 285: milvus.proto.query.QueryNode.WatchDmChannels:output_type -> milvus.proto.common.Status
121, // 286: milvus.proto.query.QueryNode.ReleaseCollection:output_type -> milvus.proto.common.Status 122, // 286: milvus.proto.query.QueryNode.UnsubDmChannel:output_type -> milvus.proto.common.Status
121, // 287: milvus.proto.query.QueryNode.LoadPartitions:output_type -> milvus.proto.common.Status 122, // 287: milvus.proto.query.QueryNode.LoadSegments:output_type -> milvus.proto.common.Status
121, // 288: milvus.proto.query.QueryNode.ReleasePartitions:output_type -> milvus.proto.common.Status 122, // 288: milvus.proto.query.QueryNode.ReleaseCollection:output_type -> milvus.proto.common.Status
121, // 289: milvus.proto.query.QueryNode.ReleaseSegments:output_type -> milvus.proto.common.Status 122, // 289: milvus.proto.query.QueryNode.LoadPartitions:output_type -> milvus.proto.common.Status
19, // 290: milvus.proto.query.QueryNode.GetSegmentInfo:output_type -> milvus.proto.query.GetSegmentInfoResponse 122, // 290: milvus.proto.query.QueryNode.ReleasePartitions:output_type -> milvus.proto.common.Status
121, // 291: milvus.proto.query.QueryNode.SyncReplicaSegments:output_type -> milvus.proto.common.Status 122, // 291: milvus.proto.query.QueryNode.ReleaseSegments:output_type -> milvus.proto.common.Status
159, // 292: milvus.proto.query.QueryNode.GetStatistics:output_type -> milvus.proto.internal.GetStatisticsResponse 19, // 292: milvus.proto.query.QueryNode.GetSegmentInfo:output_type -> milvus.proto.query.GetSegmentInfoResponse
160, // 293: milvus.proto.query.QueryNode.Search:output_type -> milvus.proto.internal.SearchResults 122, // 293: milvus.proto.query.QueryNode.SyncReplicaSegments:output_type -> milvus.proto.common.Status
160, // 294: milvus.proto.query.QueryNode.SearchSegments:output_type -> milvus.proto.internal.SearchResults 160, // 294: milvus.proto.query.QueryNode.GetStatistics:output_type -> milvus.proto.internal.GetStatisticsResponse
161, // 295: milvus.proto.query.QueryNode.Query:output_type -> milvus.proto.internal.RetrieveResults 161, // 295: milvus.proto.query.QueryNode.Search:output_type -> milvus.proto.internal.SearchResults
161, // 296: milvus.proto.query.QueryNode.QueryStream:output_type -> milvus.proto.internal.RetrieveResults 161, // 296: milvus.proto.query.QueryNode.SearchSegments:output_type -> milvus.proto.internal.SearchResults
161, // 297: milvus.proto.query.QueryNode.QuerySegments:output_type -> milvus.proto.internal.RetrieveResults 162, // 297: milvus.proto.query.QueryNode.Query:output_type -> milvus.proto.internal.RetrieveResults
161, // 298: milvus.proto.query.QueryNode.QueryStreamSegments:output_type -> milvus.proto.internal.RetrieveResults 162, // 298: milvus.proto.query.QueryNode.QueryStream:output_type -> milvus.proto.internal.RetrieveResults
152, // 299: milvus.proto.query.QueryNode.ShowConfigurations:output_type -> milvus.proto.internal.ShowConfigurationsResponse 162, // 299: milvus.proto.query.QueryNode.QuerySegments:output_type -> milvus.proto.internal.RetrieveResults
153, // 300: milvus.proto.query.QueryNode.GetMetrics:output_type -> milvus.proto.milvus.GetMetricsResponse 162, // 300: milvus.proto.query.QueryNode.QueryStreamSegments:output_type -> milvus.proto.internal.RetrieveResults
51, // 301: milvus.proto.query.QueryNode.GetDataDistribution:output_type -> milvus.proto.query.GetDataDistributionResponse 153, // 301: milvus.proto.query.QueryNode.ShowConfigurations:output_type -> milvus.proto.internal.ShowConfigurationsResponse
121, // 302: milvus.proto.query.QueryNode.SyncDistribution:output_type -> milvus.proto.common.Status 154, // 302: milvus.proto.query.QueryNode.GetMetrics:output_type -> milvus.proto.milvus.GetMetricsResponse
121, // 303: milvus.proto.query.QueryNode.Delete:output_type -> milvus.proto.common.Status 51, // 303: milvus.proto.query.QueryNode.GetDataDistribution:output_type -> milvus.proto.query.GetDataDistributionResponse
70, // 304: milvus.proto.query.QueryNode.DeleteBatch:output_type -> milvus.proto.query.DeleteBatchResponse 122, // 304: milvus.proto.query.QueryNode.SyncDistribution:output_type -> milvus.proto.common.Status
121, // 305: milvus.proto.query.QueryNode.UpdateSchema:output_type -> milvus.proto.common.Status 122, // 305: milvus.proto.query.QueryNode.Delete:output_type -> milvus.proto.common.Status
162, // 306: milvus.proto.query.QueryNode.RunAnalyzer:output_type -> milvus.proto.milvus.RunAnalyzerResponse 70, // 306: milvus.proto.query.QueryNode.DeleteBatch:output_type -> milvus.proto.query.DeleteBatchResponse
243, // [243:307] is the sub-list for method output_type 122, // 307: milvus.proto.query.QueryNode.UpdateSchema:output_type -> milvus.proto.common.Status
179, // [179:243] is the sub-list for method input_type 163, // 308: milvus.proto.query.QueryNode.RunAnalyzer:output_type -> milvus.proto.milvus.RunAnalyzerResponse
179, // [179:179] is the sub-list for extension type_name 122, // 309: milvus.proto.query.QueryNode.DropIndex:output_type -> milvus.proto.common.Status
179, // [179:179] is the sub-list for extension extendee 245, // [245:310] is the sub-list for method output_type
0, // [0:179] is the sub-list for field type_name 180, // [180:245] is the sub-list for method input_type
180, // [180:180] is the sub-list for extension type_name
180, // [180:180] is the sub-list for extension extendee
0, // [0:180] is the sub-list for field type_name
} }
func init() { file_query_coord_proto_init() } func init() { file_query_coord_proto_init() }
@ -11346,6 +11446,18 @@ func file_query_coord_proto_init() {
return nil return nil
} }
} }
file_query_coord_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DropIndexRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
} }
type x struct{} type x struct{}
out := protoimpl.TypeBuilder{ out := protoimpl.TypeBuilder{
@ -11353,7 +11465,7 @@ func file_query_coord_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_query_coord_proto_rawDesc, RawDescriptor: file_query_coord_proto_rawDesc,
NumEnums: 7, NumEnums: 7,
NumMessages: 113, NumMessages: 114,
NumExtensions: 0, NumExtensions: 0,
NumServices: 2, NumServices: 2,
}, },


@ -1475,6 +1475,7 @@ const (
QueryNode_DeleteBatch_FullMethodName = "/milvus.proto.query.QueryNode/DeleteBatch" QueryNode_DeleteBatch_FullMethodName = "/milvus.proto.query.QueryNode/DeleteBatch"
QueryNode_UpdateSchema_FullMethodName = "/milvus.proto.query.QueryNode/UpdateSchema" QueryNode_UpdateSchema_FullMethodName = "/milvus.proto.query.QueryNode/UpdateSchema"
QueryNode_RunAnalyzer_FullMethodName = "/milvus.proto.query.QueryNode/RunAnalyzer" QueryNode_RunAnalyzer_FullMethodName = "/milvus.proto.query.QueryNode/RunAnalyzer"
QueryNode_DropIndex_FullMethodName = "/milvus.proto.query.QueryNode/DropIndex"
) )
// QueryNodeClient is the client API for QueryNode service. // QueryNodeClient is the client API for QueryNode service.
@ -1511,6 +1512,7 @@ type QueryNodeClient interface {
DeleteBatch(ctx context.Context, in *DeleteBatchRequest, opts ...grpc.CallOption) (*DeleteBatchResponse, error) DeleteBatch(ctx context.Context, in *DeleteBatchRequest, opts ...grpc.CallOption) (*DeleteBatchResponse, error)
UpdateSchema(ctx context.Context, in *UpdateSchemaRequest, opts ...grpc.CallOption) (*commonpb.Status, error) UpdateSchema(ctx context.Context, in *UpdateSchemaRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
RunAnalyzer(ctx context.Context, in *RunAnalyzerRequest, opts ...grpc.CallOption) (*milvuspb.RunAnalyzerResponse, error) RunAnalyzer(ctx context.Context, in *RunAnalyzerRequest, opts ...grpc.CallOption) (*milvuspb.RunAnalyzerResponse, error)
DropIndex(ctx context.Context, in *DropIndexRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
} }
type queryNodeClient struct { type queryNodeClient struct {
@ -1810,6 +1812,15 @@ func (c *queryNodeClient) RunAnalyzer(ctx context.Context, in *RunAnalyzerReques
return out, nil return out, nil
} }
func (c *queryNodeClient) DropIndex(ctx context.Context, in *DropIndexRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
out := new(commonpb.Status)
err := c.cc.Invoke(ctx, QueryNode_DropIndex_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
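
For orientation, a minimal sketch of driving the new RPC through the generated client. The querypb import path, the dial target, and the zero-valued DropIndexRequest are assumptions for illustration, not values taken from this PR:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	querypb "github.com/milvus-io/milvus/pkg/proto/querypb" // assumed import path
)

func main() {
	// Assumed query node address.
	conn, err := grpc.Dial("localhost:21123",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := querypb.NewQueryNodeClient(conn)
	// Request fields are left zero-valued for brevity; see DropIndexRequest.
	status, err := client.DropIndex(context.Background(), &querypb.DropIndexRequest{})
	if err != nil {
		log.Fatalf("DropIndex RPC failed: %v", err)
	}
	log.Printf("DropIndex returned error code %v", status.GetErrorCode())
}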
// QueryNodeServer is the server API for QueryNode service. // QueryNodeServer is the server API for QueryNode service.
// All implementations should embed UnimplementedQueryNodeServer // All implementations should embed UnimplementedQueryNodeServer
// for forward compatibility // for forward compatibility
@ -1844,6 +1855,7 @@ type QueryNodeServer interface {
DeleteBatch(context.Context, *DeleteBatchRequest) (*DeleteBatchResponse, error) DeleteBatch(context.Context, *DeleteBatchRequest) (*DeleteBatchResponse, error)
UpdateSchema(context.Context, *UpdateSchemaRequest) (*commonpb.Status, error) UpdateSchema(context.Context, *UpdateSchemaRequest) (*commonpb.Status, error)
RunAnalyzer(context.Context, *RunAnalyzerRequest) (*milvuspb.RunAnalyzerResponse, error) RunAnalyzer(context.Context, *RunAnalyzerRequest) (*milvuspb.RunAnalyzerResponse, error)
DropIndex(context.Context, *DropIndexRequest) (*commonpb.Status, error)
} }
// UnimplementedQueryNodeServer should be embedded to have forward compatible implementations. // UnimplementedQueryNodeServer should be embedded to have forward compatible implementations.
@ -1931,6 +1943,9 @@ func (UnimplementedQueryNodeServer) UpdateSchema(context.Context, *UpdateSchemaR
func (UnimplementedQueryNodeServer) RunAnalyzer(context.Context, *RunAnalyzerRequest) (*milvuspb.RunAnalyzerResponse, error) { func (UnimplementedQueryNodeServer) RunAnalyzer(context.Context, *RunAnalyzerRequest) (*milvuspb.RunAnalyzerResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RunAnalyzer not implemented") return nil, status.Errorf(codes.Unimplemented, "method RunAnalyzer not implemented")
} }
func (UnimplementedQueryNodeServer) DropIndex(context.Context, *DropIndexRequest) (*commonpb.Status, error) {
return nil, status.Errorf(codes.Unimplemented, "method DropIndex not implemented")
}
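
On the server side, a concrete implementation only needs to embed UnimplementedQueryNodeServer and override the methods it actually serves; everything else falls back to the Unimplemented stubs above. A minimal sketch (the type name and the zero-valued success status are assumptions):

// dropIndexServer overrides DropIndex only; all other QueryNode RPCs
// return codes.Unimplemented via the embedded stubs.
type dropIndexServer struct {
	querypb.UnimplementedQueryNodeServer
}

func (s *dropIndexServer) DropIndex(ctx context.Context, req *querypb.DropIndexRequest) (*commonpb.Status, error) {
	// A real query node would locate the affected segments and evict the
	// index here (see the segcore changes in this PR); this stub simply
	// reports success via a zero-valued status.
	return &commonpb.Status{}, nil
}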
// UnsafeQueryNodeServer may be embedded to opt out of forward compatibility for this service. // UnsafeQueryNodeServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to QueryNodeServer will // Use of this interface is not recommended, as added methods to QueryNodeServer will
@ -2435,6 +2450,24 @@ func _QueryNode_RunAnalyzer_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler) return interceptor(ctx, in, info, handler)
} }
func _QueryNode_DropIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DropIndexRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(QueryNodeServer).DropIndex(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: QueryNode_DropIndex_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryNodeServer).DropIndex(ctx, req.(*DropIndexRequest))
}
return interceptor(ctx, in, info, handler)
}
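
Since the generated handler above threads every call through the server's unary interceptor (when one is installed), an interceptor can observe the new method by its registered name. A small hypothetical sketch:

// logDropIndex logs DropIndex invocations and delegates everything to the
// wrapped handler unchanged.
func logDropIndex(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler) (interface{}, error) {
	if info.FullMethod == QueryNode_DropIndex_FullMethodName {
		log.Printf("DropIndex invoked")
	}
	return handler(ctx, req)
}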
// QueryNode_ServiceDesc is the grpc.ServiceDesc for QueryNode service. // QueryNode_ServiceDesc is the grpc.ServiceDesc for QueryNode service.
// It's only intended for direct use with grpc.RegisterService, // It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy) // and not to be introspected or modified (even as a copy)
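
Before the descriptor itself, a sketch of the direct-registration path the comment refers to, reusing the hypothetical dropIndexServer and logDropIndex from above (the listen address is an assumption; grpc.NewServer, grpc.UnaryInterceptor, and Server.RegisterService are standard grpc-go API):

func serve() error {
	lis, err := net.Listen("tcp", ":21123") // assumed port
	if err != nil {
		return err
	}
	s := grpc.NewServer(grpc.UnaryInterceptor(logDropIndex))
	// Equivalent to calling the generated RegisterQueryNodeServer helper.
	s.RegisterService(&QueryNode_ServiceDesc, &dropIndexServer{})
	return s.Serve(lis)
}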
@ -2542,6 +2575,10 @@ var QueryNode_ServiceDesc = grpc.ServiceDesc{
MethodName: "RunAnalyzer", MethodName: "RunAnalyzer",
Handler: _QueryNode_RunAnalyzer_Handler, Handler: _QueryNode_RunAnalyzer_Handler,
}, },
{
MethodName: "DropIndex",
Handler: _QueryNode_DropIndex_Handler,
},
}, },
Streams: []grpc.StreamDesc{ Streams: []grpc.StreamDesc{
{ {


@ -182,8 +182,7 @@ class TestMilvusClientIndexInvalid(TestMilvusClientV2Base):
collection_name = cf.gen_unique_str(prefix) collection_name = cf.gen_unique_str(prefix)
# 1. create collection # 1. create collection
self.create_collection(client, collection_name, default_dim, consistency_level="Strong") self.create_collection(client, collection_name, default_dim, consistency_level="Strong")
error = {ct.err_code: 65535, ct.err_msg: f"index cannot be dropped, collection is loaded, " error = {ct.err_code: 1100, ct.err_msg: f"vector index cannot be dropped on loaded collection"}
f"please release it first"}
self.drop_index(client, collection_name, "vector", self.drop_index(client, collection_name, "vector",
check_task=CheckTasks.err_res, check_items=error) check_task=CheckTasks.err_res, check_items=error)
self.drop_collection(client, collection_name) self.drop_collection(client, collection_name)


@ -201,9 +201,9 @@ class TestAsyncMilvusClientIndexInvalid(TestMilvusClientV2Base):
collection_name = cf.gen_unique_str(prefix) collection_name = cf.gen_unique_str(prefix)
# 1. create collection # 1. create collection
await async_client.create_collection(collection_name, default_dim, consistency_level="Strong") await async_client.create_collection(collection_name, default_dim, consistency_level="Strong")
# 2. drop index # 2. drop index
error = {ct.err_code: 65535, ct.err_msg: f"index cannot be dropped, collection is loaded, " error = {ct.err_code: 1100, ct.err_msg: f"vector index cannot be dropped on loaded collection"}
f"please release it first"}
await async_client.drop_index(collection_name, "vector", check_task=CheckTasks.err_res, check_items=error) await async_client.drop_index(collection_name, "vector", check_task=CheckTasks.err_res, check_items=error)
# 3. drop action # 3. drop action
await async_client.drop_collection(collection_name) await async_client.drop_collection(collection_name)


@ -1164,9 +1164,8 @@ class TestIndexInvalid(TestcaseBase):
collection_w.create_index(ct.default_float_vec_field_name, ct.default_index) collection_w.create_index(ct.default_float_vec_field_name, ct.default_index)
collection_w.load() collection_w.load()
collection_w.drop_index(check_task=CheckTasks.err_res, collection_w.drop_index(check_task=CheckTasks.err_res,
check_items={"err_code": 999, check_items={ct.err_code: 1100,
"err_msg": "index cannot be dropped, collection is " ct.err_msg: "vector index cannot be dropped on loaded collection"})
"loaded, please release it first"})
@pytest.mark.tags(CaseLabel.L1) @pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("n_trees", [-1, 1025, 'a']) @pytest.mark.parametrize("n_trees", [-1, 1025, 'a'])
@ -2359,12 +2358,12 @@ class TestBitmapIndex(TestcaseBase):
method: method:
1. build and drop `BITMAP` index on an empty collection 1. build and drop `BITMAP` index on an empty collection
2. rebuild `BITMAP` index on loaded collection 2. rebuild `BITMAP` index on loaded collection
3. drop index on loaded collection and raises expected error 3. drop index on loaded collection
4. re-build the same index on loaded collection 4. re-build the same index on loaded collection
expected: expected:
1. build and drop index successful on a not loaded collection 1. build and drop index successful on a not loaded collection
2. build index successful on non-indexed and loaded fields 2. build index successful on non-indexed and loaded fields
3. can not drop index on loaded collection 3. can drop index on loaded collection
""" """
# init params # init params
collection_name, nb = f"{request.function.__name__}_{primary_field}_{auto_id}", 3000 collection_name, nb = f"{request.function.__name__}_{primary_field}_{auto_id}", 3000
@ -2401,9 +2400,7 @@ class TestBitmapIndex(TestcaseBase):
self.collection_wrap.load() self.collection_wrap.load()
# re-drop scalars' index # re-drop scalars' index
self.drop_multi_index(index_names=list(set(index_params.keys()) - {DataType.FLOAT_VECTOR.name}), self.drop_multi_index(index_names=list(set(index_params.keys()) - {DataType.FLOAT_VECTOR.name}))
check_task=CheckTasks.err_res,
check_items={ct.err_code: 65535, ct.err_msg: iem.DropLoadedIndex})
# re-build loaded index # re-build loaded index
self.build_multi_index(index_params=index_params) self.build_multi_index(index_params=index_params)