Mirror of https://gitee.com/milvus-io/milvus.git, synced 2025-12-07 17:48:29 +08:00
fix: Restore the MVCC functionality. (#29749)
When the TimeTravel functionality was removed earlier, the removal inadvertently broke the system's internal MVCC functionality. This PR reintroduces the internal MVCC functionality as follows:

1. Add MvccTimestamp to internal Search/Query requests and to internal Search results.
2. When the delegator receives a Query/Search request that carries no MVCC timestamp, set the delegator's current tsafe as the request's MVCC timestamp; if the request already carries one, leave it unchanged.
3. When the Proxy handles a Search that triggers the second-phase ReQuery, split the ReQuery across the shards and pass the MVCC timestamp down to the corresponding Query requests.

issue: #29656

Signed-off-by: zhenshan.cao <zhenshan.cao@zilliz.com>
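The visibility rule in point 2 is plain timestamp defaulting. A minimal sketch of the idea (the real delegator lives in the Go query node; the C++ type and function names below are hypothetical, for illustration only):

    #include <cstdint>

    using Timestamp = uint64_t;

    struct SearchRequest {
        Timestamp mvcc_timestamp = 0;  // 0 = not set by an upstream caller
    };

    // Hypothetical delegator hook mirroring rule 2: if the request carries no
    // MVCC timestamp yet, pin it to the delegator's current tsafe so every
    // segment sees one consistent snapshot; otherwise leave it untouched so a
    // second-phase ReQuery reuses the same snapshot.
    void FixMvccTimestamp(SearchRequest& req, Timestamp current_tsafe) {
        if (req.mvcc_timestamp == 0) {
            req.mvcc_timestamp = current_tsafe;
        }
    }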
This commit is contained in:
parent cb18f18c1d
commit 60e88fb833
@@ -67,11 +67,12 @@ SegmentInternalInterface::FillTargetEntry(const query::Plan* plan,
 std::unique_ptr<SearchResult>
 SegmentInternalInterface::Search(
     const query::Plan* plan,
-    const query::PlaceholderGroup* placeholder_group) const {
+    const query::PlaceholderGroup* placeholder_group,
+    Timestamp timestamp) const {
     std::shared_lock lck(mutex_);
     milvus::tracer::AddEvent("obtained_segment_lock_mutex");
     check_search(plan);
-    query::ExecPlanNodeVisitor visitor(*this, 1L << 63, placeholder_group);
+    query::ExecPlanNodeVisitor visitor(*this, timestamp, placeholder_group);
     auto results = std::make_unique<SearchResult>();
     *results = visitor.get_moved_result(*plan->plan_node_);
     results->segment_ = (void*)this;
@@ -54,7 +54,8 @@ class SegmentInterface {
 
     virtual std::unique_ptr<SearchResult>
     Search(const query::Plan* Plan,
-           const query::PlaceholderGroup* placeholder_group) const = 0;
+           const query::PlaceholderGroup* placeholder_group,
+           Timestamp timestamp) const = 0;
 
     virtual std::unique_ptr<proto::segcore::RetrieveResults>
     Retrieve(const query::RetrievePlan* Plan,

@@ -136,7 +137,8 @@ class SegmentInternalInterface : public SegmentInterface {
 
     std::unique_ptr<SearchResult>
     Search(const query::Plan* Plan,
-           const query::PlaceholderGroup* placeholder_group) const override;
+           const query::PlaceholderGroup* placeholder_group,
+           Timestamp timestamp) const override;
 
     void
     FillPrimaryKeys(const query::Plan* plan,
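Note: before this change, Search hard-coded 1L << 63 — the "see everything" sentinel — into the ExecPlanNodeVisitor; the hunks above make callers supply the timestamp explicitly. A minimal caller-side sketch (segment, plan, and placeholder group prepared as in the tests later in this diff; the timestamp value is illustrative):

    milvus::Timestamp ts = 10000000;  // illustrative visibility cut-off
    auto sr = segment->Search(plan.get(), ph_group.get(), ts);
    // sr now reflects only data visible at or before ts; passing 1L << 63
    // reproduces the old unrestricted behaviour.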
@@ -80,6 +80,7 @@ Search(CSegmentInterface c_segment,
        CSearchPlan c_plan,
        CPlaceholderGroup c_placeholder_group,
        CTraceContext c_trace,
+       uint64_t timestamp,
        CSearchResult* result) {
     try {
         auto segment = (milvus::segcore::SegmentInterface*)c_segment;

@@ -90,7 +91,7 @@ Search(CSegmentInterface c_segment,
             c_trace.traceID, c_trace.spanID, c_trace.flag};
         auto span = milvus::tracer::StartSpan("SegCoreSearch", &ctx);
         milvus::tracer::SetRootSpan(span);
-        auto search_result = segment->Search(plan, phg_ptr);
+        auto search_result = segment->Search(plan, phg_ptr, timestamp);
         if (!milvus::PositivelyRelated(
                 plan->plan_node_->search_info_.metric_type_)) {
             for (auto& dis : search_result->distances_) {

@@ -45,6 +45,7 @@ Search(CSegmentInterface c_segment,
       CSearchPlan c_plan,
       CPlaceholderGroup c_placeholder_group,
       CTraceContext c_trace,
+      uint64_t timestamp,
       CSearchResult* result);
 
 void
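At the C API boundary the timestamp travels between the trace context and the output parameter. A sketch of the updated call shape (handles assumed created as in the tests below; the value is illustrative):

    CSearchResult search_result;
    uint64_t mvcc_ts = 10000000;  // or 1L << 63 for unrestricted visibility
    auto status =
        Search(segment, plan, placeholderGroup, {}, mvcc_ts, &search_result);
    ASSERT_EQ(status.error_code, Success);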
@@ -90,8 +90,10 @@ Search_GrowingIndex(benchmark::State& state) {
                          dataset_.timestamps_.data(),
                          dataset_.raw_);
 
+    Timestamp ts = 10000000;
+
     for (auto _ : state) {
-        auto qr = segment->Search(search_plan.get(), ph_group.get());
+        auto qr = segment->Search(search_plan.get(), ph_group.get(), ts);
     }
 }
 
@@ -114,7 +116,8 @@ Search_Sealed(benchmark::State& state) {
     } else if (choice == 1) {
         // hnsw
         auto vec = dataset_.get_col<float>(milvus::FieldId(100));
-        auto indexing = GenVecIndexing(N, dim, vec.data(), knowhere::IndexEnum::INDEX_HNSW);
+        auto indexing =
+            GenVecIndexing(N, dim, vec.data(), knowhere::IndexEnum::INDEX_HNSW);
         segcore::LoadIndexInfo info;
         info.index = std::move(indexing);
         info.field_id = (*schema)[FieldName("fakevec")].get_id().get();

@@ -123,8 +126,11 @@ Search_Sealed(benchmark::State& state) {
         segment->DropFieldData(milvus::FieldId(100));
         segment->LoadIndex(info);
     }
+
+    Timestamp ts = 10000000;
+
     for (auto _ : state) {
-        auto qr = segment->Search(search_plan.get(), ph_group.get());
+        auto qr = segment->Search(search_plan.get(), ph_group.get(), ts);
     }
 }
@@ -191,7 +191,8 @@ TEST_P(BinlogIndexTest, Accuracy) {
     std::vector<const milvus::query::PlaceholderGroup*> ph_group_arr = {
         ph_group.get()};
     auto nlist = segcore_config.get_nlist();
-    auto binlog_index_sr = segment->Search(plan.get(), ph_group.get());
+    auto binlog_index_sr =
+        segment->Search(plan.get(), ph_group.get(), 1L << 63);
     ASSERT_EQ(binlog_index_sr->total_nq_, num_queries);
     EXPECT_EQ(binlog_index_sr->unity_topK_, topk);
     EXPECT_EQ(binlog_index_sr->distances_.size(), num_queries * topk);

@@ -226,7 +227,7 @@ TEST_P(BinlogIndexTest, Accuracy) {
     EXPECT_TRUE(segment->HasIndex(vec_field_id));
     EXPECT_EQ(segment->get_row_count(), data_n);
     EXPECT_FALSE(segment->HasFieldData(vec_field_id));
-    auto ivf_sr = segment->Search(plan.get(), ph_group.get());
+    auto ivf_sr = segment->Search(plan.get(), ph_group.get(), 1L << 63);
    auto similary = GetKnnSearchRecall(num_queries,
                                       binlog_index_sr->seg_offsets_.data(),
                                       topk,

@@ -312,4 +313,4 @@ TEST_P(BinlogIndexTest, LoadBinlogWithoutIndexMeta) {
     EXPECT_FALSE(segment->HasIndex(vec_field_id));
     EXPECT_EQ(segment->get_row_count(), data_n);
     EXPECT_TRUE(segment->HasFieldData(vec_field_id));
-}
+}
@@ -1075,11 +1075,13 @@ TEST(CApiTest, SearchTest) {
     placeholderGroups.push_back(placeholderGroup);
 
     CSearchResult search_result;
-    auto res = Search(segment, plan, placeholderGroup, {}, &search_result);
+    auto res =
+        Search(segment, plan, placeholderGroup, {}, ts_offset, &search_result);
     ASSERT_EQ(res.error_code, Success);
 
     CSearchResult search_result2;
-    auto res2 = Search(segment, plan, placeholderGroup, {}, &search_result2);
+    auto res2 =
+        Search(segment, plan, placeholderGroup, {}, ts_offset, &search_result2);
     ASSERT_EQ(res2.error_code, Success);
 
     DeleteSearchPlan(plan);

@@ -1143,7 +1145,12 @@ TEST(CApiTest, SearchTestWithExpr) {
     dataset.timestamps_.push_back(1);
 
     CSearchResult search_result;
-    auto res = Search(segment, plan, placeholderGroup, {}, &search_result);
+    auto res = Search(segment,
+                      plan,
+                      placeholderGroup,
+                      {},
+                      dataset.timestamps_[0],
+                      &search_result);
     ASSERT_EQ(res.error_code, Success);
 
     DeleteSearchPlan(plan);
@@ -1427,7 +1434,7 @@ TEST(CApiTest, ReduceNullResult) {
     auto slice_topKs = std::vector<int64_t>{1};
     std::vector<CSearchResult> results;
     CSearchResult res;
-    status = Search(segment, plan, placeholderGroup, {}, &res);
+    status = Search(segment, plan, placeholderGroup, {}, 1L << 63, &res);
     ASSERT_EQ(status.error_code, Success);
     results.push_back(res);
     CSearchResultDataBlobs cSearchResultData;
@@ -1514,9 +1521,11 @@ TEST(CApiTest, ReduceRemoveDuplicates) {
     auto slice_topKs = std::vector<int64_t>{topK / 2, topK};
     std::vector<CSearchResult> results;
     CSearchResult res1, res2;
-    status = Search(segment, plan, placeholderGroup, {}, &res1);
+    status = Search(
+        segment, plan, placeholderGroup, {}, dataset.timestamps_[0], &res1);
     ASSERT_EQ(status.error_code, Success);
-    status = Search(segment, plan, placeholderGroup, {}, &res2);
+    status = Search(
+        segment, plan, placeholderGroup, {}, dataset.timestamps_[0], &res2);
     ASSERT_EQ(status.error_code, Success);
     results.push_back(res1);
     results.push_back(res2);

@@ -1545,11 +1554,14 @@ TEST(CApiTest, ReduceRemoveDuplicates) {
     auto slice_topKs = std::vector<int64_t>{topK / 2, topK, topK};
     std::vector<CSearchResult> results;
     CSearchResult res1, res2, res3;
-    status = Search(segment, plan, placeholderGroup, {}, &res1);
+    status = Search(
+        segment, plan, placeholderGroup, {}, dataset.timestamps_[0], &res1);
     ASSERT_EQ(status.error_code, Success);
-    status = Search(segment, plan, placeholderGroup, {}, &res2);
+    status = Search(
+        segment, plan, placeholderGroup, {}, dataset.timestamps_[0], &res2);
     ASSERT_EQ(status.error_code, Success);
-    status = Search(segment, plan, placeholderGroup, {}, &res3);
+    status = Search(
+        segment, plan, placeholderGroup, {}, dataset.timestamps_[0], &res3);
     ASSERT_EQ(status.error_code, Success);
     results.push_back(res1);
     results.push_back(res2);

@@ -1666,9 +1678,11 @@ testReduceSearchWithExpr(int N,
     std::vector<CSearchResult> results;
     CSearchResult res1;
     CSearchResult res2;
-    auto res = Search(segment, plan, placeholderGroup, {}, &res1);
+    auto res = Search(
+        segment, plan, placeholderGroup, {}, dataset.timestamps_[N - 1], &res1);
     ASSERT_EQ(res.error_code, Success);
-    res = Search(segment, plan, placeholderGroup, {}, &res2);
+    res = Search(
+        segment, plan, placeholderGroup, {}, dataset.timestamps_[N - 1], &res2);
     ASSERT_EQ(res.error_code, Success);
     results.push_back(res1);
     results.push_back(res2);
@@ -1900,9 +1914,15 @@ TEST(CApiTest, Indexing_Without_Predicate) {
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
 
+    Timestamp timestmap = 10000000;
+
     CSearchResult c_search_result_on_smallIndex;
-    auto res_before_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_smallIndex);
+    auto res_before_load_index = Search(segment,
+                                        plan,
+                                        placeholderGroup,
+                                        {},
+                                        timestmap,
+                                        &c_search_result_on_smallIndex);
     ASSERT_EQ(res_before_load_index.error_code, Success);
 
     // load index to segment

@@ -1962,6 +1982,7 @@ TEST(CApiTest, Indexing_Without_Predicate) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestmap,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -2044,9 +2065,15 @@ TEST(CApiTest, Indexing_Expr_Without_Predicate) {
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
 
+    Timestamp timestamp = 10000000;
+
     CSearchResult c_search_result_on_smallIndex;
-    auto res_before_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_smallIndex);
+    auto res_before_load_index = Search(segment,
+                                        plan,
+                                        placeholderGroup,
+                                        {},
+                                        timestamp,
+                                        &c_search_result_on_smallIndex);
     ASSERT_EQ(res_before_load_index.error_code, Success);
 
     // load index to segment

@@ -2107,6 +2134,7 @@ TEST(CApiTest, Indexing_Expr_Without_Predicate) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestamp,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -2217,10 +2245,15 @@ TEST(CApiTest, Indexing_With_float_Predicate_Range) {
 
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
+    Timestamp timestamp = 10000000;
 
     CSearchResult c_search_result_on_smallIndex;
-    auto res_before_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_smallIndex);
+    auto res_before_load_index = Search(segment,
+                                        plan,
+                                        placeholderGroup,
+                                        {},
+                                        timestamp,
+                                        &c_search_result_on_smallIndex);
     ASSERT_EQ(res_before_load_index.error_code, Success);
 
     // load index to segment

@@ -2281,6 +2314,7 @@ TEST(CApiTest, Indexing_With_float_Predicate_Range) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestamp,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -2393,10 +2427,15 @@ TEST(CApiTest, Indexing_Expr_With_float_Predicate_Range) {
 
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
+    Timestamp timestamp = 10000000;
 
     CSearchResult c_search_result_on_smallIndex;
-    auto res_before_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_smallIndex);
+    auto res_before_load_index = Search(segment,
+                                        plan,
+                                        placeholderGroup,
+                                        {},
+                                        timestamp,
+                                        &c_search_result_on_smallIndex);
     ASSERT_EQ(res_before_load_index.error_code, Success);
 
     // load index to segment

@@ -2457,6 +2496,7 @@ TEST(CApiTest, Indexing_Expr_With_float_Predicate_Range) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestamp,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -2561,10 +2601,15 @@ TEST(CApiTest, Indexing_With_float_Predicate_Term) {
 
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
+    Timestamp timestamp = 10000000;
 
     CSearchResult c_search_result_on_smallIndex;
-    auto res_before_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_smallIndex);
+    auto res_before_load_index = Search(segment,
+                                        plan,
+                                        placeholderGroup,
+                                        {},
+                                        timestamp,
+                                        &c_search_result_on_smallIndex);
     ASSERT_EQ(res_before_load_index.error_code, Success);
 
     // load index to segment

@@ -2625,6 +2670,7 @@ TEST(CApiTest, Indexing_With_float_Predicate_Term) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestamp,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -2730,10 +2776,15 @@ TEST(CApiTest, Indexing_Expr_With_float_Predicate_Term) {
 
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
+    Timestamp timestamp = 10000000;
 
     CSearchResult c_search_result_on_smallIndex;
-    auto res_before_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_smallIndex);
+    auto res_before_load_index = Search(segment,
+                                        plan,
+                                        placeholderGroup,
+                                        {},
+                                        timestamp,
+                                        &c_search_result_on_smallIndex);
     ASSERT_EQ(res_before_load_index.error_code, Success);
 
     // load index to segment

@@ -2794,6 +2845,7 @@ TEST(CApiTest, Indexing_Expr_With_float_Predicate_Term) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestamp,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -2904,10 +2956,15 @@ TEST(CApiTest, Indexing_With_binary_Predicate_Range) {
 
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
+    Timestamp timestamp = 10000000;
 
     CSearchResult c_search_result_on_smallIndex;
-    auto res_before_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_smallIndex);
+    auto res_before_load_index = Search(segment,
+                                        plan,
+                                        placeholderGroup,
+                                        {},
+                                        timestamp,
+                                        &c_search_result_on_smallIndex);
     ASSERT_EQ(res_before_load_index.error_code, Success);
 
     // load index to segment

@@ -2969,6 +3026,7 @@ TEST(CApiTest, Indexing_With_binary_Predicate_Range) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestamp,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -3079,10 +3137,15 @@ TEST(CApiTest, Indexing_Expr_With_binary_Predicate_Range) {
 
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
+    Timestamp timestamp = 10000000;
 
     CSearchResult c_search_result_on_smallIndex;
-    auto res_before_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_smallIndex);
+    auto res_before_load_index = Search(segment,
+                                        plan,
+                                        placeholderGroup,
+                                        {},
+                                        timestamp,
+                                        &c_search_result_on_smallIndex);
     ASSERT_TRUE(res_before_load_index.error_code == Success)
         << res_before_load_index.error_msg;
 

@@ -3144,6 +3207,7 @@ TEST(CApiTest, Indexing_Expr_With_binary_Predicate_Range) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestamp,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -3249,10 +3313,15 @@ TEST(CApiTest, Indexing_With_binary_Predicate_Term) {
 
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
+    Timestamp timestamp = 10000000;
 
     CSearchResult c_search_result_on_smallIndex;
-    auto res_before_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_smallIndex);
+    auto res_before_load_index = Search(segment,
+                                        plan,
+                                        placeholderGroup,
+                                        {},
+                                        timestamp,
+                                        &c_search_result_on_smallIndex);
     ASSERT_EQ(res_before_load_index.error_code, Success);
 
     // load index to segment

@@ -3313,6 +3382,7 @@ TEST(CApiTest, Indexing_With_binary_Predicate_Term) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestamp,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -3440,11 +3510,15 @@ TEST(CApiTest, Indexing_Expr_With_binary_Predicate_Term) {
 
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
-    Timestamp time = 10000000;
+    Timestamp timestamp = 10000000;
 
     CSearchResult c_search_result_on_smallIndex;
-    auto res_before_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_smallIndex);
+    auto res_before_load_index = Search(segment,
+                                        plan,
+                                        placeholderGroup,
+                                        {},
+                                        timestamp,
+                                        &c_search_result_on_smallIndex);
     ASSERT_EQ(res_before_load_index.error_code, Success);
 
     // load index to segment

@@ -3505,6 +3579,7 @@ TEST(CApiTest, Indexing_Expr_With_binary_Predicate_Term) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestamp,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -3643,7 +3718,7 @@ TEST(CApiTest, SealedSegment_search_float_Predicate_Range) {
 
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
-    Timestamp time = 10000000;
+    Timestamp timestamp = 10000000;
 
     // load index to segment
     auto indexing = generate_index(vec_col.data(),

@@ -3702,6 +3777,7 @@ TEST(CApiTest, SealedSegment_search_float_Predicate_Range) {
                                        plan,
                                        placeholderGroup,
                                        {},
+                                       timestamp,
                                        &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
@@ -3780,12 +3856,14 @@ TEST(CApiTest, SealedSegment_search_without_predicates) {
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
     CSearchResult search_result;
-    auto res = Search(segment, plan, placeholderGroup, {}, &search_result);
+    auto res = Search(
+        segment, plan, placeholderGroup, {}, N + ts_offset, &search_result);
     std::cout << res.error_msg << std::endl;
     ASSERT_EQ(res.error_code, Success);
 
     CSearchResult search_result2;
-    auto res2 = Search(segment, plan, placeholderGroup, {}, &search_result2);
+    auto res2 = Search(
+        segment, plan, placeholderGroup, {}, N + ts_offset, &search_result2);
     ASSERT_EQ(res2.error_code, Success);
 
     DeleteSearchPlan(plan);
@@ -3874,6 +3952,7 @@ TEST(CApiTest, SealedSegment_search_float_With_Expr_Predicate_Range) {
 
     std::vector<CPlaceholderGroup> placeholderGroups;
     placeholderGroups.push_back(placeholderGroup);
+    Timestamp timestamp = 10000000;
 
     // load index to segment
     auto indexing = generate_index(vec_col.data(),

@@ -3933,8 +4012,12 @@ TEST(CApiTest, SealedSegment_search_float_With_Expr_Predicate_Range) {
     }
 
     CSearchResult c_search_result_on_bigIndex;
-    auto res_after_load_index = Search(
-        segment, plan, placeholderGroup, {}, &c_search_result_on_bigIndex);
+    auto res_after_load_index = Search(segment,
+                                       plan,
+                                       placeholderGroup,
+                                       {},
+                                       timestamp,
+                                       &c_search_result_on_bigIndex);
     ASSERT_EQ(res_after_load_index.error_code, Success);
 
     auto search_result_on_bigIndex = (SearchResult*)c_search_result_on_bigIndex;
@@ -4230,7 +4313,8 @@ TEST(CApiTest, RANGE_SEARCH_WITH_RADIUS_WHEN_IP) {
     placeholderGroups.push_back(placeholderGroup);
 
     CSearchResult search_result;
-    auto res = Search(segment, plan, placeholderGroup, {}, &search_result);
+    auto res =
+        Search(segment, plan, placeholderGroup, {}, ts_offset, &search_result);
     ASSERT_EQ(res.error_code, Success);
 
     DeleteSearchPlan(plan);

@@ -4293,7 +4377,8 @@ TEST(CApiTest, RANGE_SEARCH_WITH_RADIUS_AND_RANGE_FILTER_WHEN_IP) {
     placeholderGroups.push_back(placeholderGroup);
 
     CSearchResult search_result;
-    auto res = Search(segment, plan, placeholderGroup, {}, &search_result);
+    auto res =
+        Search(segment, plan, placeholderGroup, {}, ts_offset, &search_result);
     ASSERT_EQ(res.error_code, Success);
 
     DeleteSearchPlan(plan);

@@ -4356,7 +4441,8 @@ TEST(CApiTest, RANGE_SEARCH_WITH_RADIUS_WHEN_L2) {
     placeholderGroups.push_back(placeholderGroup);
 
     CSearchResult search_result;
-    auto res = Search(segment, plan, placeholderGroup, {}, &search_result);
+    auto res =
+        Search(segment, plan, placeholderGroup, {}, ts_offset, &search_result);
     ASSERT_EQ(res.error_code, Success);
 
     DeleteSearchPlan(plan);

@@ -4419,7 +4505,8 @@ TEST(CApiTest, RANGE_SEARCH_WITH_RADIUS_AND_RANGE_FILTER_WHEN_L2) {
     placeholderGroups.push_back(placeholderGroup);
 
     CSearchResult search_result;
-    auto res = Search(segment, plan, placeholderGroup, {}, &search_result);
+    auto res =
+        Search(segment, plan, placeholderGroup, {}, ts_offset, &search_result);
     ASSERT_EQ(res.error_code, Success);
 
     DeleteSearchPlan(plan);
@@ -154,7 +154,7 @@ TEST(Float16, ExecWithoutPredicateFlat) {
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
 
-    auto sr = segment->Search(plan.get(), ph_group.get());
+    auto sr = segment->Search(plan.get(), ph_group.get(), 1L << 63);
     int topk = 5;
 
     query::Json json = SearchResultToJson(*sr);

@@ -392,7 +392,7 @@ TEST(Float16, ExecWithPredicate) {
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
 
-    auto sr = segment->Search(plan.get(), ph_group.get());
+    auto sr = segment->Search(plan.get(), ph_group.get(), 1L << 63);
     int topk = 5;
 
     query::Json json = SearchResultToJson(*sr);
@@ -1,8 +1,18 @@
+// Copyright (C) 2019-2020 Zilliz. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed under the License
+// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+// or implied. See the License for the specific language governing permissions and limitations under the License
+
 //
 // Created by zilliz on 2023/12/1.
 //
 
-
 #include <gtest/gtest.h>
 #include "common/Schema.h"
 #include "segcore/SegmentSealedImpl.h"
@@ -20,29 +30,29 @@ using namespace milvus::storage;
 
 const char* METRICS_TYPE = "metric_type";
 
 
 void
 prepareSegmentSystemFieldData(const std::unique_ptr<SegmentSealed>& segment,
                               size_t row_count,
-                              GeneratedData& data_set){
+                              GeneratedData& data_set) {
     auto field_data =
-            std::make_shared<milvus::FieldData<int64_t>>(DataType::INT64);
+        std::make_shared<milvus::FieldData<int64_t>>(DataType::INT64);
     field_data->FillFieldData(data_set.row_ids_.data(), row_count);
-    auto field_data_info = FieldDataInfo{
-            RowFieldID.get(), row_count, std::vector<milvus::FieldDataPtr>{field_data}};
+    auto field_data_info =
+        FieldDataInfo{RowFieldID.get(),
+                      row_count,
+                      std::vector<milvus::FieldDataPtr>{field_data}};
     segment->LoadFieldData(RowFieldID, field_data_info);
 
-    field_data =
-            std::make_shared<milvus::FieldData<int64_t>>(DataType::INT64);
+    field_data = std::make_shared<milvus::FieldData<int64_t>>(DataType::INT64);
     field_data->FillFieldData(data_set.timestamps_.data(), row_count);
     field_data_info =
-            FieldDataInfo{TimestampFieldID.get(),
-                          row_count,
-                          std::vector<milvus::FieldDataPtr>{field_data}};
+        FieldDataInfo{TimestampFieldID.get(),
+                      row_count,
+                      std::vector<milvus::FieldDataPtr>{field_data}};
     segment->LoadFieldData(TimestampFieldID, field_data_info);
 }
 
-TEST(GroupBY, Normal2){
+TEST(GroupBY, Normal2) {
     using namespace milvus;
     using namespace milvus::query;
     using namespace milvus::segcore;
@@ -51,7 +61,7 @@ TEST(GroupBY, Normal2){
     int dim = 64;
     auto schema = std::make_shared<Schema>();
     auto vec_fid = schema->AddDebugField(
-            "fakevec", DataType::VECTOR_FLOAT, dim, knowhere::metric::L2);
+        "fakevec", DataType::VECTOR_FLOAT, dim, knowhere::metric::L2);
     auto int8_fid = schema->AddDebugField("int8", DataType::INT8);
     auto int16_fid = schema->AddDebugField("int16", DataType::INT16);
     auto int32_fid = schema->AddDebugField("int32", DataType::INT32);

@@ -71,7 +81,7 @@ TEST(GroupBY, Normal2){
         auto info = FieldDataInfo(field_data.field_id(), N);
         auto field_meta = fields.at(FieldId(field_id));
         info.channel->push(
-                CreateFieldDataFromDataArray(N, &field_data, field_meta));
+            CreateFieldDataFromDataArray(N, &field_data, field_meta));
         info.channel->close();
 
         segment->LoadFieldData(FieldId(field_id), info);

@@ -80,7 +90,8 @@ TEST(GroupBY, Normal2){
 
     //3. load index
     auto vector_data = raw_data.get_col<float>(vec_fid);
-    auto indexing = GenVecIndexing(N, dim, vector_data.data(), knowhere::IndexEnum::INDEX_HNSW);
+    auto indexing = GenVecIndexing(
+        N, dim, vector_data.data(), knowhere::IndexEnum::INDEX_HNSW);
     LoadIndexInfo load_index_info;
     load_index_info.field_id = vec_fid.get();
     load_index_info.index = std::move(indexing);
@@ -102,26 +113,34 @@ TEST(GroupBY, Normal2){
     >)";
 
     auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
-    auto plan = CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
+    auto plan =
+        CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
     auto num_queries = 1;
     auto seed = 1024;
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, dim, seed);
-    auto ph_group = ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
-    auto search_result = segment->Search(plan.get(), ph_group.get());
+    auto ph_group =
+        ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    auto search_result =
+        segment->Search(plan.get(), ph_group.get(), 1L << 63);
     auto& group_by_values = search_result->group_by_values_;
-    ASSERT_EQ(search_result->group_by_values_.size(), search_result->seg_offsets_.size());
-    ASSERT_EQ(search_result->distances_.size(), search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->group_by_values_.size(),
+              search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->distances_.size(),
+              search_result->seg_offsets_.size());
 
     int size = group_by_values.size();
     std::unordered_set<int8_t> i8_set;
     float lastDistance = 0.0;
-    for(size_t i = 0; i < size; i++){
-        if(std::holds_alternative<int8_t>(group_by_values[i])){
+    for (size_t i = 0; i < size; i++) {
+        if (std::holds_alternative<int8_t>(group_by_values[i])) {
             int8_t g_val = std::get<int8_t>(group_by_values[i]);
-            ASSERT_FALSE(i8_set.count(g_val)>0);//no repetition on groupBy field
+            ASSERT_FALSE(i8_set.count(g_val) >
+                         0); //no repetition on groupBy field
             i8_set.insert(g_val);
             auto distance = search_result->distances_.at(i);
-            ASSERT_TRUE(lastDistance<=distance);//distance should be decreased as metrics_type is L2
+            ASSERT_TRUE(
+                lastDistance <=
+                distance); //distance should be decreased as metrics_type is L2
             lastDistance = distance;
         } else {
             //check padding
@@ -146,26 +165,34 @@ TEST(GroupBY, Normal2){
     >)";
 
     auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
-    auto plan = CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
+    auto plan =
+        CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
    auto num_queries = 1;
    auto seed = 1024;
    auto ph_group_raw = CreatePlaceholderGroup(num_queries, dim, seed);
-    auto ph_group = ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
-    auto search_result = segment->Search(plan.get(), ph_group.get());
+    auto ph_group =
+        ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    auto search_result =
+        segment->Search(plan.get(), ph_group.get(), 1L << 63);
     auto& group_by_values = search_result->group_by_values_;
-    ASSERT_EQ(search_result->group_by_values_.size(), search_result->seg_offsets_.size());
-    ASSERT_EQ(search_result->distances_.size(), search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->group_by_values_.size(),
+              search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->distances_.size(),
+              search_result->seg_offsets_.size());
 
     int size = group_by_values.size();
     std::unordered_set<int16_t> i16_set;
     float lastDistance = 0.0;
-    for(size_t i = 0; i < size; i++){
-        if(std::holds_alternative<int16_t>(group_by_values[i])){
+    for (size_t i = 0; i < size; i++) {
+        if (std::holds_alternative<int16_t>(group_by_values[i])) {
             int16_t g_val = std::get<int16_t>(group_by_values[i]);
-            ASSERT_FALSE(i16_set.count(g_val)>0);//no repetition on groupBy field
+            ASSERT_FALSE(i16_set.count(g_val) >
+                         0); //no repetition on groupBy field
             i16_set.insert(g_val);
             auto distance = search_result->distances_.at(i);
-            ASSERT_TRUE(lastDistance<=distance);//distance should be decreased as metrics_type is L2
+            ASSERT_TRUE(
+                lastDistance <=
+                distance); //distance should be decreased as metrics_type is L2
             lastDistance = distance;
         } else {
             //check padding
@@ -190,26 +217,34 @@ TEST(GroupBY, Normal2){
     >)";
 
     auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
-    auto plan = CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
+    auto plan =
+        CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
     auto num_queries = 1;
     auto seed = 1024;
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, dim, seed);
-    auto ph_group = ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
-    auto search_result = segment->Search(plan.get(), ph_group.get());
+    auto ph_group =
+        ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    auto search_result =
+        segment->Search(plan.get(), ph_group.get(), 1L << 63);
     auto& group_by_values = search_result->group_by_values_;
-    ASSERT_EQ(search_result->group_by_values_.size(), search_result->seg_offsets_.size());
-    ASSERT_EQ(search_result->distances_.size(), search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->group_by_values_.size(),
+              search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->distances_.size(),
+              search_result->seg_offsets_.size());
 
     int size = group_by_values.size();
     std::unordered_set<int32_t> i32_set;
     float lastDistance = 0.0;
-    for(size_t i = 0; i < size; i++){
-        if(std::holds_alternative<int32_t>(group_by_values[i])){
+    for (size_t i = 0; i < size; i++) {
+        if (std::holds_alternative<int32_t>(group_by_values[i])) {
             int16_t g_val = std::get<int32_t>(group_by_values[i]);
-            ASSERT_FALSE(i32_set.count(g_val)>0);//no repetition on groupBy field
+            ASSERT_FALSE(i32_set.count(g_val) >
+                         0); //no repetition on groupBy field
             i32_set.insert(g_val);
             auto distance = search_result->distances_.at(i);
-            ASSERT_TRUE(lastDistance<=distance);//distance should be decreased as metrics_type is L2
+            ASSERT_TRUE(
+                lastDistance <=
+                distance); //distance should be decreased as metrics_type is L2
             lastDistance = distance;
         } else {
             //check padding
@@ -234,26 +269,34 @@ TEST(GroupBY, Normal2){
     >)";
 
     auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
-    auto plan = CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
+    auto plan =
+        CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
     auto num_queries = 1;
     auto seed = 1024;
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, dim, seed);
-    auto ph_group = ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
-    auto search_result = segment->Search(plan.get(), ph_group.get());
+    auto ph_group =
+        ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    auto search_result =
+        segment->Search(plan.get(), ph_group.get(), 1L << 63);
     auto& group_by_values = search_result->group_by_values_;
-    ASSERT_EQ(search_result->group_by_values_.size(), search_result->seg_offsets_.size());
-    ASSERT_EQ(search_result->distances_.size(), search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->group_by_values_.size(),
+              search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->distances_.size(),
+              search_result->seg_offsets_.size());
 
     int size = group_by_values.size();
     std::unordered_set<int64_t> i64_set;
     float lastDistance = 0.0;
-    for(size_t i = 0; i < size; i++){
-        if(std::holds_alternative<int64_t>(group_by_values[i])){
+    for (size_t i = 0; i < size; i++) {
+        if (std::holds_alternative<int64_t>(group_by_values[i])) {
             int16_t g_val = std::get<int64_t>(group_by_values[i]);
-            ASSERT_FALSE(i64_set.count(g_val)>0);//no repetition on groupBy field
+            ASSERT_FALSE(i64_set.count(g_val) >
+                         0); //no repetition on groupBy field
             i64_set.insert(g_val);
             auto distance = search_result->distances_.at(i);
-            ASSERT_TRUE(lastDistance<=distance);//distance should be decreased as metrics_type is L2
+            ASSERT_TRUE(
+                lastDistance <=
+                distance); //distance should be decreased as metrics_type is L2
             lastDistance = distance;
         } else {
             //check padding
@@ -278,26 +321,35 @@ TEST(GroupBY, Normal2){
     >)";
 
     auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
-    auto plan = CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
+    auto plan =
+        CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
     auto num_queries = 1;
     auto seed = 1024;
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, dim, seed);
-    auto ph_group = ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
-    auto search_result = segment->Search(plan.get(), ph_group.get());
+    auto ph_group =
+        ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    auto search_result =
+        segment->Search(plan.get(), ph_group.get(), 1L << 63);
     auto& group_by_values = search_result->group_by_values_;
-    ASSERT_EQ(search_result->group_by_values_.size(), search_result->seg_offsets_.size());
-    ASSERT_EQ(search_result->distances_.size(), search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->group_by_values_.size(),
+              search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->distances_.size(),
+              search_result->seg_offsets_.size());
 
     int size = group_by_values.size();
     std::unordered_set<std::string_view> strs_set;
     float lastDistance = 0.0;
-    for(size_t i = 0; i < size; i++){
-        if(std::holds_alternative<std::string_view>(group_by_values[i])){
-            std::string_view g_val = std::get<std::string_view>(group_by_values[i]);
-            ASSERT_FALSE(strs_set.count(g_val)>0);//no repetition on groupBy field
+    for (size_t i = 0; i < size; i++) {
+        if (std::holds_alternative<std::string_view>(group_by_values[i])) {
+            std::string_view g_val =
+                std::get<std::string_view>(group_by_values[i]);
+            ASSERT_FALSE(strs_set.count(g_val) >
+                         0); //no repetition on groupBy field
             strs_set.insert(g_val);
             auto distance = search_result->distances_.at(i);
-            ASSERT_TRUE(lastDistance<=distance);//distance should be decreased as metrics_type is L2
+            ASSERT_TRUE(
+                lastDistance <=
+                distance); //distance should be decreased as metrics_type is L2
             lastDistance = distance;
         } else {
             //check padding
@@ -322,40 +374,48 @@ TEST(GroupBY, Normal2){
     >)";
 
     auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
-    auto plan = CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
+    auto plan =
+        CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
     auto num_queries = 1;
     auto seed = 1024;
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, dim, seed);
-    auto ph_group = ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
-    auto search_result = segment->Search(plan.get(), ph_group.get());
+    auto ph_group =
+        ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    auto search_result =
+        segment->Search(plan.get(), ph_group.get(), 1L << 63);
     auto& group_by_values = search_result->group_by_values_;
-    ASSERT_EQ(search_result->group_by_values_.size(), search_result->seg_offsets_.size());
-    ASSERT_EQ(search_result->distances_.size(), search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->group_by_values_.size(),
+              search_result->seg_offsets_.size());
+    ASSERT_EQ(search_result->distances_.size(),
+              search_result->seg_offsets_.size());
 
     int size = group_by_values.size();
     std::unordered_set<bool> bools_set;
     int boolValCount = 0;
     float lastDistance = 0.0;
-    for(size_t i = 0; i < size; i++){
-        if(std::holds_alternative<bool>(group_by_values[i])){
+    for (size_t i = 0; i < size; i++) {
+        if (std::holds_alternative<bool>(group_by_values[i])) {
             bool g_val = std::get<bool>(group_by_values[i]);
-            ASSERT_FALSE(bools_set.count(g_val)>0);//no repetition on groupBy field
+            ASSERT_FALSE(bools_set.count(g_val) >
+                         0); //no repetition on groupBy field
             bools_set.insert(g_val);
-            boolValCount+=1;
+            boolValCount += 1;
             auto distance = search_result->distances_.at(i);
-            ASSERT_TRUE(lastDistance<=distance);//distance should be decreased as metrics_type is L2
+            ASSERT_TRUE(
+                lastDistance <=
+                distance); //distance should be decreased as metrics_type is L2
             lastDistance = distance;
         } else {
             //check padding
             ASSERT_EQ(search_result->seg_offsets_[i], INVALID_SEG_OFFSET);
             ASSERT_EQ(search_result->distances_[i], 0.0);
         }
-        ASSERT_TRUE(boolValCount<=2);//bool values cannot exceed two
+        ASSERT_TRUE(boolValCount <= 2); //bool values cannot exceed two
     }
 }
 
-TEST(GroupBY, Reduce){
+TEST(GroupBY, Reduce) {
     using namespace milvus;
     using namespace milvus::query;
     using namespace milvus::segcore;
@@ -364,7 +424,7 @@ TEST(GroupBY, Reduce){
     int dim = 64;
     auto schema = std::make_shared<Schema>();
     auto vec_fid = schema->AddDebugField(
-            "fakevec", DataType::VECTOR_FLOAT, dim, knowhere::metric::L2);
+        "fakevec", DataType::VECTOR_FLOAT, dim, knowhere::metric::L2);
     auto int64_fid = schema->AddDebugField("int64", DataType::INT64);
     schema->set_primary_field_id(int64_fid);
     auto segment1 = CreateSealedSegment(schema);

@@ -386,7 +446,7 @@ TEST(GroupBY, Reduce){
         auto info = FieldDataInfo(field_data.field_id(), N);
         auto field_meta = fields.at(FieldId(field_id));
         info.channel->push(
-                CreateFieldDataFromDataArray(N, &field_data, field_meta));
+            CreateFieldDataFromDataArray(N, &field_data, field_meta));
         info.channel->close();
         segment1->LoadFieldData(FieldId(field_id), info);
     }

@@ -398,7 +458,7 @@ TEST(GroupBY, Reduce){
         auto info = FieldDataInfo(field_data.field_id(), N);
         auto field_meta = fields.at(FieldId(field_id));
         info.channel->push(
-                CreateFieldDataFromDataArray(N, &field_data, field_meta));
+            CreateFieldDataFromDataArray(N, &field_data, field_meta));
         info.channel->close();
         segment2->LoadFieldData(FieldId(field_id), info);
     }

@@ -406,7 +466,8 @@ TEST(GroupBY, Reduce){
 
     //3. load index
     auto vector_data_1 = raw_data1.get_col<float>(vec_fid);
-    auto indexing_1 = GenVecIndexing(N, dim, vector_data_1.data(), knowhere::IndexEnum::INDEX_HNSW);
+    auto indexing_1 = GenVecIndexing(
+        N, dim, vector_data_1.data(), knowhere::IndexEnum::INDEX_HNSW);
     LoadIndexInfo load_index_info_1;
     load_index_info_1.field_id = vec_fid.get();
     load_index_info_1.index = std::move(indexing_1);

@@ -414,14 +475,14 @@ TEST(GroupBY, Reduce){
     segment1->LoadIndex(load_index_info_1);
 
     auto vector_data_2 = raw_data2.get_col<float>(vec_fid);
-    auto indexing_2 = GenVecIndexing(N, dim, vector_data_2.data(), knowhere::IndexEnum::INDEX_HNSW);
+    auto indexing_2 = GenVecIndexing(
+        N, dim, vector_data_2.data(), knowhere::IndexEnum::INDEX_HNSW);
     LoadIndexInfo load_index_info_2;
     load_index_info_2.field_id = vec_fid.get();
     load_index_info_2.index = std::move(indexing_2);
     load_index_info_2.index_params[METRICS_TYPE] = knowhere::metric::L2;
     segment2->LoadIndex(load_index_info_2);
 
 
     //4. search group by respectively
     const char* raw_plan = R"(vector_anns: <
         field_id: 100
@@ -435,11 +496,13 @@ TEST(GroupBY, Reduce){
 
     >)";
     auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
-    auto plan = CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
+    auto plan =
+        CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
     auto num_queries = 10;
     auto topK = 100;
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, dim, seed);
-    auto ph_group = ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    auto ph_group =
+        ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
     CPlaceholderGroup c_ph_group = ph_group.release();
     CSearchPlan c_plan = plan.release();
 

@@ -447,9 +510,11 @@ TEST(GroupBY, Reduce){
     CSegmentInterface c_segment_2 = segment2.release();
     CSearchResult c_search_res_1;
     CSearchResult c_search_res_2;
-    auto status = Search(c_segment_1, c_plan, c_ph_group, {}, &c_search_res_1);
+    auto status =
+        Search(c_segment_1, c_plan, c_ph_group, {}, 1L << 63, &c_search_res_1);
     ASSERT_EQ(status.error_code, Success);
-    status = Search(c_segment_2, c_plan, c_ph_group, {}, &c_search_res_2);
+    status =
+        Search(c_segment_2, c_plan, c_ph_group, {}, 1L << 63, &c_search_res_2);
     ASSERT_EQ(status.error_code, Success);
     std::vector<CSearchResult> results;
     results.push_back(c_search_res_1);

@@ -458,23 +523,20 @@ TEST(GroupBY, Reduce){
     auto slice_nqs = std::vector<int64_t>{num_queries / 2, num_queries / 2};
     auto slice_topKs = std::vector<int64_t>{topK / 2, topK};
     CSearchResultDataBlobs cSearchResultData;
-    status = ReduceSearchResultsAndFillData(
-            &cSearchResultData,
-            c_plan,
-            results.data(),
-            results.size(),
-            slice_nqs.data(),
-            slice_topKs.data(),
-            slice_nqs.size()
-    );
+    status = ReduceSearchResultsAndFillData(&cSearchResultData,
+                                            c_plan,
+                                            results.data(),
+                                            results.size(),
+                                            slice_nqs.data(),
+                                            slice_topKs.data(),
+                                            slice_nqs.size());
     CheckSearchResultDuplicate(results);
     DeleteSearchResult(c_search_res_1);
     DeleteSearchResult(c_search_res_2);
     DeleteSearchResultDataBlobs(cSearchResultData);
 
-
     DeleteSearchPlan(c_plan);
     DeletePlaceholderGroup(c_ph_group);
     DeleteSegment(c_segment_1);
     DeleteSegment(c_segment_2);
 }
@@ -101,7 +101,9 @@ TEST(GrowingIndex, Correctness) {
         *schema, plan_str.data(), plan_str.size());
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
-    auto sr = segment->Search(plan.get(), ph_group.get());
+
+    Timestamp timestamp = 1000000;
+    auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
     EXPECT_EQ(sr->total_nq_, num_queries);
     EXPECT_EQ(sr->unity_topK_, top_k);
     EXPECT_EQ(sr->distances_.size(), num_queries * top_k);

@@ -111,7 +113,8 @@ TEST(GrowingIndex, Correctness) {
         *schema, range_plan_str.data(), range_plan_str.size());
     auto range_ph_group = ParsePlaceholderGroup(
         range_plan.get(), ph_group_raw.SerializeAsString());
-    auto range_sr = segment->Search(range_plan.get(), range_ph_group.get());
+    auto range_sr =
+        segment->Search(range_plan.get(), range_ph_group.get(), timestamp);
     ASSERT_EQ(range_sr->total_nq_, num_queries);
     EXPECT_EQ(sr->unity_topK_, top_k);
     EXPECT_EQ(sr->distances_.size(), num_queries * top_k);
@@ -128,8 +128,9 @@ TEST(Query, ExecWithPredicateLoader) {
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, 16, 1024);
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    Timestamp timestamp = 1000000;
 
-    auto sr = segment->Search(plan.get(), ph_group.get());
+    auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
 
     query::Json json = SearchResultToJson(*sr);
 #ifdef __linux__

@@ -212,7 +213,9 @@ TEST(Query, ExecWithPredicateSmallN) {
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
 
-    auto sr = segment->Search(plan.get(), ph_group.get());
+    Timestamp timestamp = 1000000;
+
+    auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
 
     query::Json json = SearchResultToJson(*sr);
     std::cout << json.dump(2);

@@ -270,8 +273,9 @@ TEST(Query, ExecWithPredicate) {
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, 16, 1024);
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    Timestamp timestamp = 1000000;
 
-    auto sr = segment->Search(plan.get(), ph_group.get());
+    auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
 
     query::Json json = SearchResultToJson(*sr);
 #ifdef __linux__

@@ -351,8 +355,9 @@ TEST(Query, ExecTerm) {
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, 16, 1024);
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    Timestamp timestamp = 1000000;
 
-    auto sr = segment->Search(plan.get(), ph_group.get());
+    auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
     int topk = 5;
     auto json = SearchResultToJson(*sr);
     ASSERT_EQ(sr->total_nq_, num_queries);

@@ -386,7 +391,8 @@ TEST(Query, ExecEmpty) {
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
 
-    auto sr = segment->Search(plan.get(), ph_group.get());
+    Timestamp timestamp = 1000000;
+    auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
     std::cout << SearchResultToJson(*sr);
     ASSERT_EQ(sr->unity_topK_, 0);
 

@@ -434,8 +440,8 @@ TEST(Query, ExecWithoutPredicateFlat) {
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, 16, 1024);
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
-
-    auto sr = segment->Search(plan.get(), ph_group.get());
+    Timestamp timestamp = 1000000;
+    auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
     std::vector<std::vector<std::string>> results;
     auto json = SearchResultToJson(*sr);
     std::cout << json.dump(2);

@@ -477,8 +483,9 @@ TEST(Query, ExecWithoutPredicate) {
     auto ph_group_raw = CreatePlaceholderGroup(num_queries, 16, 1024);
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    Timestamp timestamp = 1000000;
 
-    auto sr = segment->Search(plan.get(), ph_group.get());
+    auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
     assert_order(*sr, "l2");
     std::vector<std::vector<std::string>> results;
     auto json = SearchResultToJson(*sr);

@@ -546,7 +553,9 @@ TEST(Query, InnerProduct) {
         CreatePlaceholderGroupFromBlob(num_queries, 16, col.data());
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
-    auto sr = segment->Search(plan.get(), ph_group.get());
+
+    Timestamp ts = N * 2;
+    auto sr = segment->Search(plan.get(), ph_group.get(), ts);
     assert_order(*sr, "ip");
 }
 

@@ -633,6 +642,8 @@ TEST(Query, FillSegment) {
         CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
     auto ph_proto = CreatePlaceholderGroup(10, 16, 443);
     auto ph = ParsePlaceholderGroup(plan.get(), ph_proto.SerializeAsString());
+    Timestamp ts = N * 2UL;
+
     auto topk = 5;
     auto num_queries = 10;
 

@@ -642,7 +653,7 @@ TEST(Query, FillSegment) {
             schema->get_field_id(FieldName("fakevec")));
         plan->target_entries_.push_back(
             schema->get_field_id(FieldName("the_value")));
-        auto result = segment->Search(plan.get(), ph.get());
+        auto result = segment->Search(plan.get(), ph.get(), ts);
         result->result_offsets_.resize(topk * num_queries);
         segment->FillTargetEntry(plan.get(), *result);
         segment->FillPrimaryKeys(plan.get(), *result);

@@ -746,7 +757,9 @@ TEST(Query, ExecWithPredicateBinary) {
         num_queries, 512, vec_ptr.data() + 1024 * 512 / 8);
     auto ph_group =
         ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
-    auto sr = segment->Search(plan.get(), ph_group.get());
+
+    Timestamp timestamp = 1000000;
+    auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
 
     query::Json json = SearchResultToJson(*sr);
     std::cout << json.dump(2);
@ -80,10 +80,11 @@ TEST(Sealed, without_predicate) {
|
||||
CreatePlaceholderGroupFromBlob(num_queries, 16, query_ptr);
|
||||
auto ph_group =
|
||||
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
|
||||
Timestamp timestamp = 1000000;
|
||||
|
||||
std::vector<const PlaceholderGroup*> ph_group_arr = {ph_group.get()};
|
||||
|
||||
auto sr = segment->Search(plan.get(), ph_group.get());
|
||||
auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
|
||||
auto pre_result = SearchResultToJson(*sr);
|
||||
milvus::index::CreateIndexInfo create_index_info;
|
||||
create_index_info.field_type = DataType::VECTOR_FLOAT;
|
||||
@ -127,7 +128,7 @@ TEST(Sealed, without_predicate) {
|
||||
sealed_segment->DropFieldData(fake_id);
|
||||
sealed_segment->LoadIndex(load_info);
|
||||
|
||||
sr = sealed_segment->Search(plan.get(), ph_group.get());
|
||||
sr = sealed_segment->Search(plan.get(), ph_group.get(), timestamp);
|
||||
|
||||
auto post_result = SearchResultToJson(*sr);
|
||||
std::cout << "ref_result" << std::endl;
|
||||
@ -135,6 +136,9 @@ TEST(Sealed, without_predicate) {
|
||||
std::cout << "post_result" << std::endl;
|
||||
std::cout << post_result.dump(1);
|
||||
// ASSERT_EQ(ref_result.dump(1), post_result.dump(1));
|
||||
|
||||
sr = sealed_segment->Search(plan.get(), ph_group.get(), 0);
|
||||
EXPECT_EQ(sr->get_total_result_count(), 0);
|
||||
}
|
||||
|
||||
TEST(Sealed, with_predicate) {
|
||||
@ -196,10 +200,11 @@ TEST(Sealed, with_predicate) {
CreatePlaceholderGroupFromBlob(num_queries, 16, query_ptr);
auto ph_group =
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
Timestamp timestamp = 1000000;

std::vector<const PlaceholderGroup*> ph_group_arr = {ph_group.get()};

auto sr = segment->Search(plan.get(), ph_group.get());
auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
milvus::index::CreateIndexInfo create_index_info;
create_index_info.field_type = DataType::VECTOR_FLOAT;
create_index_info.metric_type = knowhere::metric::L2;
@ -242,7 +247,7 @@ TEST(Sealed, with_predicate) {
sealed_segment->DropFieldData(fake_id);
sealed_segment->LoadIndex(load_info);

sr = sealed_segment->Search(plan.get(), ph_group.get());
sr = sealed_segment->Search(plan.get(), ph_group.get(), timestamp);

for (int i = 0; i < num_queries; ++i) {
auto offset = i * topK;
@ -303,6 +308,7 @@ TEST(Sealed, with_predicate_filter_all) {
CreatePlaceholderGroupFromBlob(num_queries, 16, query_ptr);
auto ph_group =
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
Timestamp timestamp = 1000000;

std::vector<const PlaceholderGroup*> ph_group_arr = {ph_group.get()};

@ -337,7 +343,7 @@ TEST(Sealed, with_predicate_filter_all) {
ivf_sealed_segment->DropFieldData(fake_id);
ivf_sealed_segment->LoadIndex(load_info);

auto sr = ivf_sealed_segment->Search(plan.get(), ph_group.get());
auto sr = ivf_sealed_segment->Search(plan.get(), ph_group.get(), timestamp);
EXPECT_EQ(sr->unity_topK_, 0);
EXPECT_EQ(sr->get_total_result_count(), 0);

@ -372,7 +378,8 @@ TEST(Sealed, with_predicate_filter_all) {
hnsw_sealed_segment->DropFieldData(fake_id);
hnsw_sealed_segment->LoadIndex(hnsw_load_info);

auto sr2 = hnsw_sealed_segment->Search(plan.get(), ph_group.get());
auto sr2 =
hnsw_sealed_segment->Search(plan.get(), ph_group.get(), timestamp);
EXPECT_EQ(sr2->unity_topK_, 0);
EXPECT_EQ(sr2->get_total_result_count(), 0);
}
@ -400,7 +407,8 @@ TEST(Sealed, LoadFieldData) {

auto fakevec = dataset.get_col<float>(fakevec_id);

auto indexing = GenVecIndexing(N, dim, fakevec.data(), knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);
auto indexing = GenVecIndexing(
N, dim, fakevec.data(), knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);

auto segment = CreateSealedSegment(schema);
// std::string dsl = R"({
@ -456,7 +464,7 @@ TEST(Sealed, LoadFieldData) {
>
placeholder_tag: "$0"
>)";

Timestamp timestamp = 1000000;
auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
auto plan =
CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
@ -465,13 +473,13 @@ TEST(Sealed, LoadFieldData) {
auto ph_group =
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());

ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get()));
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get(), timestamp));

SealedLoadFieldData(dataset, *segment);
segment->Search(plan.get(), ph_group.get());
segment->Search(plan.get(), ph_group.get(), timestamp);

segment->DropFieldData(fakevec_id);
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get()));
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get(), timestamp));

LoadIndexInfo vec_info;
vec_info.field_id = fakevec_id.get();
@ -494,12 +502,12 @@ TEST(Sealed, LoadFieldData) {
ASSERT_EQ(chunk_span3[i], ref3[i]);
}

auto sr = segment->Search(plan.get(), ph_group.get());
auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
auto json = SearchResultToJson(*sr);
std::cout << json.dump(1);

segment->DropIndex(fakevec_id);
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get()));
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get(), timestamp));
}

TEST(Sealed, LoadFieldDataMmap) {
@ -525,7 +533,8 @@ TEST(Sealed, LoadFieldDataMmap) {

auto fakevec = dataset.get_col<float>(fakevec_id);

auto indexing = GenVecIndexing(N, dim, fakevec.data(), knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);
auto indexing = GenVecIndexing(
N, dim, fakevec.data(), knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);

auto segment = CreateSealedSegment(schema);
const char* raw_plan = R"(vector_anns: <
@ -554,7 +563,7 @@ TEST(Sealed, LoadFieldDataMmap) {
>
placeholder_tag: "$0"
>)";

Timestamp timestamp = 1000000;
auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
auto plan =
CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
@ -563,13 +572,13 @@ TEST(Sealed, LoadFieldDataMmap) {
auto ph_group =
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());

ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get()));
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get(), timestamp));

SealedLoadFieldData(dataset, *segment, {}, true);
segment->Search(plan.get(), ph_group.get());
segment->Search(plan.get(), ph_group.get(), timestamp);

segment->DropFieldData(fakevec_id);
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get()));
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get(), timestamp));

LoadIndexInfo vec_info;
vec_info.field_id = fakevec_id.get();
@ -592,12 +601,12 @@ TEST(Sealed, LoadFieldDataMmap) {
ASSERT_EQ(chunk_span3[i], ref3[i]);
}

auto sr = segment->Search(plan.get(), ph_group.get());
auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
auto json = SearchResultToJson(*sr);
std::cout << json.dump(1);

segment->DropIndex(fakevec_id);
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get()));
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get(), timestamp));
}

TEST(Sealed, LoadScalarIndex) {
@ -616,7 +625,8 @@ TEST(Sealed, LoadScalarIndex) {

auto fakevec = dataset.get_col<float>(fakevec_id);

auto indexing = GenVecIndexing(N, dim, fakevec.data(), knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);
auto indexing = GenVecIndexing(
N, dim, fakevec.data(), knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);

auto segment = CreateSealedSegment(schema);
// std::string dsl = R"({
@ -672,7 +682,7 @@ TEST(Sealed, LoadScalarIndex) {
>
placeholder_tag: "$0"
>)";

Timestamp timestamp = 1000000;
auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
auto plan =
CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
@ -731,7 +741,7 @@ TEST(Sealed, LoadScalarIndex) {
nothing_index.index = GenScalarIndexing<int32_t>(N, nothing_data.data());
segment->LoadIndex(nothing_index);

auto sr = segment->Search(plan.get(), ph_group.get());
auto sr = segment->Search(plan.get(), ph_group.get(), timestamp);
auto json = SearchResultToJson(*sr);
std::cout << json.dump(1);
}
@ -780,7 +790,7 @@ TEST(Sealed, Delete) {
>
placeholder_tag: "$0"
>)";

Timestamp timestamp = 1000000;
auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
auto plan =
CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
@ -789,7 +799,7 @@ TEST(Sealed, Delete) {
auto ph_group =
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());

ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get()));
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get(), timestamp));

SealedLoadFieldData(dataset, *segment);

@ -864,7 +874,7 @@ TEST(Sealed, OverlapDelete) {
>
placeholder_tag: "$0"
>)";

Timestamp timestamp = 1000000;
auto plan_str = translate_text_plan_to_binary_plan(raw_plan);
auto plan =
CreateSearchPlanByExpr(*schema, plan_str.data(), plan_str.size());
@ -873,7 +883,7 @@ TEST(Sealed, OverlapDelete) {
auto ph_group =
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());

ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get()));
ASSERT_ANY_THROW(segment->Search(plan.get(), ph_group.get(), timestamp));

SealedLoadFieldData(dataset, *segment);

@ -991,7 +1001,7 @@ TEST(Sealed, BF) {
auto ph_group =
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());

auto result = segment->Search(plan.get(), ph_group.get());
auto result = segment->Search(plan.get(), ph_group.get(), MAX_TIMESTAMP);
auto ves = SearchResultToVector(*result);
// first: offset, second: distance
EXPECT_GE(ves[0].first, 0);
@ -1045,7 +1055,7 @@ TEST(Sealed, BF_Overflow) {
auto ph_group =
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());

auto result = segment->Search(plan.get(), ph_group.get());
auto result = segment->Search(plan.get(), ph_group.get(), MAX_TIMESTAMP);
auto ves = SearchResultToVector(*result);
for (int i = 0; i < num_queries; ++i) {
EXPECT_EQ(ves[0].first, -1);
@ -1135,7 +1145,8 @@ TEST(Sealed, GetVector) {

auto fakevec = dataset.get_col<float>(fakevec_id);

auto indexing = GenVecIndexing(N, dim, fakevec.data(), knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);
auto indexing = GenVecIndexing(
N, dim, fakevec.data(), knowhere::IndexEnum::INDEX_FAISS_IVFFLAT);

auto segment_sealed = CreateSealedSegment(schema);

@ -1322,7 +1333,7 @@ TEST(Sealed, LoadArrayFieldData) {
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());

SealedLoadFieldData(dataset, *segment);
segment->Search(plan.get(), ph_group.get());
segment->Search(plan.get(), ph_group.get(), 1L << 63);

auto ids_ds = GenRandomIds(N);
auto s = dynamic_cast<SegmentSealedImpl*>(segment.get());
@ -1379,7 +1390,7 @@ TEST(Sealed, LoadArrayFieldDataWithMMap) {
ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());

SealedLoadFieldData(dataset, *segment, {}, true);
segment->Search(plan.get(), ph_group.get());
segment->Search(plan.get(), ph_group.get(), 1L << 63);
}

TEST(Sealed, SkipIndexSkipUnaryRange) {

@ -726,7 +726,7 @@ TEST(AlwaysTrueStringPlan, SearchWithOutputFields) {
auto sub_result = BruteForceSearch(
search_dataset, vec_col.data(), N, knowhere::Json(), nullptr);

auto sr = segment->Search(plan.get(), ph_group.get());
auto sr = segment->Search(plan.get(), ph_group.get(), MAX_TIMESTAMP);
segment->FillPrimaryKeys(plan.get(), *sr);
segment->FillTargetEntry(plan.get(), *sr);
ASSERT_EQ(sr->pk_type_, DataType::VARCHAR);

@ -904,7 +904,10 @@ SealedCreator(SchemaPtr schema, const GeneratedData& dataset) {
}

inline std::unique_ptr<milvus::index::VectorIndex>
GenVecIndexing(int64_t N, int64_t dim, const float* vec, const char* index_type) {
GenVecIndexing(int64_t N,
int64_t dim,
const float* vec,
const char* index_type) {
auto conf =
knowhere::Json{{knowhere::meta::METRIC_TYPE, knowhere::metric::L2},
{knowhere::meta::DIM, std::to_string(dim)},

@ -37,9 +37,9 @@ using namespace milvus;
using namespace milvus::segcore;

namespace {
const char*
get_default_schema_config() {
static std::string conf = R"(name: "default-collection"
const char*
get_default_schema_config() {
static std::string conf = R"(name: "default-collection"
fields: <
fieldID: 100
name: "fakevec"
@ -59,81 +59,81 @@ namespace {
data_type: Int64
is_primary_key: true
>)";
static std::string fake_conf = "";
return conf.c_str();
}
static std::string fake_conf = "";
return conf.c_str();
}

std::string
generate_max_float_query_data(int all_nq, int max_float_nq) {
assert(max_float_nq <= all_nq);
namespace ser = milvus::proto::common;
int dim = DIM;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::FloatVector);
for (int i = 0; i < all_nq; ++i) {
std::vector<float> vec;
if (i < max_float_nq) {
for (int d = 0; d < dim; ++d) {
vec.push_back(std::numeric_limits<float>::max());
}
} else {
for (int d = 0; d < dim; ++d) {
vec.push_back(1);
}
}
value->add_values(vec.data(), vec.size() * sizeof(float));
}
auto blob = raw_group.SerializeAsString();
return blob;
}
std::string
generate_query_data(int nq) {
namespace ser = milvus::proto::common;
std::default_random_engine e(67);
int dim = DIM;
std::normal_distribution<double> dis(0.0, 1.0);
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::FloatVector);
for (int i = 0; i < nq; ++i) {
std::vector<float> vec;
std::string
generate_max_float_query_data(int all_nq, int max_float_nq) {
assert(max_float_nq <= all_nq);
namespace ser = milvus::proto::common;
int dim = DIM;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::FloatVector);
for (int i = 0; i < all_nq; ++i) {
std::vector<float> vec;
if (i < max_float_nq) {
for (int d = 0; d < dim; ++d) {
vec.push_back(dis(e));
vec.push_back(std::numeric_limits<float>::max());
}
} else {
for (int d = 0; d < dim; ++d) {
vec.push_back(1);
}
value->add_values(vec.data(), vec.size() * sizeof(float));
}
auto blob = raw_group.SerializeAsString();
return blob;
value->add_values(vec.data(), vec.size() * sizeof(float));
}
void
CheckSearchResultDuplicate(const std::vector<CSearchResult>& results) {
auto nq = ((SearchResult*)results[0])->total_nq_;
auto blob = raw_group.SerializeAsString();
return blob;
}
std::string
generate_query_data(int nq) {
namespace ser = milvus::proto::common;
std::default_random_engine e(67);
int dim = DIM;
std::normal_distribution<double> dis(0.0, 1.0);
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::FloatVector);
for (int i = 0; i < nq; ++i) {
std::vector<float> vec;
for (int d = 0; d < dim; ++d) {
vec.push_back(dis(e));
}
value->add_values(vec.data(), vec.size() * sizeof(float));
}
auto blob = raw_group.SerializeAsString();
return blob;
}
void
CheckSearchResultDuplicate(const std::vector<CSearchResult>& results) {
auto nq = ((SearchResult*)results[0])->total_nq_;

std::unordered_set<PkType> pk_set;
std::unordered_set<GroupByValueType> group_by_val_set;
for (int qi = 0; qi < nq; qi++) {
pk_set.clear();
group_by_val_set.clear();
for (size_t i = 0; i < results.size(); i++) {
auto search_result = (SearchResult*)results[i];
ASSERT_EQ(nq, search_result->total_nq_);
auto topk_beg = search_result->topk_per_nq_prefix_sum_[qi];
auto topk_end = search_result->topk_per_nq_prefix_sum_[qi + 1];
for (size_t ki = topk_beg; ki < topk_end; ki++) {
ASSERT_NE(search_result->seg_offsets_[ki], INVALID_SEG_OFFSET);
auto ret = pk_set.insert(search_result->primary_keys_[ki]);
ASSERT_TRUE(ret.second);
std::unordered_set<PkType> pk_set;
std::unordered_set<GroupByValueType> group_by_val_set;
for (int qi = 0; qi < nq; qi++) {
pk_set.clear();
group_by_val_set.clear();
for (size_t i = 0; i < results.size(); i++) {
auto search_result = (SearchResult*)results[i];
ASSERT_EQ(nq, search_result->total_nq_);
auto topk_beg = search_result->topk_per_nq_prefix_sum_[qi];
auto topk_end = search_result->topk_per_nq_prefix_sum_[qi + 1];
for (size_t ki = topk_beg; ki < topk_end; ki++) {
ASSERT_NE(search_result->seg_offsets_[ki], INVALID_SEG_OFFSET);
auto ret = pk_set.insert(search_result->primary_keys_[ki]);
ASSERT_TRUE(ret.second);

if(search_result->group_by_values_.size()>ki){
auto group_by_val = search_result->group_by_values_[ki];
ASSERT_TRUE(group_by_val_set.count(group_by_val)==0);
group_by_val_set.insert(group_by_val);
}
if (search_result->group_by_values_.size() > ki) {
auto group_by_val = search_result->group_by_values_[ki];
ASSERT_TRUE(group_by_val_set.count(group_by_val) == 0);
group_by_val_set.insert(group_by_val);
}
}
}
}
}
} // namespace

@ -94,6 +94,7 @@ message SearchRequest {
common.DslType dsl_type = 8;
bytes serialized_expr_plan = 9;
repeated int64 output_fields_id = 10;
uint64 mvcc_timestamp = 11;
uint64 guarantee_timestamp = 12;
uint64 timeout_timestamp = 13;
int64 nq = 14;
@ -120,6 +121,7 @@ message SearchResults {

// search request cost
CostAggregation costAggregation = 13;
map<string, uint64> channels_mvcc = 14;
}

message CostAggregation {
@ -160,7 +162,7 @@ message RetrieveResults {
repeated int64 global_sealed_segmentIDs = 8;

// query request cost
CostAggregation costAggregation = 13;
CostAggregation costAggregation = 13;
}

message LoadIndex {

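Read together, the two new proto fields form a round trip: mvcc_timestamp on SearchRequest pins the read point for a request, and channels_mvcc on SearchResults reports, per DML channel, the timestamp the delegator actually used, so the proxy can reuse it in the second-phase ReQuery. A minimal Go sketch of that round trip, using simplified stand-ins for the generated internalpb structs:

package main

import "fmt"

// Simplified stand-ins for the generated internalpb types.
type SearchRequest struct {
    GuaranteeTimestamp uint64
    MvccTimestamp      uint64 // 0 means "let the delegator pick its tsafe"
}

type SearchResults struct {
    ChannelsMvcc map[string]uint64 // channel -> mvcc timestamp actually used
}

func main() {
    req := &SearchRequest{GuaranteeTimestamp: 42} // first phase: MvccTimestamp unset
    // The delegator stamps its tsafe and echoes it back per channel.
    resp := &SearchResults{ChannelsMvcc: map[string]uint64{"dml-ch-0": 1000000}}
    if req.MvccTimestamp == 0 {
        fmt.Println("requery dml-ch-0 at ts", resp.ChannelsMvcc["dml-ch-0"])
    }
}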
File diff suppressed because it is too large
@ -32,7 +32,7 @@ import (
"github.com/milvus-io/milvus/pkg/util/typeutil"
)

type executeFunc func(context.Context, UniqueID, types.QueryNodeClient, ...string) error
type executeFunc func(context.Context, UniqueID, types.QueryNodeClient, string) error

type ChannelWorkload struct {
db string

@ -248,7 +248,7 @@ func (s *LBPolicySuite) TestExecuteWithRetry() {
channel: s.channels[0],
shardLeaders: s.nodes,
nq: 1,
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, s ...string) error {
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, channel string) error {
return nil
},
retryTimes: 1,
@ -265,7 +265,7 @@ func (s *LBPolicySuite) TestExecuteWithRetry() {
channel: s.channels[0],
shardLeaders: s.nodes,
nq: 1,
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, s ...string) error {
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, channel string) error {
return nil
},
retryTimes: 1,
@ -285,7 +285,7 @@ func (s *LBPolicySuite) TestExecuteWithRetry() {
channel: s.channels[0],
shardLeaders: s.nodes,
nq: 1,
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, s ...string) error {
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, channel string) error {
return nil
},
retryTimes: 1,
@ -303,7 +303,7 @@ func (s *LBPolicySuite) TestExecuteWithRetry() {
channel: s.channels[0],
shardLeaders: s.nodes,
nq: 1,
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, s ...string) error {
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, channel string) error {
return nil
},
retryTimes: 2,
@ -324,7 +324,7 @@ func (s *LBPolicySuite) TestExecuteWithRetry() {
channel: s.channels[0],
shardLeaders: s.nodes,
nq: 1,
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, s ...string) error {
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, channel string) error {
counter++
if counter == 1 {
return errors.New("fake error")
@ -349,7 +349,7 @@ func (s *LBPolicySuite) TestExecuteWithRetry() {
channel: s.channels[0],
shardLeaders: s.nodes,
nq: 1,
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, s ...string) error {
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, channel string) error {
_, err := qn.Search(ctx, nil)
return err
},
@ -370,7 +370,7 @@ func (s *LBPolicySuite) TestExecute() {
collectionName: s.collectionName,
collectionID: s.collectionID,
nq: 1,
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, s ...string) error {
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, channel string) error {
return nil
},
})
@ -383,7 +383,7 @@ func (s *LBPolicySuite) TestExecute() {
collectionName: s.collectionName,
collectionID: s.collectionID,
nq: 1,
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, s ...string) error {
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, channel string) error {
// succeed in first execute
if counter.Add(1) == 1 {
return nil
@ -404,7 +404,7 @@ func (s *LBPolicySuite) TestExecute() {
collectionName: s.collectionName,
collectionID: s.collectionID,
nq: 1,
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, s ...string) error {
exec: func(ctx context.Context, ui UniqueID, qn types.QueryNodeClient, channel string) error {
return nil
},
})

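Narrowing executeFunc from a variadic channel list to a single channel means one retryable unit of work now maps to exactly one shard, which is what lets the requery phase pin a distinct MVCC timestamp per channel. A sketch of the new shape, with placeholder types standing in for the real ones:

package main

import (
    "context"
    "fmt"
)

type UniqueID = int64

// QueryNodeClient stands in for types.QueryNodeClient.
type QueryNodeClient interface{}

// Each invocation now targets exactly one channel, so a failed attempt can
// be retried against another replica of the same shard.
type executeFunc func(context.Context, UniqueID, QueryNodeClient, string) error

func main() {
    var exec executeFunc = func(ctx context.Context, nodeID UniqueID, qn QueryNodeClient, channel string) error {
        fmt.Println("node", nodeID, "channel", channel)
        return nil
    }
    _ = exec(context.Background(), 1, nil, "dml-ch-0")
}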
@ -350,7 +350,7 @@ func (dr *deleteRunner) produce(ctx context.Context, primaryKeys *schemapb.IDs)
// getStreamingQueryAndDelteFunc returns the query function used by LBPolicy
// make sure it is concurrent safe
func (dr *deleteRunner) getStreamingQueryAndDelteFunc(plan *planpb.PlanNode) executeFunc {
return func(ctx context.Context, nodeID int64, qn types.QueryNodeClient, channelIDs ...string) error {
return func(ctx context.Context, nodeID int64, qn types.QueryNodeClient, channel string) error {
var partitionIDs []int64

// optimize query when partitionKey on
@ -375,7 +375,7 @@ func (dr *deleteRunner) getStreamingQueryAndDelteFunc(plan *planpb.PlanNode) exe
log := log.Ctx(ctx).With(
zap.Int64("collectionID", dr.collectionID),
zap.Int64s("partitionIDs", partitionIDs),
zap.Strings("channels", channelIDs),
zap.String("channel", channel),
zap.Int64("nodeID", nodeID))

// set plan
@ -405,7 +405,7 @@ func (dr *deleteRunner) getStreamingQueryAndDelteFunc(plan *planpb.PlanNode) exe
OutputFieldsId: outputFieldIDs,
GuaranteeTimestamp: parseGuaranteeTsFromConsistency(dr.ts, dr.ts, dr.req.GetConsistencyLevel()),
},
DmlChannels: channelIDs,
DmlChannels: []string{channel},
Scope: querypb.DataScope_All,
}

@ -546,7 +546,7 @@ func TestDeleteRunner_Run(t *testing.T) {
},
}
lb.EXPECT().Execute(mock.Anything, mock.Anything).Call.Return(func(ctx context.Context, workload CollectionWorkLoad) error {
return workload.exec(ctx, 1, qn)
return workload.exec(ctx, 1, qn, "")
})

qn.EXPECT().QueryStream(mock.Anything, mock.Anything).Return(nil, errors.New("mock error"))
@ -591,7 +591,7 @@ func TestDeleteRunner_Run(t *testing.T) {
stream.EXPECT().Produce(mock.Anything).Return(nil)

lb.EXPECT().Execute(mock.Anything, mock.Anything).Call.Return(func(ctx context.Context, workload CollectionWorkLoad) error {
return workload.exec(ctx, 1, qn)
return workload.exec(ctx, 1, qn, "")
})

qn.EXPECT().QueryStream(mock.Anything, mock.Anything).Call.Return(
@ -654,7 +654,7 @@ func TestDeleteRunner_Run(t *testing.T) {
mockMgr.EXPECT().getOrCreateDmlStream(mock.Anything).Return(stream, nil)
mockMgr.EXPECT().getChannels(collectionID).Return(channels, nil)
lb.EXPECT().Execute(mock.Anything, mock.Anything).Call.Return(func(ctx context.Context, workload CollectionWorkLoad) error {
return workload.exec(ctx, 1, qn)
return workload.exec(ctx, 1, qn, "")
})

qn.EXPECT().QueryStream(mock.Anything, mock.Anything).Call.Return(
@ -716,7 +716,7 @@ func TestDeleteRunner_Run(t *testing.T) {
mockMgr.EXPECT().getOrCreateDmlStream(mock.Anything).Return(stream, nil)
mockMgr.EXPECT().getChannels(collectionID).Return(channels, nil)
lb.EXPECT().Execute(mock.Anything, mock.Anything).Call.Return(func(ctx context.Context, workload CollectionWorkLoad) error {
return workload.exec(ctx, 1, qn)
return workload.exec(ctx, 1, qn, "")
})

qn.EXPECT().QueryStream(mock.Anything, mock.Anything).Call.Return(
@ -797,7 +797,7 @@ func TestDeleteRunner_Run(t *testing.T) {
mockMgr.EXPECT().getOrCreateDmlStream(mock.Anything).Return(stream, nil)
mockMgr.EXPECT().getChannels(collectionID).Return(channels, nil)
lb.EXPECT().Execute(mock.Anything, mock.Anything).Call.Return(func(ctx context.Context, workload CollectionWorkLoad) error {
return workload.exec(ctx, 1, qn)
return workload.exec(ctx, 1, qn, "")
})

qn.EXPECT().QueryStream(mock.Anything, mock.Anything).Call.Return(
@ -899,7 +899,7 @@ func TestDeleteRunner_StreamingQueryAndDelteFunc(t *testing.T) {
qn := mocks.NewMockQueryNodeClient(t)
// without plan
queryFunc := dr.getStreamingQueryAndDelteFunc(nil)
assert.Error(t, queryFunc(ctx, 1, qn))
assert.Error(t, queryFunc(ctx, 1, qn, ""))
})

t.Run("partitionKey mode get meta failed", func(t *testing.T) {
@ -938,7 +938,7 @@ func TestDeleteRunner_StreamingQueryAndDelteFunc(t *testing.T) {
plan, err := planparserv2.CreateRetrievePlan(dr.schema.CollectionSchema, dr.req.Expr)
assert.NoError(t, err)
queryFunc := dr.getStreamingQueryAndDelteFunc(plan)
assert.Error(t, queryFunc(ctx, 1, qn))
assert.Error(t, queryFunc(ctx, 1, qn, ""))
})

t.Run("partitionKey mode get partition ID failed", func(t *testing.T) {
@ -981,6 +981,6 @@ func TestDeleteRunner_StreamingQueryAndDelteFunc(t *testing.T) {
plan, err := planparserv2.CreateRetrievePlan(dr.schema.CollectionSchema, dr.req.Expr)
assert.NoError(t, err)
queryFunc := dr.getStreamingQueryAndDelteFunc(plan)
assert.Error(t, queryFunc(ctx, 1, qn))
assert.Error(t, queryFunc(ctx, 1, qn, ""))
})
}

@ -42,9 +42,10 @@ type hybridSearchTask struct {

userOutputFields []string

qc types.QueryCoordClient
node types.ProxyComponent
lb LBPolicy
qc types.QueryCoordClient
node types.ProxyComponent
lb LBPolicy
queryChannelsTs map[string]Timestamp

collectionID UniqueID

@ -296,7 +297,8 @@ func (t *hybridSearchTask) Requery() error {
UseDefaultConsistency: t.request.GetUseDefaultConsistency(),
}

return doRequery(t.ctx, t.collectionID, t.node, t.schema.CollectionSchema, queryReq, t.result)
// TODO:Xige-16 refine the mvcc functionality of hybrid search
return doRequery(t.ctx, t.collectionID, t.node, t.schema.CollectionSchema, queryReq, t.result, t.queryChannelsTs)
}

func rankSearchResultData(ctx context.Context,

@ -61,6 +61,8 @@ type queryTask struct {
plan *planpb.PlanNode
partitionKeyMode bool
lb LBPolicy
channelsMvcc map[string]Timestamp
fastSkip bool
}

type queryParams struct {
@ -467,19 +469,33 @@ func (t *queryTask) PostExecute(ctx context.Context) error {
return nil
}

func (t *queryTask) queryShard(ctx context.Context, nodeID int64, qn types.QueryNodeClient, channelIDs ...string) error {
func (t *queryTask) queryShard(ctx context.Context, nodeID int64, qn types.QueryNodeClient, channel string) error {
needOverrideMvcc := false
mvccTs := t.MvccTimestamp
if len(t.channelsMvcc) > 0 {
mvccTs, needOverrideMvcc = t.channelsMvcc[channel]
// In fast mode, if there is no corresponding channel in channelsMvcc, quickly skip this query.
if !needOverrideMvcc && t.fastSkip {
return nil
}
}

retrieveReq := typeutil.Clone(t.RetrieveRequest)
retrieveReq.GetBase().TargetID = nodeID
if needOverrideMvcc && mvccTs > 0 {
retrieveReq.MvccTimestamp = mvccTs
}

req := &querypb.QueryRequest{
Req: retrieveReq,
DmlChannels: channelIDs,
DmlChannels: []string{channel},
Scope: querypb.DataScope_All,
}

log := log.Ctx(ctx).With(zap.Int64("collection", t.GetCollectionID()),
zap.Int64s("partitionIDs", t.GetPartitionIDs()),
zap.Int64("nodeID", nodeID),
zap.Strings("channels", channelIDs))
zap.String("channel", channel))

result, err := qn.Query(ctx, req)
if err != nil {

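The branch added to queryShard is the core of the requery contract: when channelsMvcc is populated, a channel is queried only at the timestamp recorded for it in the first phase, and with fastSkip set, channels that produced no first-phase hits are skipped outright. A condensed sketch of that decision, with simplified fields:

package main

import "fmt"

type queryShardState struct {
    mvccTimestamp uint64
    channelsMvcc  map[string]uint64
    fastSkip      bool
}

// pickMvcc condenses the branch in queryShard: it returns the timestamp to
// use for a channel and whether the shard can be skipped outright.
func pickMvcc(s queryShardState, channel string) (ts uint64, skip bool) {
    if len(s.channelsMvcc) == 0 {
        return s.mvccTimestamp, false
    }
    override, ok := s.channelsMvcc[channel]
    if !ok {
        // A channel without a recorded read point was not part of the
        // first phase; in fast mode there is nothing to re-query there.
        return s.mvccTimestamp, s.fastSkip
    }
    return override, false
}

func main() {
    s := queryShardState{channelsMvcc: map[string]uint64{"ch-0": 1000000}, fastSkip: true}
    fmt.Println(pickMvcc(s, "ch-0")) // 1000000 false
    fmt.Println(pickMvcc(s, "ch-1")) // skipped in fast mode
}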
@ -63,9 +63,10 @@ type searchTask struct {
offset int64
resultBuf *typeutil.ConcurrentSet[*internalpb.SearchResults]

qc types.QueryCoordClient
node types.ProxyComponent
lb LBPolicy
qc types.QueryCoordClient
node types.ProxyComponent
lb LBPolicy
queryChannelsTs map[string]Timestamp
}

func getPartitionIDs(ctx context.Context, dbName string, collectionName string, partitionNames []string) (partitionIDs []UniqueID, err error) {
@ -488,6 +489,13 @@ func (t *searchTask) PostExecute(ctx context.Context) error {
return err
}

t.queryChannelsTs = make(map[string]uint64)
for _, r := range toReduceResults {
for ch, ts := range r.GetChannelsMvcc() {
t.queryChannelsTs[ch] = ts
}
}

if len(toReduceResults) >= 1 {
MetricType = toReduceResults[0].GetMetricType()
}
@ -545,20 +553,20 @@ func (t *searchTask) PostExecute(ctx context.Context) error {
return nil
}

func (t *searchTask) searchShard(ctx context.Context, nodeID int64, qn types.QueryNodeClient, channelIDs ...string) error {
func (t *searchTask) searchShard(ctx context.Context, nodeID int64, qn types.QueryNodeClient, channel string) error {
searchReq := typeutil.Clone(t.SearchRequest)
searchReq.GetBase().TargetID = nodeID
req := &querypb.SearchRequest{
Req: searchReq,
DmlChannels: channelIDs,
DmlChannels: []string{channel},
Scope: querypb.DataScope_All,
TotalChannelNum: int32(len(channelIDs)),
TotalChannelNum: int32(1),
}

log := log.Ctx(ctx).With(zap.Int64("collection", t.GetCollectionID()),
zap.Int64s("partitionIDs", t.GetPartitionIDs()),
zap.Int64("nodeID", nodeID),
zap.Strings("channels", channelIDs))
zap.String("channel", channel))

var result *internalpb.SearchResults
var err error
@ -619,7 +627,7 @@ func (t *searchTask) Requery() error {
QueryParams: t.request.GetSearchParams(),
}

return doRequery(t.ctx, t.GetCollectionID(), t.node, t.schema.CollectionSchema, queryReq, t.result)
return doRequery(t.ctx, t.GetCollectionID(), t.node, t.schema.CollectionSchema, queryReq, t.result, t.queryChannelsTs)
}

func (t *searchTask) fillInEmptyResult(numQueries int64) {
@ -672,6 +680,7 @@ func doRequery(ctx context.Context,
schema *schemapb.CollectionSchema,
request *milvuspb.QueryRequest,
result *milvuspb.SearchResults,
queryChannelsTs map[string]Timestamp,
) error {
outputFields := request.GetOutputFields()
pkField, err := typeutil.GetPrimaryFieldSchema(schema)
@ -680,7 +689,10 @@ func doRequery(ctx context.Context,
}
ids := result.GetResults().GetIds()
plan := planparserv2.CreateRequeryPlan(pkField, ids)

channelsMvcc := make(map[string]Timestamp)
for k, v := range queryChannelsTs {
channelsMvcc[k] = v
}
qt := &queryTask{
ctx: ctx,
Condition: NewTaskCondition(ctx),
@ -691,10 +703,12 @@ func doRequery(ctx context.Context,
),
ReqID: paramtable.GetNodeID(),
},
request: request,
plan: plan,
qc: node.(*Proxy).queryCoord,
lb: node.(*Proxy).lbPolicy,
request: request,
plan: plan,
qc: node.(*Proxy).queryCoord,
lb: node.(*Proxy).lbPolicy,
channelsMvcc: channelsMvcc,
fastSkip: true,
}
queryResult, err := node.(*Proxy).query(ctx, qt)
if err != nil {

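PostExecute folds the per-channel read points reported by every query node into queryChannelsTs, and doRequery copies that map into the follow-up queryTask, so the second phase replays each shard at exactly the snapshot the first phase saw. A minimal sketch of that plumbing, assuming simplified task fields:

package main

import "fmt"

// Simplified stand-in for the proxy-side requery task.
type queryTask struct {
    channelsMvcc map[string]uint64
    fastSkip     bool
}

// buildRequery mirrors the doRequery plumbing in spirit: the timestamps
// collected in PostExecute are copied into the follow-up query task, and
// fastSkip is set so channels absent from the map are not queried at all.
func buildRequery(queryChannelsTs map[string]uint64) *queryTask {
    channelsMvcc := make(map[string]uint64, len(queryChannelsTs))
    for ch, ts := range queryChannelsTs {
        channelsMvcc[ch] = ts
    }
    return &queryTask{channelsMvcc: channelsMvcc, fastSkip: true}
}

func main() {
    qt := buildRequery(map[string]uint64{"dml-ch-0": 1000000})
    fmt.Println(qt.channelsMvcc, qt.fastSkip)
}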
@ -2061,7 +2061,7 @@ func TestSearchTask_Requery(t *testing.T) {

lb := NewMockLBPolicy(t)
lb.EXPECT().Execute(mock.Anything, mock.Anything).Run(func(ctx context.Context, workload CollectionWorkLoad) {
err = workload.exec(ctx, 0, qn)
err = workload.exec(ctx, 0, qn, "")
assert.NoError(t, err)
}).Return(nil)
lb.EXPECT().UpdateCostMetrics(mock.Anything, mock.Anything).Return()
@ -2141,7 +2141,7 @@ func TestSearchTask_Requery(t *testing.T) {

lb := NewMockLBPolicy(t)
lb.EXPECT().Execute(mock.Anything, mock.Anything).Run(func(ctx context.Context, workload CollectionWorkLoad) {
_ = workload.exec(ctx, 0, qn)
_ = workload.exec(ctx, 0, qn, "")
}).Return(fmt.Errorf("mock err 1"))
node.lbPolicy = lb

@ -2175,7 +2175,7 @@ func TestSearchTask_Requery(t *testing.T) {

lb := NewMockLBPolicy(t)
lb.EXPECT().Execute(mock.Anything, mock.Anything).Run(func(ctx context.Context, workload CollectionWorkLoad) {
_ = workload.exec(ctx, 0, qn)
_ = workload.exec(ctx, 0, qn, "")
}).Return(fmt.Errorf("mock err 1"))
node.lbPolicy = lb

@ -273,19 +273,19 @@ func (g *getStatisticsTask) getStatisticsFromQueryNode(ctx context.Context) erro
return nil
}

func (g *getStatisticsTask) getStatisticsShard(ctx context.Context, nodeID int64, qn types.QueryNodeClient, channelIDs ...string) error {
func (g *getStatisticsTask) getStatisticsShard(ctx context.Context, nodeID int64, qn types.QueryNodeClient, channel string) error {
nodeReq := proto.Clone(g.GetStatisticsRequest).(*internalpb.GetStatisticsRequest)
nodeReq.Base.TargetID = nodeID
req := &querypb.GetStatisticsRequest{
Req: nodeReq,
DmlChannels: channelIDs,
DmlChannels: []string{channel},
Scope: querypb.DataScope_All,
}
result, err := qn.GetStatistics(ctx, req)
if err != nil {
log.Warn("QueryNode statistic return error",
zap.Int64("nodeID", nodeID),
zap.Strings("channels", channelIDs),
zap.String("channel", channel),
zap.Error(err))
globalMetaCache.DeprecateShardCache(g.request.GetDbName(), g.collectionName)
return err
@ -293,7 +293,7 @@ func (g *getStatisticsTask) getStatisticsShard(ctx context.Context, nodeID int64
if result.GetStatus().GetErrorCode() == commonpb.ErrorCode_NotShardLeader {
log.Warn("QueryNode is not shardLeader",
zap.Int64("nodeID", nodeID),
zap.Strings("channels", channelIDs))
zap.String("channel", channel))
globalMetaCache.DeprecateShardCache(g.request.GetDbName(), g.collectionName)
return errInvalidShardLeaders
}

@ -204,11 +204,14 @@ func (sd *shardDelegator) Search(ctx context.Context, req *querypb.SearchRequest

// wait tsafe
waitTr := timerecord.NewTimeRecorder("wait tSafe")
err := sd.waitTSafe(ctx, req.Req.GuaranteeTimestamp)
tSafe, err := sd.waitTSafe(ctx, req.Req.GuaranteeTimestamp)
if err != nil {
log.Warn("delegator search failed to wait tsafe", zap.Error(err))
return nil, err
}
if req.GetReq().GetMvccTimestamp() == 0 {
req.Req.MvccTimestamp = tSafe
}
metrics.QueryNodeSQLatencyWaitTSafe.WithLabelValues(
fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel).
Observe(float64(waitTr.ElapseSpan().Milliseconds()))
@ -279,11 +282,14 @@ func (sd *shardDelegator) QueryStream(ctx context.Context, req *querypb.QueryReq

// wait tsafe
waitTr := timerecord.NewTimeRecorder("wait tSafe")
err := sd.waitTSafe(ctx, req.Req.GuaranteeTimestamp)
tSafe, err := sd.waitTSafe(ctx, req.Req.GuaranteeTimestamp)
if err != nil {
log.Warn("delegator query failed to wait tsafe", zap.Error(err))
return err
}
if req.GetReq().GetMvccTimestamp() == 0 {
req.Req.MvccTimestamp = tSafe
}
metrics.QueryNodeSQLatencyWaitTSafe.WithLabelValues(
fmt.Sprint(paramtable.GetNodeID()), metrics.QueryLabel).
Observe(float64(waitTr.ElapseSpan().Milliseconds()))
@ -347,11 +353,14 @@ func (sd *shardDelegator) Query(ctx context.Context, req *querypb.QueryRequest)

// wait tsafe
waitTr := timerecord.NewTimeRecorder("wait tSafe")
err := sd.waitTSafe(ctx, req.Req.GuaranteeTimestamp)
tSafe, err := sd.waitTSafe(ctx, req.Req.GuaranteeTimestamp)
if err != nil {
log.Warn("delegator query failed to wait tsafe", zap.Error(err))
return nil, err
}
if req.GetReq().GetMvccTimestamp() == 0 {
req.Req.MvccTimestamp = tSafe
}
metrics.QueryNodeSQLatencyWaitTSafe.WithLabelValues(
fmt.Sprint(paramtable.GetNodeID()), metrics.QueryLabel).
Observe(float64(waitTr.ElapseSpan().Milliseconds()))
@ -410,7 +419,7 @@ func (sd *shardDelegator) GetStatistics(ctx context.Context, req *querypb.GetSta
}

// wait tsafe
err := sd.waitTSafe(ctx, req.Req.GuaranteeTimestamp)
_, err := sd.waitTSafe(ctx, req.Req.GuaranteeTimestamp)
if err != nil {
log.Warn("delegator GetStatistics failed to wait tsafe", zap.Error(err))
return nil, err
@ -552,14 +561,15 @@ func executeSubTasks[T any, R interface {
}

// waitTSafe returns when the tsafe listener notifies a timestamp that meets the guarantee ts.
func (sd *shardDelegator) waitTSafe(ctx context.Context, ts uint64) error {
func (sd *shardDelegator) waitTSafe(ctx context.Context, ts uint64) (uint64, error) {
log := sd.getLogger(ctx)
// already safe to search
if sd.latestTsafe.Load() >= ts {
return nil
latestTSafe := sd.latestTsafe.Load()
if latestTSafe >= ts {
return latestTSafe, nil
}
// check lag duration too large
st, _ := tsoutil.ParseTS(sd.latestTsafe.Load())
st, _ := tsoutil.ParseTS(latestTSafe)
gt, _ := tsoutil.ParseTS(ts)
lag := gt.Sub(st)
maxLag := paramtable.Get().QueryNodeCfg.MaxTimestampLag.GetAsDuration(time.Second)
@ -570,7 +580,7 @@ func (sd *shardDelegator) waitTSafe(ctx context.Context, ts uint64) error {
zap.Duration("lag", lag),
zap.Duration("maxTsLag", maxLag),
)
return WrapErrTsLagTooLarge(lag, maxLag)
return 0, WrapErrTsLagTooLarge(lag, maxLag)
}

ch := make(chan struct{})
@ -592,12 +602,12 @@ func (sd *shardDelegator) waitTSafe(ctx context.Context, ts uint64) error {
case <-ctx.Done():
// notify wait goroutine to quit
sd.tsCond.Broadcast()
return ctx.Err()
return 0, ctx.Err()
case <-ch:
if !sd.Serviceable() {
return merr.WrapErrChannelNotAvailable(sd.vchannelName, "delegator closed during wait tsafe")
return 0, merr.WrapErrChannelNotAvailable(sd.vchannelName, "delegator closed during wait tsafe")
}
return nil
return sd.latestTsafe.Load(), nil
}
}
}

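Search, Query, and QueryStream now share one rule: block until tsafe reaches the guarantee timestamp, then, if the caller left MvccTimestamp at zero, stamp the tsafe that was reached as the request's read point. A minimal sketch of that rule (the real waitTSafe blocks on a condition variable instead of failing immediately):

package main

import (
    "errors"
    "fmt"
    "sync/atomic"
)

type delegator struct {
    latestTsafe atomic.Uint64
}

// waitTSafe is reduced to its essence: it reports the tsafe that satisfied
// the guarantee, so the caller can use it as the MVCC read point.
func (d *delegator) waitTSafe(guarantee uint64) (uint64, error) {
    ts := d.latestTsafe.Load()
    if ts >= guarantee {
        return ts, nil
    }
    return 0, errors.New("tsafe lag too large") // real code waits on a cond var
}

func main() {
    d := &delegator{}
    d.latestTsafe.Store(2000)
    mvcc := uint64(0) // left unset by the caller
    if tSafe, err := d.waitTSafe(1500); err == nil && mvcc == 0 {
        mvcc = tSafe
    }
    fmt.Println("mvcc =", mvcc)
}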
@ -398,7 +398,6 @@ func (node *QueryNode) searchChannel(ctx context.Context, req *querypb.SearchReq
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel, metrics.SuccessLabel, metrics.Leader).Inc()
metrics.QueryNodeSearchNQ.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Observe(float64(req.Req.GetNq()))
metrics.QueryNodeSearchTopK.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Observe(float64(req.Req.GetTopk()))

return resp, nil
}

@ -84,6 +84,7 @@ type SearchRequest struct {
cPlaceholderGroup C.CPlaceholderGroup
msgID UniqueID
searchFieldID UniqueID
mvccTimestamp Timestamp
}

func NewSearchRequest(ctx context.Context, collection *Collection, req *querypb.SearchRequest, placeholderGrp []byte) (*SearchRequest, error) {
@ -123,6 +124,7 @@ func NewSearchRequest(ctx context.Context, collection *Collection, req *querypb.
cPlaceholderGroup: cPlaceholderGroup,
msgID: req.GetReq().GetBase().GetMsgID(),
searchFieldID: int64(fieldID),
mvccTimestamp: req.GetReq().GetMvccTimestamp(),
}

return ret, nil

@ -35,6 +35,7 @@ import (
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)

type ReduceSuite struct {
@ -168,6 +169,7 @@ func (suite *ReduceSuite) TestReduceAllFunc() {
plan, err := createSearchPlanByExpr(context.Background(), suite.collection, serializedPlan, "")
suite.NoError(err)
searchReq, err := parseSearchRequest(context.Background(), plan, placeGroupByte)
searchReq.mvccTimestamp = typeutil.MaxTimestamp
suite.NoError(err)
defer searchReq.Delete()

@ -49,6 +49,12 @@ func ReduceSearchResults(ctx context.Context, results []*internalpb.SearchResult
return results[0], nil
}

channelsMvcc := make(map[string]uint64)
for _, r := range results {
for ch, ts := range r.GetChannelsMvcc() {
channelsMvcc[ch] = ts
}
}
log := log.Ctx(ctx)

searchResultData, err := DecodeSearchResults(results)
@ -88,7 +94,7 @@ func ReduceSearchResults(ctx context.Context, results []*internalpb.SearchResult
return nil, false
})
searchResults.CostAggregation = mergeRequestCost(requestCosts)

searchResults.ChannelsMvcc = channelsMvcc
return searchResults, nil
}

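ReduceSearchResults carries the per-channel timestamps through the merge: since each DML channel is owned by a single delegator, taking the union of the partial results' maps cannot produce conflicting entries. A sketch of the union, with a plain struct in place of internalpb.SearchResults:

package main

import "fmt"

type searchResults struct {
    channelsMvcc map[string]uint64
}

// reduce unions the per-channel read points into the merged result; each
// channel is served by exactly one delegator, so keys never conflict.
func reduce(partials []searchResults) searchResults {
    merged := searchResults{channelsMvcc: make(map[string]uint64)}
    for _, p := range partials {
        for ch, ts := range p.channelsMvcc {
            merged.channelsMvcc[ch] = ts
        }
    }
    return merged
}

func main() {
    out := reduce([]searchResults{
        {channelsMvcc: map[string]uint64{"ch-0": 100}},
        {channelsMvcc: map[string]uint64{"ch-1": 101}},
    })
    fmt.Println(out.channelsMvcc)
}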
@ -388,6 +388,7 @@ func (s *LocalSegment) Search(ctx context.Context, searchReq *SearchRequest) (*S
searchReq.plan.cSearchPlan,
searchReq.cPlaceholderGroup,
traceCtx,
C.uint64_t(searchReq.mvccTimestamp),
&searchResult.cSearchResult,
)
metrics.QueryNodeSQSegmentLatencyInCore.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))

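This is where the Go-side read point crosses into segcore: the mvccTimestamp captured in NewSearchRequest is converted to a C uint64 and handed to the C Search entry point, which uses it as the visibility bound for the execution plan. A reduced sketch of the pattern, with a toy C function standing in for the real segcore symbol:

package main

/*
#include <stdint.h>
// Toy stand-in for the segcore Search entry point, which receives the MVCC
// timestamp as a plain uint64_t.
static uint64_t toy_search(uint64_t mvcc_timestamp) { return mvcc_timestamp; }
*/
import "C"

import "fmt"

type Timestamp = uint64

type searchRequest struct {
    mvccTimestamp Timestamp // captured from req.GetReq().GetMvccTimestamp()
}

func main() {
    req := searchRequest{mvccTimestamp: 1000000}
    // Mirrors the real call site: the Go value crosses the boundary as C.uint64_t.
    got := C.toy_search(C.uint64_t(req.mvccTimestamp))
    fmt.Println(uint64(got))
}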
@ -656,8 +656,13 @@ func (node *QueryNode) SearchSegments(ctx context.Context, req *querypb.SearchRe
zap.String("channel", channel),
zap.String("scope", req.GetScope().String()),
)

resp := &internalpb.SearchResults{}
channelsMvcc := make(map[string]uint64)
for _, ch := range req.GetDmlChannels() {
channelsMvcc[ch] = req.GetReq().GetMvccTimestamp()
}
resp := &internalpb.SearchResults{
ChannelsMvcc: channelsMvcc,
}
if err := node.lifetime.Add(merr.IsHealthy); err != nil {
resp.Status = merr.Status(err)
return resp, nil
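Note the ordering: the response is created with ChannelsMvcc already filled in, before the health check, so even an early error return still tells the proxy which read point each channel was assigned. A small sketch of that construction, with simplified types:

package main

import (
    "errors"
    "fmt"
)

type searchResults struct {
    channelsMvcc map[string]uint64
    status       error
}

// newResponse pre-populates the per-channel read points so every return
// path, including early failures, carries them back to the proxy.
func newResponse(channels []string, mvccTs uint64, healthy bool) *searchResults {
    channelsMvcc := make(map[string]uint64, len(channels))
    for _, ch := range channels {
        channelsMvcc[ch] = mvccTs
    }
    resp := &searchResults{channelsMvcc: channelsMvcc}
    if !healthy {
        resp.status = errors.New("node unhealthy")
    }
    return resp
}

func main() {
    resp := newResponse([]string{"dml-ch-0"}, 1000000, false)
    fmt.Println(resp.channelsMvcc, resp.status)
}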
@ -733,7 +738,8 @@ func (node *QueryNode) Search(ctx context.Context, req *querypb.SearchRequest) (

log.Debug("Received SearchRequest",
zap.Int64s("segmentIDs", req.GetSegmentIDs()),
zap.Uint64("guaranteeTimestamp", req.GetReq().GetGuaranteeTimestamp()))
zap.Uint64("guaranteeTimestamp", req.GetReq().GetGuaranteeTimestamp()),
zap.Uint64("mvccTimestamp", req.GetReq().GetMvccTimestamp()))

tr := timerecord.NewTimeRecorderWithTrace(ctx, "SearchRequest")

@ -763,6 +769,7 @@ func (node *QueryNode) Search(ctx context.Context, req *querypb.SearchRequest) (

toReduceResults := make([]*internalpb.SearchResults, len(req.GetDmlChannels()))
runningGp, runningCtx := errgroup.WithContext(ctx)

for i, ch := range req.GetDmlChannels() {
ch := ch
req := &querypb.SearchRequest{

@ -1144,6 +1144,7 @@ func (suite *ServiceSuite) genCSearchRequest(nq int64, dataType schemapb.DataTyp
PlaceholderGroup: placeHolder,
DslType: commonpb.DslType_BoolExprV1,
Nq: nq,
MvccTimestamp: typeutil.MaxTimestamp,
}, nil
}

@ -257,6 +257,7 @@ func (t *SearchTask) Merge(other *SearchTask) bool {
// Check mergeable
if t.req.GetReq().GetDbID() != other.req.GetReq().GetDbID() ||
t.req.GetReq().GetCollectionID() != other.req.GetReq().GetCollectionID() ||
t.req.GetReq().GetMvccTimestamp() != other.req.GetReq().GetMvccTimestamp() ||
t.req.GetReq().GetDslType() != other.req.GetReq().GetDslType() ||
t.req.GetDmlChannels()[0] != other.req.GetDmlChannels()[0] ||
nq+otherNq > paramtable.Get().QueryNodeCfg.MaxGroupNQ.GetAsInt64() ||
@ -300,6 +301,13 @@ func (t *SearchTask) Wait() error {
}

func (t *SearchTask) Result() *internalpb.SearchResults {
if t.result != nil {
channelsMvcc := make(map[string]uint64)
for _, ch := range t.req.GetDmlChannels() {
channelsMvcc[ch] = t.req.GetReq().GetMvccTimestamp()
}
t.result.ChannelsMvcc = channelsMvcc
}
return t.result
}

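The extra condition in Merge is easy to miss but load-bearing: two queued search tasks may be batched into one segcore call only if they read at the same MVCC timestamp, otherwise one of them would be evaluated against the wrong snapshot. A sketch of the guard, with simplified fields:

package main

import "fmt"

type searchTask struct {
    mvccTimestamp uint64
    channel       string
}

// mergeable mirrors the new check: batching is only safe when both tasks
// target the same channel and pin the same MVCC read point.
func mergeable(a, b searchTask) bool {
    return a.channel == b.channel && a.mvccTimestamp == b.mvccTimestamp
}

func main() {
    fmt.Println(mergeable(searchTask{100, "ch"}, searchTask{100, "ch"})) // true
    fmt.Println(mergeable(searchTask{100, "ch"}, searchTask{200, "ch"})) // false
}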