test: remove xfail of fts test cases after fix (#37724)

Signed-off-by: zhuwenxing <wenxing.zhu@zilliz.com>
This commit is contained in:
zhuwenxing 2024-11-16 11:14:30 +08:00 committed by GitHub
parent ead1e7f68c
commit 3f7352f3cf
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 11 additions and 9 deletions

View File

@@ -1090,7 +1090,6 @@ class TestUpsertWithFullTextSearch(TestcaseBase):
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("nullable", [False, True])
@pytest.mark.parametrize("tokenizer", ["standard"])
@pytest.mark.xfail(reason="issue: https://github.com/milvus-io/milvus/issues/37021")
def test_upsert_for_full_text_search(self, tokenizer, nullable):
"""
target: test upsert data for full text search
@@ -1261,7 +1260,6 @@ class TestUpsertWithFullTextSearchNegative(TestcaseBase):
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("nullable", [False])
@pytest.mark.parametrize("tokenizer", ["standard"])
@pytest.mark.xfail(reason="issue: https://github.com/milvus-io/milvus/issues/37021")
def test_upsert_for_full_text_search_with_no_varchar_data(self, tokenizer, nullable):
"""
target: test upsert data for full text search with no varchar data
@@ -2327,8 +2325,13 @@ class TestSearchWithFullTextSearch(TestcaseBase):
3. verify the result
expected: full text search successfully and result is correct
"""
if tokenizer == "jieba":
lang_type = "chinese"
else:
lang_type = "english"
analyzer_params = {
"tokenizer": tokenizer,
"type": lang_type,
}
dim = 128
fields = [

View File

@@ -4631,10 +4631,9 @@ class TestQueryTextMatch(TestcaseBase):
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("enable_partition_key", [True, False])
@pytest.mark.parametrize("enable_inverted_index", [True, False])
@pytest.mark.parametrize("tokenizer", ["jieba"])
@pytest.mark.xfail(reason="unstable")
@pytest.mark.parametrize("lang_type", ["chinese"])
def test_query_text_match_zh_normal(
self, tokenizer, enable_inverted_index, enable_partition_key
self, lang_type, enable_inverted_index, enable_partition_key
):
"""
target: test text match normal
@@ -4644,7 +4643,7 @@ class TestQueryTextMatch(TestcaseBase):
expected: text match successfully and result is correct
"""
analyzer_params = {
"tokenizer": tokenizer,
"type": lang_type,
}
dim = 128
fields = [
@@ -4690,7 +4689,7 @@ class TestQueryTextMatch(TestcaseBase):
name=cf.gen_unique_str(prefix), schema=schema
)
fake = fake_en
if tokenizer == "jieba":
if lang_type == "chinese":
language = "zh"
fake = fake_zh
else:
@@ -4763,7 +4762,7 @@ class TestQueryTextMatch(TestcaseBase):
res, _ = collection_w.query(expr=expr, output_fields=["id", field])
log.info(f"res len {len(res)}")
for r in res:
assert any([token in r[field] for token in top_10_tokens])
assert any([token in r[field] for token in top_10_tokens]), f"top 10 tokens {top_10_tokens} not in {r[field]}"