diff --git a/tests/python_client/testcases/test_high_level_api.py b/tests/python_client/testcases/test_high_level_api.py
index 3d7c215cc8..a55ed029db 100644
--- a/tests/python_client/testcases/test_high_level_api.py
+++ b/tests/python_client/testcases/test_high_level_api.py
@@ -144,7 +144,7 @@ class TestHighLevelApi(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         vectors_to_search = rng.random((1, 8))
         search_params = {"metric_type": metric_type}
-        error = {ct.err_code: 1, ct.err_msg: f"metric type not match: expected=IP, actual={metric_type}"}
+        error = {ct.err_code: 65535, ct.err_msg: f"metric type not match: expected=IP, actual={metric_type}"}
         client_w.search(client, collection_name, vectors_to_search, limit=default_limit,
                         search_params=search_params,
                         check_task=CheckTasks.err_res, check_items=error)
diff --git a/tests/python_client/testcases/test_insert.py b/tests/python_client/testcases/test_insert.py
index 8563417b8a..4d09a79159 100644
--- a/tests/python_client/testcases/test_insert.py
+++ b/tests/python_client/testcases/test_insert.py
@@ -1147,12 +1147,10 @@ class TestInsertAsync(TestcaseBase):
         method: insert async with invalid partition
         expected: raise exception
         """
-        collection_w = self.init_collection_wrap(
-            name=cf.gen_unique_str(prefix))
+        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
         df = cf.gen_default_dataframe_data()
-        err_msg = "partition=p: partition not found"
-        future, _ = collection_w.insert(
-            data=df, partition_name="p", _async=True)
+        err_msg = "partition not found"
+        future, _ = collection_w.insert(data=df, partition_name="p", _async=True)
         future.done()
         with pytest.raises(MilvusException, match=err_msg):
             future.result()
@@ -2142,7 +2140,7 @@ class TestUpsertInvalid(TestcaseBase):
         collection_w = self.init_collection_wrap(name=c_name)
         data = cf.gen_default_dataframe_data(nb=2)
         partition_name = "partition1"
-        error = {ct.err_code: 15, ct.err_msg: f"partition={partition_name}: partition not found"}
+        error = {ct.err_code: 200, ct.err_msg: f"partition not found[partition={partition_name}]"}
         collection_w.upsert(data=data, partition_name=partition_name,
                             check_task=CheckTasks.err_res, check_items=error)
 
diff --git a/tests/python_client/testcases/test_search.py b/tests/python_client/testcases/test_search.py
index 47bbee2eac..269b763791 100644
--- a/tests/python_client/testcases/test_search.py
+++ b/tests/python_client/testcases/test_search.py
@@ -343,7 +343,7 @@ class TestCollectionSearchInvalid(TestcaseBase):
                             search_params, default_limit,
                             default_search_exp,
                             check_task=CheckTasks.err_res,
-                            check_items={"err_code": 65538,
+                            check_items={"err_code": 65535,
                                          "err_msg": "failed to search"})
 
     @pytest.mark.skip("not fixed yet")
@@ -873,14 +873,8 @@ class TestCollectionSearchInvalid(TestcaseBase):
         collection_w.search(vectors[:default_nq], default_search_field,
                             search_params, reorder_k + 1,
                             check_task=CheckTasks.err_res,
-                            check_items={"err_code": 65538,
-                                         "err_msg": "failed to search: attempt #0: failed to search/query "
-                                                    "delegator 1 for channel by-dev-rootcoord-dml_12_44501"
-                                                    "8735380972010v0: fail to Search, QueryNode ID=1, reaso"
-                                                    "n=worker(1) query failed: UnknownError: => failed to "
-                                                    "search: out of range in json: reorder_k(100) should be"
-                                                    " larger than k(101): attempt #1: no available shard de"
-                                                    "legator found: service unavailable"})
+                            check_items={"err_code": 65535,
+                                         "err_msg": "reorder_k(100) should be larger than k(101)"})
 
     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.parametrize("nq", [16385])
@@ -942,8 +936,9 @@ class TestCollectionSearchInvalid(TestcaseBase):
         collection_w.search(binary_vectors[:default_nq], "binary_vector",
                             search_params, default_limit, "int64 >= 0",
                             check_task=CheckTasks.err_res,
-                            check_items={"err_code": 65538, "err_msg": "metric type not match: "
-                                                                       "expected=JACCARD, actual=L2"})
+                            check_items={"err_code": 65535,
+                                         "err_msg": "metric type not match: invalid "
+                                                    "parameter[expected=JACCARD][actual=L2]"})
 
     @pytest.mark.tags(CaseLabel.L2)
     def test_search_with_output_fields_not_exist(self):
@@ -6131,8 +6126,10 @@ class TestSearchDiskann(TestcaseBase):
                             default_search_exp,
                             output_fields=output_fields,
                             check_task=CheckTasks.err_res,
-                            check_items={"err_code": 65538,
-                                         "err_msg": "fail to search on all shard leaders"})
+                            check_items={"err_code": 65535,
+                                         "err_msg": "search_list_size should be in range: [topk, "
+                                                    "max(200, topk * 10)], topk = 1, search_list_"
+                                                    "size = {}".format(search_list)})
 
     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.parametrize("limit", [20])
@@ -6589,8 +6586,9 @@ class TestCollectionRangeSearch(TestcaseBase):
                             range_search_params, default_limit,
                             default_search_exp,
                             check_task=CheckTasks.err_res,
-                            check_items={ct.err_code: 1,
-                                         ct.err_msg: "metric type not match: expected=COSINE, actual=IP"})
+                            check_items={ct.err_code: 65535,
+                                         ct.err_msg: "metric type not match: "
+                                                     "invalid parameter[expected=COSINE][actual=IP]"})
 
     @pytest.mark.tags(CaseLabel.L2)
     def test_range_search_only_radius(self):
@@ -6622,8 +6620,9 @@ class TestCollectionRangeSearch(TestcaseBase):
                             range_search_params, default_limit,
                             default_search_exp,
                             check_task=CheckTasks.err_res,
-                            check_items={ct.err_code: 1,
-                                         ct.err_msg: "metric type not match: expected=L2, actual=IP"})
+                            check_items={ct.err_code: 65535,
+                                         ct.err_msg: "metric type not match: invalid "
+                                                     "parameter[expected=L2][actual=IP]"})
 
     @pytest.mark.tags(CaseLabel.L2)
     def test_range_search_radius_range_filter_not_in_params(self):
@@ -6655,8 +6654,9 @@ class TestCollectionRangeSearch(TestcaseBase):
                             range_search_params, default_limit,
                             default_search_exp,
                             check_task=CheckTasks.err_res,
-                            check_items={ct.err_code: 1,
-                                         ct.err_msg: "metric type not match: expected=COSINE, actual=IP"})
+                            check_items={ct.err_code: 65535,
+                                         ct.err_msg: "metric type not match: invalid "
+                                                     "parameter[expected=COSINE][actual=IP]"})
 
     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.parametrize("dup_times", [1, 2])
diff --git a/tests/python_client/testcases/test_utility.py b/tests/python_client/testcases/test_utility.py
index 3b66fd0e96..18951c6add 100644
--- a/tests/python_client/testcases/test_utility.py
+++ b/tests/python_client/testcases/test_utility.py
@@ -245,7 +245,7 @@ class TestUtilityParams(TestcaseBase):
         self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name)
         self.collection_wrap.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)
         self.collection_wrap.load()
-        error = {ct.err_code: 4, ct.err_msg: "collection default:not_existed_name: collection not found"}
+        error = {ct.err_code: 100, ct.err_msg: "collection not found[database=default][collection=not_existed_name]"}
         self.utility_wrap.loading_progress("not_existed_name", check_task=CheckTasks.err_res, check_items=error)
 
     @pytest.mark.tags(CaseLabel.L2)
@@ -289,7 +289,8 @@ class TestUtilityParams(TestcaseBase):
         self.utility_wrap.wait_for_loading_complete(
             c_name,
             check_task=CheckTasks.err_res,
-            check_items={ct.err_code: 4, ct.err_msg: f"collection default:{c_name}: collection not found"})
+            check_items={ct.err_code: 100, ct.err_msg: "collection not found[database=default]"
+                                                       "[collection={}]".format(c_name)})
 
     @pytest.mark.tags(CaseLabel.L2)
     def test_wait_for_loading_partition_not_existed(self):
@@ -607,9 +608,9 @@ class TestUtilityParams(TestcaseBase):
         new_collection_name = cf.gen_unique_str(prefix)
         self.utility_wrap.rename_collection(old_collection_name, new_collection_name,
                                             check_task=CheckTasks.err_res,
-                                            check_items={"err_code": 4,
-                                                         "err_msg": "collection 1:test_collection_non_exist: "
-                                                                    "collection not found"})
+                                            check_items={"err_code": 100,
+                                                         "err_msg": "collection not found[database=1][collection"
+                                                                    "={}]".format(old_collection_name)})
 
     @pytest.mark.tags(CaseLabel.L1)
     def test_rename_collection_existed_collection_name(self):
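
Note on the assertion pattern behind these changes (reviewer sketch, not part of the patch): the tests pass check_task=CheckTasks.err_res together with a check_items dict holding the expected err_code and err_msg, and the fact that the patch can shorten expected messages to stable fragments such as "partition not found" suggests the message check is a containment match against the server error rather than an exact comparison. The snippet below is a minimal illustration of that style of check using values that appear in the diff; MockMilvusException and assert_error are hypothetical stand-ins, not the real checker invoked by check_task.

# Illustration only: hypothetical stand-ins for the framework's error check.

class MockMilvusException(Exception):
    """Minimal stand-in for a Milvus server error carrying a code and a message."""

    def __init__(self, code: int, message: str):
        super().__init__(message)
        self.code = code
        self.message = message


def assert_error(exc: MockMilvusException, check_items: dict) -> None:
    """Exact match on err_code, containment match on err_msg."""
    assert exc.code == check_items["err_code"]
    assert check_items["err_msg"] in exc.message


# Expected values taken from the updated tests: code 200 and the shortened
# "partition not found" fragment; the bracketed actual message follows the
# shape expected by the upsert test above (hypothetical example).
expected = {"err_code": 200, "err_msg": "partition not found"}
actual = MockMilvusException(200, "partition not found[partition=partition1]")
assert_error(actual, expected)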