diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 1c54328eaf..4db5bf3b49 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -167,7 +167,7 @@ To run E2E tests, use these command: ```shell cd tests20/python_client pip install -r requirements.txt -pytest --tags=L0 --workers 4 +pytest --tags=L0 -n auto ``` ## GitHub Flow diff --git a/tests20/python_client/base/collection_wrapper.py b/tests20/python_client/base/collection_wrapper.py index 8dec129968..46086d58e0 100644 --- a/tests20/python_client/base/collection_wrapper.py +++ b/tests20/python_client/base/collection_wrapper.py @@ -6,8 +6,10 @@ from check.func_check import * from utils.api_request import api_request from utils.util_log import test_log as log +TIMEOUT = 20 +#keep small timeout for stability tests +#TIMEOUT = 5 -TIMEOUT = 5 class ApiCollectionWrapper: diff --git a/tests20/python_client/check/func_check.py b/tests20/python_client/check/func_check.py index 66c38401f7..8661ab9dee 100644 --- a/tests20/python_client/check/func_check.py +++ b/tests20/python_client/check/func_check.py @@ -189,12 +189,11 @@ class ResponseChecker: else: ids_match = pc.list_contain_check(list(hits.ids), list(check_items["ids"])) - if ids_match: - log.info("search_results_check: limit (topK) and " - "ids searched for each query are correct") - else: + if not ids_match: log.error("search_results_check: ids searched not match") assert ids_match + log.info("search_results_check: limit (topK) and " + "ids searched for %d queries are correct" % len(search_res)) return True @staticmethod diff --git a/tests20/python_client/common/common_func.py b/tests20/python_client/common/common_func.py index cad1555a14..45483feb79 100644 --- a/tests20/python_client/common/common_func.py +++ b/tests20/python_client/common/common_func.py @@ -268,6 +268,58 @@ def gen_invalid_field_types(): return field_types +def gen_invaild_search_params_type(): + invalid_search_key = 100 + search_params = [] + for index_type in ct.all_index_types: + if index_type == 
"FLAT": + continue + search_params.append({"index_type": index_type, "search_params": {"invalid_key": invalid_search_key}}) + if index_type in ["IVF_FLAT", "IVF_SQ8", "IVF_SQ8H", "IVF_PQ"]: + for nprobe in ct.get_invalid_ints: + ivf_search_params = {"index_type": index_type, "search_params": {"nprobe": nprobe}} + search_params.append(ivf_search_params) + elif index_type in ["HNSW", "RHNSW_FLAT", "RHNSW_PQ", "RHNSW_SQ"]: + for ef in ct.get_invalid_ints: + hnsw_search_param = {"index_type": index_type, "search_params": {"ef": ef}} + search_params.append(hnsw_search_param) + elif index_type in ["NSG", "RNSG"]: + for search_length in ct.get_invalid_ints: + nsg_search_param = {"index_type": index_type, "search_params": {"search_length": search_length}} + search_params.append(nsg_search_param) + search_params.append({"index_type": index_type, "search_params": {"invalid_key": invalid_search_key}}) + elif index_type == "ANNOY": + for search_k in ct.get_invalid_ints: + if isinstance(search_k, int): + continue + annoy_search_param = {"index_type": index_type, "search_params": {"search_k": search_k}} + search_params.append(annoy_search_param) + return search_params + +def gen_search_param(index_type, metric_type="L2"): + search_params = [] + if index_type in ["FLAT", "IVF_FLAT", "IVF_SQ8", "IVF_SQ8H", "IVF_PQ"] \ + or index_type in ["BIN_FLAT", "BIN_IVF_FLAT"]: + for nprobe in [64, 128]: + ivf_search_params = {"metric_type": metric_type, "params": {"nprobe": nprobe}} + search_params.append(ivf_search_params) + elif index_type in ["HNSW", "RHNSW_FLAT", "RHNSW_PQ", "RHNSW_SQ"]: + for ef in [64, 32768]: + hnsw_search_param = {"metric_type": metric_type, "params": {"ef": ef}} + search_params.append(hnsw_search_param) + elif index_type in ["NSG", "RNSG"]: + for search_length in [100, 300]: + nsg_search_param = {"metric_type": metric_type, "params": {"search_length": search_length}} + search_params.append(nsg_search_param) + elif index_type == "ANNOY": + for search_k in [1000, 
5000]: + annoy_search_param = {"metric_type": metric_type, "params": {"search_k": search_k}} + search_params.append(annoy_search_param) + else: + log.error("Invalid index_type.") + raise Exception("Invalid index_type.") + return search_params + def gen_all_type_fields(): fields = [] for k, v in DataType.__members__.items(): diff --git a/tests20/python_client/common/common_type.py b/tests20/python_client/common/common_type.py index a3307069eb..49cd5dbadf 100644 --- a/tests20/python_client/common/common_type.py +++ b/tests20/python_client/common/common_type.py @@ -43,6 +43,7 @@ float_vec_field_desc = "float vector type field" binary_vec_field_desc = "binary vector type field" max_dim = 32768 gracefulTime = 1 +default_nlist = 128 Not_Exist = "Not_Exist" Connect_Object_Name = "Milvus" @@ -100,6 +101,7 @@ get_invalid_vectors = [ ] get_invalid_ints = [ + 9999999999, 1.0, None, [1, 2, 3], @@ -127,6 +129,7 @@ get_wrong_format_dict = [ {"host": 0, "port": 19520} ] + """ Specially defined list """ all_index_types = ["FLAT", "IVF_FLAT", "IVF_SQ8", "IVF_PQ", "HNSW", "ANNOY", "RHNSW_FLAT", "RHNSW_PQ", "RHNSW_SQ", "BIN_FLAT", "BIN_IVF_FLAT"] diff --git a/tests20/python_client/conftest.py b/tests20/python_client/conftest.py index 210615cd66..c8c69c74b3 100644 --- a/tests20/python_client/conftest.py +++ b/tests20/python_client/conftest.py @@ -197,3 +197,14 @@ def get_invalid_index_params(request): @pytest.fixture(params=ct.get_invalid_strs) def get_invalid_partition_name(request): yield request.param + + +# for test exit in the future +# @pytest.hookimpl(hookwrapper=True, tryfirst=True) +# def pytest_runtest_makereport(): +# result = yield +# report = result.get_result() +# if report.outcome == "failed": +# msg = "The execution of the test case fails and the test exits..." 
+# log.error(msg) +# pytest.exit(msg) \ No newline at end of file diff --git a/tests20/python_client/testcases/test_search.py b/tests20/python_client/testcases/test_search.py index 98b13b92d8..8eefa16414 100644 --- a/tests20/python_client/testcases/test_search.py +++ b/tests20/python_client/testcases/test_search.py @@ -48,8 +48,14 @@ class TestCollectionSearchInvalid(TestcaseBase): pytest.skip("empty field is valid") yield request.param + @pytest.fixture(scope="function", params=ct.get_invalid_strs) + def get_invalid_metric_type(self, request): + yield request.param + @pytest.fixture(scope="function", params=ct.get_invalid_ints) def get_invalid_limit(self, request): + if isinstance(request.param, int) and request.param >= 0: + pytest.skip("positive int is valid type for limit") yield request.param @pytest.fixture(scope="function", params=ct.get_invalid_strs) @@ -84,6 +90,7 @@ class TestCollectionSearchInvalid(TestcaseBase): pytest.skip("None is valid for output_fields") yield request.param + """ ****************************************************************** # The followings are invalid cases @@ -156,7 +163,7 @@ class TestCollectionSearchInvalid(TestcaseBase): def test_search_param_invalid_vectors(self, get_invalid_vectors): """ target: test search with invalid parameter values - method: search with invalid field + method: search with invalid data expected: raise exception and report the error """ # 1. initialize with data @@ -192,24 +199,100 @@ class TestCollectionSearchInvalid(TestcaseBase): "is different from schema"}) @pytest.mark.tags(CaseLabel.L1) - def test_search_param_invalid_metric_type(self): + def test_search_param_invalid_field_type(self, get_invalid_fields_type): + """ + target: test search with invalid parameter type + method: search with invalid field + expected: raise exception and report the error + """ + # 1. initialize with data + collection_w = self.init_collection_general(prefix)[0] + # 2. 
search with invalid field + invalid_search_field = get_invalid_fields_type + log.info("test_search_param_invalid_field_type: searching with " + "invalid field: %s" % invalid_search_field) + vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] + collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params, + default_limit, default_search_exp, + check_task=CheckTasks.err_res, + check_items= + {"err_code": 1, + "err_msg": "`anns_field` value {} is illegal".format(invalid_search_field)}) + + @pytest.mark.tags(CaseLabel.L1) + def test_search_param_invalid_field_value(self, get_invalid_fields_value): + """ + target: test search with invalid parameter values + method: search with invalid field + expected: raise exception and report the error + """ + # 1. initialize with data + collection_w = self.init_collection_general(prefix)[0] + # 2. search with invalid field + invalid_search_field = get_invalid_fields_value + log.info("test_search_param_invalid_field_value: searching with " + "invalid field: %s" % invalid_search_field) + vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] + collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params, + default_limit, default_search_exp, + check_task=CheckTasks.err_res, + check_items={"err_code": 1, + "err_msg": "Field %s doesn't exist in schema" + % invalid_search_field}) + + @pytest.mark.tags(CaseLabel.L1) + def test_search_param_invalid_metric_type(self, get_invalid_metric_type): """ target: test search with invalid parameter values method: search with invalid metric type expected: raise exception and report the error """ # 1. initialize with data - collection_w = self.init_collection_general(prefix)[0] + collection_w = self.init_collection_general(prefix, True, 10)[0] # 2. 
search with invalid metric_type log.info("test_search_param_invalid_metric_type: searching with invalid metric_type") + invalid_metric = get_invalid_metric_type vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] - search_params = {"metric_type": "L10", "params": {"nprobe": 10}} + search_params = {"metric_type": invalid_metric, "params": {"nprobe": 10}} collection_w.search(vectors[:default_nq], default_search_field, search_params, default_limit, default_search_exp, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "metric type not found"}) + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.xfail(reason="issue 6727") + @pytest.mark.parametrize("index, params", + zip(ct.all_index_types[:9], + ct.default_index_params[:9])) + def test_search_invalid_params_type(self, index, params): + """ + target: test search with invalid search params + method: test search with invalid params type + expected: raise exception and report the error + """ + if index == "FLAT": + pytest.skip("skip in FLAT index") + # 1. initialize with data + collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000, + is_index=True) + # 2. create index and load + default_index = {"index_type": index, "params": params, "metric_type": "L2"} + collection_w.create_index("float_vector", default_index) + collection_w.load() + # 3. 
search + invalid_search_params = cf.gen_invaild_search_params_type() + vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] + for invalid_search_param in invalid_search_params: + if index == invalid_search_param["index_type"]: + search_params = {"metric_type": "L2", "params": invalid_search_param["search_params"]} + collection_w.search(vectors[:default_nq], default_search_field, + search_params, default_limit, + default_search_exp, + check_task=CheckTasks.err_res, + check_items={"err_code": 0, + "err_msg": "metric type not found"}) + @pytest.mark.tags(CaseLabel.L1) def test_search_param_invalid_limit_type(self, get_invalid_limit): """ @@ -253,52 +336,10 @@ class TestCollectionSearchInvalid(TestcaseBase): check_items={"err_code": 1, "err_msg": err_msg}) - @pytest.mark.tags(CaseLabel.L1) - def test_search_param_invalid_field_type(self, get_invalid_fields_type): - """ - target: test search with invalid parameter values - method: search with invalid field - expected: raise exception and report the error - """ - # 1. initialize with data - collection_w = self.init_collection_general(prefix)[0] - # 2. search with invalid field - invalid_search_field = get_invalid_fields_type - log.info("test_search_param_invalid_field_type: searching with " - "invalid field: %s" % invalid_search_field) - vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] - collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params, - default_limit, default_search_exp, - check_task=CheckTasks.err_res, - check_items= - {"err_code": 1, - "err_msg": "`anns_field` value {} is illegal".format(invalid_search_field)}) - - @pytest.mark.tags(CaseLabel.L1) - def test_search_param_invalid_field_value(self, get_invalid_fields_value): - """ - target: test search with invalid parameter values - method: search with invalid field - expected: raise exception and report the error - """ - # 1. 
initialize with data - collection_w = self.init_collection_general(prefix)[0] - # 2. search with invalid field - invalid_search_field = get_invalid_fields_value - log.info("test_search_param_invalid_field_value: searching with " - "invalid field: %s" % invalid_search_field) - vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] - collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params, - default_limit, default_search_exp, - check_task=CheckTasks.err_res, - check_items={"err_code": 1, - "err_msg": "Field %s doesn't exist in schema" - % invalid_search_field}) - @pytest.mark.tags(CaseLabel.L1) def test_search_param_invalid_expr_type(self, get_invalid_expr_type): """ - target: test search with invalid parameter values + target: test search with invalid parameter type method: search with invalid search expressions expected: raise exception and report the error """ @@ -337,7 +378,7 @@ class TestCollectionSearchInvalid(TestcaseBase): % invalid_search_expr}) @pytest.mark.tags(CaseLabel.L2) - def test_search_index_partition_invalid_type(self, get_invalid_partition): + def test_search_partition_invalid_type(self, get_invalid_partition): """ target: test search invalid partition method: search with invalid partition type @@ -346,10 +387,7 @@ class TestCollectionSearchInvalid(TestcaseBase): # 1. initialize with data collection_w = self.init_collection_general(prefix)[0] vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] - # 2. create index - default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"} - collection_w.create_index("float_vector", default_index) - # 3. search the non exist partition + # 2. 
search the invalid partition partition_name = get_invalid_partition err_msg = "`partition_name_array` value {} is illegal".format(partition_name) collection_w.search(vectors[:default_nq], default_search_field, default_search_params, @@ -362,8 +400,8 @@ class TestCollectionSearchInvalid(TestcaseBase): def test_search_with_output_fields_invalid_type(self, get_invalid_output_fields): """ target: test search with output fields - method: search with non-exist output_field - expected: search success + method: search with invalid output_field + expected: raise exception and report the error """ # 1. initialize with data collection_w = self.init_collection_general(prefix)[0] @@ -567,10 +605,12 @@ class TestCollectionSearchInvalid(TestcaseBase): ct.err_msg: 'Field int63 not exist'}) @pytest.mark.tags(CaseLabel.L1) - def test_search_output_field_vector(self): + @pytest.mark.parametrize("output_fields", [[default_search_field], ["%"]]) + def test_search_output_field_vector(self, output_fields): """ target: test search with vector as output field - method: search with one vector output_field + method: search with one vector output_field or + wildcard for vector expected: raise exception and report the error """ # 1. 
initialize with data @@ -580,11 +620,30 @@ class TestCollectionSearchInvalid(TestcaseBase): vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] collection_w.search(vectors[:default_nq], default_search_field, default_search_params, default_limit, - default_search_exp, output_fields=[default_search_field], + default_search_exp, output_fields=output_fields, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "Search doesn't support " "vector field as output_fields"}) + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.parametrize("output_fields", [["*%"], ["**"], ["*", "@"]]) + def test_search_output_field_invalid_wildcard(self, output_fields): + """ + target: test search with invalid output wildcard + method: search with invalid output_field wildcard + expected: raise exception and report the error + """ + # 1. initialize with data + collection_w = self.init_collection_general(prefix, True)[0] + # 2. search + log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name) + vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] + collection_w.search(vectors[:default_nq], default_search_field, + default_search_params, default_limit, + default_search_exp, output_fields=output_fields, + check_task=CheckTasks.err_res, + check_items={"err_code": 1, + "err_msg": f"Field {output_fields[-1]} not exist"}) class TestCollectionSearch(TestcaseBase): """ Test case of search interface """ @@ -637,14 +696,14 @@ class TestCollectionSearch(TestcaseBase): "limit": default_limit}) @pytest.mark.tags(CaseLabel.L1) - def test_search_with_empty_vectors(self, nb, dim, auto_id, _async): + def test_search_with_empty_vectors(self, dim, auto_id, _async): """ target: test search with empty query vector method: search using empty query vector expected: search successfully with 0 results """ # 1. 
initialize without data - collection_w = self.init_collection_general(prefix, True, nb, + collection_w = self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0] # 2. search collection without data log.info("test_search_with_empty_vectors: Searching collection %s " @@ -655,6 +714,29 @@ class TestCollectionSearch(TestcaseBase): check_items={"nq": 0, "_async": _async}) + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.parametrize("search_params", [{}, {"params": {}}, {"params": {"nprobe": 10}}]) + def test_search_normal_default_params(self, dim, auto_id, search_params, _async): + """ + target: test search normal case + method: create connection, collection, insert and search + expected: search successfully with limit(topK) + """ + # 1. initialize with data + collection_w, _, _, insert_ids = \ + self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim) + # 2. search + log.info("test_search_normal: searching collection %s" % collection_w.name) + vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)] + collection_w.search(vectors[:default_nq], default_search_field, + search_params, default_limit, + default_search_exp, _async=_async, + check_task=CheckTasks.check_search_results, + check_items={"nq": default_nq, + "ids": insert_ids, + "limit": default_limit, + "_async": _async}) + @pytest.mark.tags(CaseLabel.L1) def test_search_before_after_delete(self, nq, dim, auto_id, _async): """ @@ -962,22 +1044,26 @@ class TestCollectionSearch(TestcaseBase): "_async": _async}) @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.xfail(reason="issue 6731") @pytest.mark.parametrize("index, params", zip(ct.all_index_types[:9], ct.default_index_params[:9])) - def test_search_after_different_index(self, nb, nq, dim, index, params, auto_id, _async): + def test_search_after_different_index(self, nq, dim, index, params, auto_id, _async): """ target: test search with different index method: test search with different index expected: searched 
successfully """ # 1. initialize with data - collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, + collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000, partition_num=1, auto_id=auto_id, dim=dim, is_index=True) vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] # 2. create different index + if params.get("m"): + if (dim % params["m"]) != 0: + params["m"] = dim//4 log.info("test_search_after_different_index: Creating index-%s" % index) default_index = {"index_type": index, "params": params, "metric_type": "L2"} collection_w.create_index("float_vector", default_index) @@ -995,37 +1081,80 @@ class TestCollectionSearch(TestcaseBase): "_async": _async}) @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.xfail(reason="issue 6731") @pytest.mark.parametrize("index, params", zip(ct.all_index_types[:9], ct.default_index_params[:9])) - def test_search_after_index_different_metric_type(self, nb, nq, dim, index, params, auto_id, _async): + def test_search_after_different_index_with_params(self, dim, index, params, auto_id, _async): + """ + target: test search with invalid search params + method: test search with invalid params type + expected: raise exception and report the error + """ + # 1. initialize with data + collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000, + partition_num=1, + auto_id=auto_id, + dim=dim, is_index=True) + # 2. create index and load + if params.get("m"): + if (dim % params["m"]) != 0: + params["m"] = dim//4 + default_index = {"index_type": index, "params": params, "metric_type": "L2"} + collection_w.create_index("float_vector", default_index) + collection_w.load() + # 3. 
search + search_params = cf.gen_search_param(index) + vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)] + for search_param in search_params: + log.info("Searching with search params: {}".format(search_param)) + collection_w.search(vectors[:default_nq], default_search_field, + search_param, default_limit, + default_search_exp, _async=_async, + check_task=CheckTasks.check_search_results, + check_items={"nq": default_nq, + "ids": insert_ids, + "limit": default_limit, + "_async": _async}) + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.xfail(reason="issue 6731") + @pytest.mark.parametrize("index, params", + zip(ct.all_index_types[:9], + ct.default_index_params[:9])) + def test_search_after_index_different_metric_type(self, nq, dim, index, params, auto_id, _async): """ target: test search with different metric type method: test search with different metric type expected: searched successfully """ # 1. initialize with data - collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, + collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000, partition_num=1, auto_id=auto_id, dim=dim, is_index=True) vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] # 2. create different index + if params.get("m"): + if (dim % params["m"]) != 0: + params["m"] = dim//4 log.info("test_search_after_index_different_metric_type: Creating index-%s" % index) default_index = {"index_type": index, "params": params, "metric_type": "IP"} collection_w.create_index("float_vector", default_index) log.info("test_search_after_index_different_metric_type: Created index-%s" % index) collection_w.load() # 3. 
search - log.info("test_search_after_index_different_metric_type: Searching after creating index-%s" % index) - collection_w.search(vectors[:nq], default_search_field, - default_search_params, default_limit, - default_search_exp, _async=_async, - check_task=CheckTasks.check_search_results, - check_items={"nq": nq, - "ids": insert_ids, - "limit": default_limit, - "_async": _async}) + search_params = cf.gen_search_param(index, "IP") + for search_param in search_params: + log.info("Searching with search params: {}".format(search_param)) + collection_w.search(vectors[:default_nq], default_search_field, + search_param, default_limit, + default_search_exp, _async=_async, + check_task=CheckTasks.check_search_results, + check_items={"nq": default_nq, + "ids": insert_ids, + "limit": default_limit, + "_async": _async}) @pytest.mark.tags(CaseLabel.L2) def test_search_collection_multiple_times(self, nb, nq, dim, auto_id, _async): @@ -1091,8 +1220,8 @@ class TestCollectionSearch(TestcaseBase): # 2. create collection with multiple vectors c_name = cf.gen_unique_str(prefix) fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(), - cf.gen_float_vec_field(), cf.gen_float_vec_field(name="tmp")] - schema = cf.gen_collection_schema(fields=fields, auto_id=auto_id, dim=dim) + cf.gen_float_vec_field(dim=dim), cf.gen_float_vec_field(name="tmp", dim=dim)] + schema = cf.gen_collection_schema(fields=fields, auto_id=auto_id) collection_w = self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property, check_items={"name": c_name, "schema": schema})[0] @@ -1127,7 +1256,6 @@ class TestCollectionSearch(TestcaseBase): "_async": _async}) @pytest.mark.tags(CaseLabel.L1) - @pytest.mark.xfail(reason="issue 6713") def test_search_index_one_partition(self, nb, auto_id, _async): """ target: test search from partition @@ -1152,8 +1280,9 @@ class TestCollectionSearch(TestcaseBase): limit_check = par[1].num_entities else: limit_check = limit + 
search_params = {"metric_type": "L2", "params": {"nprobe": 128}} collection_w.search(vectors[:default_nq], default_search_field, - default_search_params, limit, default_search_exp, + search_params, limit, default_search_exp, [par[1].name], _async=_async, check_task=CheckTasks.check_search_results, check_items={"nq": default_nq, @@ -1437,6 +1566,39 @@ class TestCollectionSearch(TestcaseBase): "limit": limit, "_async": _async}) + @pytest.mark.tags(CaseLabel.L2) + def test_search_expression_all_data_type(self, nb, nq, dim, auto_id, _async): + """ + target: test search using different supported data type + method: search using different supported data type + expected: search success + """ + # 1. initialize with data + collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, + is_all_data_type=True, + auto_id=auto_id, + dim=dim) + # 2. search + log.info("test_search_expression_all_data_type: Searching collection %s" % collection_w.name) + vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] + search_exp = "int64 >= 0 && int32 >= 0 && int16 >= 0 " \ + "&& int8 >= 0 && float >= 0 && double >= 0" + res = collection_w.search(vectors[:nq], default_search_field, + default_search_params, default_limit, + search_exp, _async=_async, + output_fields=[default_int64_field_name, + default_float_field_name], + check_task=CheckTasks.check_search_results, + check_items={"nq": nq, + "ids": insert_ids, + "limit": default_limit, + "_async": _async})[0] + if _async: + res.done() + res = res.result() + assert len(res[0][0].entity._row_data) != 0 + assert (default_int64_field_name and default_float_field_name) in res[0][0].entity._row_data + @pytest.mark.tags(CaseLabel.L2) def test_search_with_output_fields_empty(self, nb, nq, dim, auto_id, _async): """ @@ -1525,29 +1687,25 @@ class TestCollectionSearch(TestcaseBase): assert (default_int64_field_name and default_float_field_name) in res[0][0].entity._row_data @pytest.mark.tags(CaseLabel.L2) - def 
test_search_expression_all_data_type(self, nb, nq, dim, auto_id, _async): + @pytest.mark.parametrize("output_fields", [["*"], ["*", default_float_field_name]]) + def test_search_with_output_field_wildcard(self, output_fields, auto_id, _async): """ - target: test search using different supported data type - method: search using different supported data type + target: test search with output fields using wildcard + method: search with one output_field (wildcard) expected: search success """ # 1. initialize with data - collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb, - is_all_data_type=True, - auto_id=auto_id, - dim=dim) + collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, + auto_id=auto_id) # 2. search - log.info("test_search_expression_all_data_type: Searching collection %s" % collection_w.name) - vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] - search_exp = "int64 >= 0 && int32 >= 0 && int16 >= 0 " \ - "&& int8 >= 0 && float >= 0 && double >= 0" - res = collection_w.search(vectors[:nq], default_search_field, + log.info("test_search_with_output_field_wildcard: Searching collection %s" % collection_w.name) + vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)] + res = collection_w.search(vectors[:default_nq], default_search_field, default_search_params, default_limit, - search_exp, _async=_async, - output_fields=[default_int64_field_name, - default_float_field_name], + default_search_exp, _async=_async, + output_fields=output_fields, check_task=CheckTasks.check_search_results, - check_items={"nq": nq, + check_items={"nq": default_nq, "ids": insert_ids, "limit": default_limit, "_async": _async})[0] diff --git a/tests20/python_client/utils/api_request.py b/tests20/python_client/utils/api_request.py index 4e3c2da35b..28bb909d84 100644 --- a/tests20/python_client/utils/api_request.py +++ b/tests20/python_client/utils/api_request.py @@ -16,11 +16,13 @@ def 
api_request_catch(): def inner_wrapper(*args, **kwargs): try: res = func(*args, **kwargs) - log_res = str(res)[0:log_row_length] + '......' if len(str(res)) > log_row_length else str(res) + res_str = str(res) + log_res = res_str[0:log_row_length] + '......' if len(res_str) > log_row_length else res_str log.debug("(api_response) : %s " % log_res) return res, True except Exception as e: - log_e = str(e)[0:log_row_length] + '......' if len(str(e)) > log_row_length else str(e) + e_str = str(e) + log_e = e_str[0:log_row_length] + '......' if len(e_str) > log_row_length else e_str log.error(traceback.format_exc()) log.error("(api_response) : %s" % log_e) return Error(e), False @@ -37,7 +39,8 @@ def api_request(_list, **kwargs): if len(_list) > 1: for a in _list[1:]: arg.append(a) - log_arg = str(arg)[0:log_row_length] + '......' if len(str(arg)) > log_row_length else str(arg) + arg_str = str(arg) + log_arg = arg_str[0:log_row_length] + '......' if len(arg_str) > log_row_length else arg_str log.debug("(api_request) : [%s] args: %s, kwargs: %s" % (func.__qualname__, log_arg, str(kwargs))) return func(*arg, **kwargs) return False, False