test: Add ivf_rabitq index tests (#41914)

related issue: #41760

---------

Signed-off-by: yanliang567 <yanliang.qiao@zilliz.com>
This commit is contained in:
yanliang567 2025-05-20 19:28:24 +08:00 committed by GitHub
parent 59ab274dbe
commit d475d93a3d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
30 changed files with 768 additions and 67 deletions

View File

@ -87,7 +87,7 @@ pipeline {
axes {
axis {
name 'milvus_deployment_option'
values 'standalone', 'distributed-pulsar'
values 'standalone'
}
}
stages {

View File

@ -2314,6 +2314,10 @@ def gen_search_param(index_type, metric_type="L2"):
for search_list in [20, 300, 1500]:
diskann_search_param = {"metric_type": metric_type, "params": {"search_list": search_list}}
search_params.append(diskann_search_param)
elif index_type == "IVF_RABITQ":
for rbq_bits_query in [6, 7]:
ivf_rabitq_search_param = {"metric_type": metric_type, "params": {"rbq_bits_query": rbq_bits_query}}
search_params.append(ivf_rabitq_search_param)
else:
log.error("Invalid index_type.")
raise Exception("Invalid index_type.")
@ -2840,6 +2844,11 @@ def get_search_params_params(index_type):
return params
def get_default_metric_for_vector_type(vector_type=DataType.FLOAT_VECTOR):
    """Return the default metric type configured for the given vector type.

    Looks the type up in ``ct.default_metric_for_vector_type``; a vector type
    without a configured default raises ``KeyError``.
    """
    default_metric = ct.default_metric_for_vector_type[vector_type]
    return default_metric
def assert_json_contains(expr, list_data):
opposite = False
if expr.startswith("not"):
@ -3285,6 +3294,15 @@ def gen_sparse_vectors(nb, dim=1000, sparse_format="dok", empty_percentage=0):
]
return vectors
def gen_int8_vectors(num, dim):
    """Generate ``num`` random int8 vectors of dimension ``dim``.

    Returns a tuple ``(raw_vectors, int8_vectors)`` where ``raw_vectors`` is a
    list of plain-Python int lists with values in [-128, 127], and
    ``int8_vectors`` holds the same data converted to numpy arrays of dtype
    ``np.int8``.
    """
    raw_vectors = [[random.randint(-128, 127) for _ in range(dim)]
                   for _ in range(num)]
    int8_vectors = [np.array(vec, dtype=np.int8) for vec in raw_vectors]
    return raw_vectors, int8_vectors
def gen_vectors(nb, dim, vector_data_type=DataType.FLOAT_VECTOR):
vectors = []
@ -3300,6 +3318,8 @@ def gen_vectors(nb, dim, vector_data_type=DataType.FLOAT_VECTOR):
vectors = gen_text_vectors(nb) # for Full Text Search
elif vector_data_type == DataType.BINARY_VECTOR:
vectors = gen_binary_vectors(nb, dim)[1]
elif vector_data_type == DataType.INT8_VECTOR:
vectors = gen_int8_vectors(nb, dim)[1]
else:
log.error(f"Invalid vector data type: {vector_data_type}")
raise Exception(f"Invalid vector data type: {vector_data_type}")

View File

@ -48,9 +48,29 @@ default_bfloat16_vec_field_name = "bfloat16_vector"
another_float_vec_field_name = "float_vector1"
default_binary_vec_field_name = "binary_vector"
text_sparse_vector = "TEXT_SPARSE_VECTOR"
append_vector_type = [DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR, DataType.SPARSE_FLOAT_VECTOR]
all_vector_types = [
DataType.FLOAT_VECTOR,
DataType.FLOAT16_VECTOR,
DataType.BFLOAT16_VECTOR,
DataType.SPARSE_FLOAT_VECTOR,
DataType.INT8_VECTOR,
DataType.BINARY_VECTOR,
]
default_metric_for_vector_type = {
DataType.FLOAT_VECTOR: "COSINE",
DataType.FLOAT16_VECTOR: "L2",
DataType.BFLOAT16_VECTOR: "IP",
DataType.SPARSE_FLOAT_VECTOR: "IP",
DataType.INT8_VECTOR: "COSINE",
DataType.BINARY_VECTOR: "HAMMING",
}
all_dense_vector_types = [DataType.FLOAT_VECTOR, DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR]
all_float_vector_dtypes = [DataType.FLOAT_VECTOR, DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR, DataType.SPARSE_FLOAT_VECTOR]
append_vector_type = [DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR, DataType.SPARSE_FLOAT_VECTOR]
default_sparse_vec_field_name = "sparse_vector"
default_partition_name = "_default"
default_resource_group_name = '__default_resource_group'
@ -225,6 +245,7 @@ get_all_kind_data_distribution = [
""" Specially defined list """
L0_index_types = ["IVF_SQ8", "HNSW", "DISKANN"]
all_index_types = ["FLAT", "IVF_FLAT", "IVF_SQ8", "IVF_PQ",
"IVF_RABITQ",
"HNSW", "SCANN", "DISKANN",
"BIN_FLAT", "BIN_IVF_FLAT",
"SPARSE_INVERTED_INDEX", "SPARSE_WAND",
@ -233,12 +254,14 @@ all_index_types = ["FLAT", "IVF_FLAT", "IVF_SQ8", "IVF_PQ",
inverted_index_algo = ['TAAT_NAIVE', 'DAAT_WAND', 'DAAT_MAXSCORE']
default_all_indexes_params = [{}, {"nlist": 128}, {"nlist": 128}, {"nlist": 128, "m": 16, "nbits": 8},
{"nlist": 128, "refine": 'true', "refine_type": "SQ8"},
{"M": 32, "efConstruction": 360}, {"nlist": 128}, {},
{}, {"nlist": 64},
{}, {"drop_ratio_build": 0.2},
{"nlist": 64}, {"nlist": 64, "m": 16, "nbits": 8}]
default_all_search_params_params = [{}, {"nprobe": 32}, {"nprobe": 32}, {"nprobe": 32},
{"nprobe": 8, "rbq_bits_query": 6, "refine_k": 1.0},
{"ef": 100}, {"nprobe": 32, "reorder_k": 100}, {"search_list": 30},
{}, {"nprobe": 32},
{"drop_ratio_search": "0.2"}, {"drop_ratio_search": "0.2"},
@ -261,7 +284,7 @@ default_bin_flat_index = {"index_type": "BIN_FLAT", "params": {}, "metric_type":
default_sparse_inverted_index = {"index_type": "SPARSE_INVERTED_INDEX", "metric_type": "IP",
"params": {"drop_ratio_build": 0.2}}
default_text_sparse_inverted_index = {"index_type": "SPARSE_INVERTED_INDEX", "metric_type": "BM25",
"params": {"drop_ratio_build": 0.2, "bm25_k1": 1.5, "bm25_b": 0.75,}}
"params": {"drop_ratio_build": 0.2, "bm25_k1": 1.5, "bm25_b": 0.75,}}
default_search_params = {"params": {"nlist": 128}}
default_search_ip_params = {"metric_type": "IP", "params": {"nlist": 128}}
default_search_binary_params = {"metric_type": "JACCARD", "params": {"nprobe": 32}}

View File

@ -431,7 +431,7 @@ class TestMilvusClientDatabaseValid(TestMilvusClientV2Base):
# 1. create database
db_name = cf.gen_unique_str(db_prefix)
properties = {"database.force.deny.writing": "false",
"database.replica.number": "3"}
"database.replica.number": "1"}
self.create_database(client, db_name, properties=properties)
describe = self.describe_database(client, db_name)
dbs = self.list_databases(client)[0]
@ -440,7 +440,7 @@ class TestMilvusClientDatabaseValid(TestMilvusClientV2Base):
check_task=CheckTasks.check_describe_database_property,
check_items={"db_name": db_name,
"database.force.deny.writing": "false",
"database.replica.number": "3"})
"database.replica.number": "1"})
self.using_database(client, db_name)
# 2. create collection
collection_name = cf.gen_unique_str(prefix)

View File

@ -237,7 +237,7 @@ class TestMilvusClientIndexValid(TestMilvusClientV2Base):
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("index", ct.all_index_types[:7])
@pytest.mark.parametrize("index", ct.all_index_types[:8])
def test_milvus_client_index_with_params(self, index, metric_type):
"""
target: test index with user defined params
@ -284,7 +284,7 @@ class TestMilvusClientIndexValid(TestMilvusClientV2Base):
self.drop_collection(client, collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ct.all_index_types[:7])
@pytest.mark.parametrize("index", ct.all_index_types[:8])
def test_milvus_client_index_after_insert(self, index, metric_type):
"""
target: test index after insert

View File

@ -694,6 +694,7 @@ class TestMilvusClientSearchIteratorValid(TestMilvusClientV2Base):
default_string_field_name: str(i)} for i in range(default_nb)]
self.insert(client, collection_name, rows)
self.flush(client, collection_name)
self.wait_for_index_ready(client, collection_name, index_name=default_vector_field_name)
# 3. search iterator
vectors_to_search = cf.gen_vectors(1, default_dim)
search_params = {"params": {}}

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001
@ -84,7 +84,7 @@ max_hybrid_search_req_num = ct.max_hybrid_search_req_num
class TestCollectionRangeSearch(TestcaseBase):
""" Test case of range search interface """
@pytest.fixture(scope="function", params=ct.all_index_types[:7])
@pytest.fixture(scope="function", params=ct.all_index_types[:8])
def index_type(self, request):
tags = request.config.getoption("--tags")
if CaseLabel.L2 not in tags:

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001
@ -182,7 +182,7 @@ class TestSearchGroupBy(TestcaseBase):
check_items={"err_code": err_code, "err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ct.all_index_types[:7])
@pytest.mark.parametrize("index", ct.all_index_types[:8])
def test_search_group_by_unsupported_index(self, index):
"""
target: test search group by with the unsupported vector index

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001
@ -282,7 +282,7 @@ class TestCollectionSearchInvalid(TestcaseBase):
"[expected=COSINE][actual=L2]"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ct.all_index_types[:7])
@pytest.mark.parametrize("index", ct.all_index_types[:8])
def test_search_invalid_params_type(self, index):
"""
target: test search with invalid search params

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001

View File

@ -825,7 +825,7 @@ class TestSearchPaginationIndependent(TestMilvusClientV2Base):
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.tags(CaseLabel.GPU)
@pytest.mark.parametrize('vector_dtype', ct.all_dense_vector_types)
@pytest.mark.parametrize('index', ct.all_index_types[:7])
@pytest.mark.parametrize('index', ct.all_index_types[:8])
@pytest.mark.parametrize('metric_type', ct.dense_metrics)
def test_search_pagination_dense_vectors_indices_metrics_growing(self, vector_dtype, index, metric_type):
"""

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001
@ -145,7 +145,7 @@ class TestSearchBase(TestcaseBase):
"limit": ct.default_limit})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ct.all_index_types[7:9])
@pytest.mark.parametrize("index", ct.all_index_types[8:10])
def test_enable_mmap_search_for_binary_indexes(self, index):
"""
target: enable mmap for binary indexes
@ -1226,9 +1226,9 @@ class TestCollectionSearch(TestcaseBase):
"output_fields": output_fields})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("index", ct.all_index_types[:7])
@pytest.mark.parametrize("index", ct.all_index_types[:8])
@pytest.mark.parametrize("metrics", ct.dense_metrics)
@pytest.mark.parametrize("limit", [20, 1200])
@pytest.mark.parametrize("limit", [200])
def test_search_output_field_vector_after_different_index_metrics(self, index, metrics, limit):
"""
target: test search with output vector field after different index
@ -2231,7 +2231,7 @@ class TestCollectionSearch(TestcaseBase):
assert res.get(ct.default_string_field_name) == "abc"
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ct.all_index_types[1:4])
@pytest.mark.parametrize("index", ct.all_index_types[1:5])
def test_search_repeatedly_ivf_index_different_limit(self, index):
"""
target: test create collection repeatedly

View File

@ -64,7 +64,7 @@ default_string_field_name = ct.default_string_field_name
default_json_field_name = ct.default_json_field_name
default_index_params = ct.default_index
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
range_search_supported_indexes = ct.all_index_types[:7]
range_search_supported_indexes = ct.all_index_types[:8]
uid = "test_search"
nq = 1
epsilon = 0.001
@ -85,7 +85,7 @@ class TestSparseSearch(TestcaseBase):
""" Add some test cases for the sparse vector """
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("index", ct.all_index_types[9:11])
@pytest.mark.parametrize("index", ct.all_index_types[10:12])
@pytest.mark.parametrize("inverted_index_algo", ct.inverted_index_algo)
def test_sparse_index_search(self, index, inverted_index_algo):
"""
@ -125,7 +125,7 @@ class TestSparseSearch(TestcaseBase):
"output_fields": [ct.default_sparse_vec_field_name]})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ct.all_index_types[9:11])
@pytest.mark.parametrize("index", ct.all_index_types[10:12])
@pytest.mark.parametrize("dim", [32768, ct.max_sparse_vector_dim])
def test_sparse_index_dim(self, index, dim):
"""
@ -151,7 +151,7 @@ class TestSparseSearch(TestcaseBase):
"limit": 1})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ct.all_index_types[9:11])
@pytest.mark.parametrize("index", ct.all_index_types[10:12])
@pytest.mark.parametrize("inverted_index_algo", ct.inverted_index_algo)
def test_sparse_index_enable_mmap_search(self, index, inverted_index_algo):
"""
@ -199,7 +199,7 @@ class TestSparseSearch(TestcaseBase):
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("drop_ratio_build", [0.01])
@pytest.mark.parametrize("index", ct.all_index_types[9:11])
@pytest.mark.parametrize("index", ct.all_index_types[10:12])
def test_search_sparse_ratio(self, drop_ratio_build, index):
"""
target: create a sparse index by adjusting the ratio parameter.
@ -237,7 +237,7 @@ class TestSparseSearch(TestcaseBase):
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ct.all_index_types[9:11])
@pytest.mark.parametrize("index", ct.all_index_types[10:12])
def test_sparse_vector_search_output_field(self, index):
"""
target: create sparse vectors and search
@ -266,7 +266,7 @@ class TestSparseSearch(TestcaseBase):
})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ct.all_index_types[9:11])
@pytest.mark.parametrize("index", ct.all_index_types[10:12])
@pytest.mark.parametrize("inverted_index_algo", ct.inverted_index_algo)
def test_sparse_vector_search_iterator(self, index, inverted_index_algo):
"""

View File

@ -28,8 +28,11 @@ pytest-parallel
pytest-random-order
# pymilvus
pymilvus==2.6.0rc119
pymilvus[bulk_writer]==2.6.0rc119
pymilvus==2.6.0rc123
pymilvus[bulk_writer]==2.6.0rc123
# for protobuf
protobuf==5.27.2
# for customize config test
python-benedict==0.24.3

View File

@ -274,8 +274,8 @@ class TestAsyncMilvusClientIndexValid(TestMilvusClientV2Base):
"""
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:7],
ct.default_all_indexes_params[:7]))
zip(ct.all_index_types[:8],
ct.default_all_indexes_params[:8]))
async def test_async_milvus_client_create_drop_index_default(self, index, params, metric_type):
"""
target: test create and drop index normal case

View File

@ -336,7 +336,6 @@ class TestAsyncMilvusClientPartitionInvalid(TestMilvusClientV2Base):
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="pymilvus issue 1896")
@pytest.mark.parametrize("name", ["12 s", "(mn)", "中文", "%$#"])
async def test_async_milvus_client_load_partitions_invalid_partition_name(self, name):
"""
@ -352,15 +351,13 @@ class TestAsyncMilvusClientPartitionInvalid(TestMilvusClientV2Base):
# 1. create collection
await async_client.create_collection(collection_name, default_dim, consistency_level="Strong")
# 2. load partition
error = {ct.err_code: 1100, ct.err_msg: f"Invalid partition name: {name}. collection name can only "
f"contain numbers, letters and underscores: invalid parameter"}
error = {ct.err_code: 1100, ct.err_msg: f"partition not found"}
await async_client.load_partitions(collection_name, name,
check_task=CheckTasks.err_res, check_items=error)
# 3. drop action
await async_client.drop_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="pymilvus issue 1896")
async def test_async_milvus_client_load_partitions_partition_not_existed(self):
"""
target: test load partitions with nonexistent partition name
@ -376,15 +373,13 @@ class TestAsyncMilvusClientPartitionInvalid(TestMilvusClientV2Base):
# 1. create collection
await async_client.create_collection(collection_name, default_dim, consistency_level="Strong")
# 2. load partition
error = {ct.err_code: 1100, ct.err_msg: f"partition not found[database=default]"
f"[collection={collection_name}]"}
error = {ct.err_code: 1100, ct.err_msg: f"partition not found"}
await async_client.load_partitions(collection_name, partition_name,
check_task=CheckTasks.err_res, check_items=error)
# 3. drop action
await async_client.drop_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="pymilvus issue 1896")
async def test_async_milvus_client_load_partitions_partition_name_over_max_length(self):
"""
target: test load partitions with partition name over max length 255
@ -400,9 +395,7 @@ class TestAsyncMilvusClientPartitionInvalid(TestMilvusClientV2Base):
# 1. create collection
await async_client.create_collection(collection_name, default_dim, consistency_level="Strong")
# 2. load partition
error = {ct.err_code: 1100, ct.err_msg: f"invalid dimension: {collection_name}. "
f"the length of a collection name must be less than 255 characters: "
f"invalid parameter"}
error = {ct.err_code: 1100, ct.err_msg: f"partition not found"}
await async_client.load_partitions(collection_name, partition_name,
check_task=CheckTasks.err_res, check_items=error)
@ -494,7 +487,6 @@ class TestAsyncMilvusClientPartitionInvalid(TestMilvusClientV2Base):
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="pymilvus issue 1896")
@pytest.mark.parametrize("partition_name", ["12 s", "(mn)", "中文", "%$#"])
async def test_async_milvus_client_release_partitions_invalid_partition_name(self, partition_name):
"""
@ -510,15 +502,13 @@ class TestAsyncMilvusClientPartitionInvalid(TestMilvusClientV2Base):
# 1. create collection
await async_client.create_collection(collection_name, default_dim)
# 2. release partitions
error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. The first character of a "
f"partition name must be an underscore or letter.]"}
error = {ct.err_code: 65535, ct.err_msg: f"partition not found"}
await async_client.release_partitions(collection_name, partition_name,
check_task=CheckTasks.err_res, check_items=error)
# 3. drop action
await async_client.drop_collection(collection_name)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="pymilvus issue 1896")
async def test_async_milvus_client_release_partitions_invalid_partition_name_list(self):
"""
target: test release partitions with invalid partition name list
@ -534,8 +524,7 @@ class TestAsyncMilvusClientPartitionInvalid(TestMilvusClientV2Base):
await async_client.create_collection(collection_name, default_dim)
# 2. release partition
partition_name = ["12-s"]
error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. The first character of a "
f"partition name must be an underscore or letter.]"}
error = {ct.err_code: 65535, ct.err_msg: f"partition not found"}
await async_client.release_partitions(collection_name, partition_name,
check_task=CheckTasks.err_res, check_items=error)
# 3. drop action
@ -690,8 +679,8 @@ class TestAsyncMilvusClientPartitionValid(TestMilvusClientV2Base):
assert partition_name not in partitions
await async_client.drop_collection(collection_name)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.skip(reason="blocked by pymilvus issue #2796")
async def test_async_milvus_client_load_release_partitions(self):
"""
target: test load and release partitions normal case

View File

@ -0,0 +1,349 @@
from pymilvus import DataType
from common import common_type as ct
success = "success"
class IVF_RABITQ:
    """Parameter matrix for the IVF_RABITQ index tests.

    Each entry of ``build_params`` / ``search_params`` is a test case with:
      - "description": human-readable case name
      - "params": the params dict handed to index creation / search
      - "expected": the ``success`` sentinel, or an ``err_code``/``err_msg``
        dict matched against the server error response
    """

    # vector field types the IVF_RABITQ index can be built on
    supported_vector_types = [
        DataType.FLOAT_VECTOR,
        DataType.FLOAT16_VECTOR,
        DataType.BFLOAT16_VECTOR
    ]
    # distance metrics accepted for this index
    supported_metrics = ['L2', 'IP', 'COSINE']

    # build-time parameter cases
    build_params = [
        # nlist params test
        {
            "description": "Minimum Boundary Test",
            "params": {"nlist": 1},
            "expected": success
        },
        {
            "description": "Maximum Boundary Test",
            "params": {"nlist": 65536},
            "expected": success
        },
        {
            "description": "Out of Range Test - Negative",
            "params": {"nlist": -1},
            "expected": {"err_code": 999, "err_msg": "param 'nlist' (-1) should be in range [1, 65536]"}
        },
        {
            "description": "Out of Range Test - Too Large",
            "params": {"nlist": 65537},
            "expected": {"err_code": 999, "err_msg": "param 'nlist' (65537) should be in range [1, 65536]"}
        },
        {
            "description": "String Type Test will ignore the wrong type",
            "params": {"nlist": "128"},
            "expected": success
        },
        {
            "description": "Float Type Test",
            "params": {"nlist": 128.0},
            "expected": {"err_code": 999,
                         "err_msg": "wrong data type in json"}
        },
        {
            "description": "Boolean Type Test",
            "params": {"nlist": True},
            "expected": {"err_code": 999,
                         "err_msg": "invalid integer value, key: 'nlist', value: 'True': invalid parameter"}
        },
        {
            "description": "None Type Test, use default value",
            "params": {"nlist": None},
            "expected": success
        },
        {
            "description": "List Type Test",
            "params": {"nlist": [128]},
            "expected": {"err_code": 999,
                         "err_msg": "invalid integer value, key: 'nlist', value: '[128]': invalid parameter"}
        },
        # refine params test
        {
            "description": "Enable Refine Test",
            "params": {"refine": 'true'},  # to be fixed: #41760
            "expected": success
        },
        {
            "description": "Disable Refine Test",
            "params": {"refine": 'false'},  # to be fixed: #41760
            "expected": success
        },
        # refine_type test
        {
            "description": "Refine Type Test",
            "params": {"refine_type": "PQ"},
            "expected": {"err_code": 999,
                         "err_msg": "invalid refine type : PQ, optional types are [sq6, sq8, fp16, bf16]"}
        },
        {
            "description": "SQ6 Test",
            "params": {"refine": 'true', "refine_type": "SQ6"},
            "expected": success
        },
        {
            "description": "SQ8 Test",
            "params": {"refine": 'true', "refine_type": "SQ8"},
            "expected": success
        },
        {
            "description": "FP16 Test",
            "params": {"refine": 'true', "refine_type": "FP16"},
            "expected": success
        },
        {
            "description": "BF16 Test",
            "params": {"refine": 'true', "refine_type": "BF16"},
            "expected": success
        },
        {
            "description": "FP32 Test",
            "params": {"refine": 'true', "refine_type": "FP32"},
            "expected": success
        },
        {
            "description": "Invalid Refine Type Test",
            "params": {"refine": 'true', "refine_type": "INVALID"},
            "expected": {"err_code": 999,
                         "err_msg": "invalid refine type : INVALID, optional types are [sq6, sq8, fp16, bf16]"}
        },
        {
            "description": "Integer Type Test",
            "params": {"refine": 1},
            "expected": {"err_code": 999,
                         "err_msg": "Type conflict in json: param 'refine' (\"1\") should be a boolean"}
        },
        {
            "description": "None Type Test will success with default value",
            "params": {"refine": None},
            "expected": success
        },
        {
            "description": "Lowercase String Test",
            "params": {"refine": 'true', "refine_type": "sq6"},
            "expected": success
        },
        {
            "description": "Mixed Case String Test",
            "params": {"refine": 'true', "refine_type": "Sq8.0"},
            "expected": {"err_code": 999,
                         "err_msg": "invalid refine type : Sq8.0, optional types are [sq6, sq8, fp16, bf16]"}
        },
        {
            "description": "Whitespace String Test",
            "params": {"refine_type": " SQ8 "},
            "expected": {"err_code": 999,
                         "err_msg": "invalid refine type : SQ8 , optional types are [sq6, sq8, fp16, bf16]"}
        },
        {
            "description": "Integer Type Test",
            "params": {"refine": 'true', "refine_type": 8},
            "expected": {"err_code": 999,
                         "err_msg": "invalid refine type : 8, optional types are [sq6, sq8, fp16, bf16]"}
        },
        {
            "description": "None Type Test",
            "params": {"refine": 'true', "refine_type": None},
            "expected": success
        },
        # combination params test
        {
            "description": "Optimal Performance Combination Test",
            "params": {"nlist": 128, "refine": 'true', "refine_type": "SQ8"},
            "expected": success
        },
        {
            "description": "not refine with refine_type",
            "params": {"nlist": 127, "refine": 'false', "refine_type": "fp16"},
            "expected": success
        },
        {
            "description": "empty dict params",
            "params": {},
            "expected": success
        },
        {
            "description": "not_defined_param in the dict params",
            "params": {"nlist": 127, "refine": 'true', "not_defined_param": "nothing"},
            "expected": success
        },
    ]

    # search-time parameter cases
    search_params = [
        # nprobe params test
        {
            "description": "Minimum Boundary Test",
            "params": {"nprobe": 1},
            "expected": success
        },
        {
            "description": "Equal to nlist Test",
            "params": {"nprobe": 128},  # Assuming nlist=128
            "expected": success
        },
        {
            "description": "Exceed nlist Test",
            "params": {"nprobe": 129},  # Assuming nlist=128
            "expected": success  # to be fixed: #41765
        },
        {
            "description": "Negative Value Test",
            "params": {"nprobe": -1},
            "expected": {"err_code": 999,
                         "err_msg": "Out of range in json: param 'nprobe' (-1) should be in range [1, 65536]"}
        },
        {
            "description": "String Type Test, not check data type",
            "params": {"nprobe": "32"},
            "expected": success  # to be fixed: #41767
        },
        {
            "description": "Float Type Test",
            "params": {"nprobe": 32.0},
            "expected": {"err_code": 999,
                         "err_msg": "Type conflict in json: param 'nprobe' (32.0) should be integer"}
        },
        {
            "description": "Boolean Type Test",
            "params": {"nprobe": True},
            "expected": {"err_code": 999,
                         "err_msg": "Type conflict in json: param 'nprobe' (true) should be integer"}
        },
        {
            "description": "None Type Test",
            "params": {"nprobe": None},
            "expected": {"err_code": 999,
                         "err_msg": "Type conflict in json: param 'nprobe' (null) should be integer"}
        },
        # rbq_bits_query test
        {
            "description": "Default Value Test",
            "params": {"rbq_bits_query": 0},
            "expected": success
        },
        {
            "description": "Maximum Value Test",
            "params": {"rbq_bits_query": 8},
            "expected": success
        },
        {
            "description": "Recommended Value Test - 6bit",
            "params": {"rbq_bits_query": 6},
            "expected": success
        },
        {
            "description": "Out of Range Test",
            "params": {"rbq_bits_query": 9},
            "expected": {"err_code": 999,
                         "err_msg": "Out of range in json: param 'rbq_bits_query' (9) should be in range [0, 8]"}
        },
        {
            "description": "Negative Value Test",
            "params": {"rbq_bits_query": -1},
            "expected": {"err_code": 999,
                         "err_msg": "Out of range in json: param 'rbq_bits_query' (-1) should be in range [0, 8]"}
        },
        {
            "description": "String Type Test",
            "params": {"rbq_bits_query": "6"},
            "expected": success  # to be fixed: #41767
        },
        {
            "description": "Float Type Test",
            "params": {"rbq_bits_query": 6.0},
            "expected": {"err_code": 999,
                         "err_msg": "Type conflict in json: param 'rbq_bits_query' (6.0) should be integer"}
        },
        {
            "description": "Boolean Type Test",
            "params": {"rbq_bits_query": True},
            "expected": {"err_code": 999,
                         "err_msg": "Type conflict in json: param 'rbq_bits_query' (true) should be integer"}
        },
        {
            "description": "None Type Test",
            "params": {"rbq_bits_query": None},
            "expected": {"err_code": 999,
                         "err_msg": "Type conflict in json: param 'rbq_bits_query' (null) should be integer"}
        },
        # refine_k test
        {
            "description": "Default Value Test",
            "params": {"refine_k": 1.0},
            "expected": success
        },
        {
            "description": "Recommended Value Test - 2",
            "params": {"refine_k": 2.0},
            "expected": success
        },
        {
            "description": "Recommended Value Test - 5",
            "params": {"refine_k": 5.0},
            "expected": success
        },
        {
            "description": "Less Than One Test",
            "params": {"refine_k": 0.5},
            "expected": {"err_code": 999,
                         "err_msg": "Out of range in json: param 'refine_k' (0.5) should be in range [1.000000, 340282346638528859811704183484516925440.000000]"}
        },
        {
            "description": "Negative Value Test",
            "params": {"refine_k": -1.0},
            "expected": {"err_code": 999,
                         "err_msg": "Out of range in json: param 'refine_k' (-1.0) should be in range [1.000000, 340282346638528859811704183484516925440.000000]"}
        },
        {
            "description": "String Type Test",
            "params": {"refine_k": "2.0"},
            "expected": success  # to be fixed: #41767
        },
        {
            "description": "Integer Type Test",
            "params": {"refine_k": 2},
            "expected": success
        },
        {
            "description": "Boolean Type Test",
            "params": {"refine_k": True},
            "expected": {"err_code": 999,
                         "err_msg": "Type conflict in json: param 'refine_k' (true) should be a number"}
        },
        {
            "description": "None Type Test",
            "params": {"refine_k": None},
            "expected": {"err_code": 999,
                         "err_msg": "Type conflict in json: param 'refine_k' (null) should be a number"}
        },
        # combination params test
        {
            "description": "Optimal Performance Combination Test",
            "params": { "nprobe": 32, "rbq_bits_query": 6, "refine_k": 2.0},
            "expected": success
        },
        {
            "description": "Highest Recall Combination Test",
            "params": { "nprobe": 128, "rbq_bits_query": 0, "refine_k": 5.0},
            "expected": success
        },
        {
            "description": "empty dict params",
            "params": {},
            "expected": success
        },
    ]

View File

@ -0,0 +1,315 @@
import logging
from utils.util_pymilvus import *
from common.common_type import CaseLabel, CheckTasks
from common import common_type as ct
from common import common_func as cf
from base.client_v2_base import TestMilvusClientV2Base
import pytest
from idx_ivf_rabitq import IVF_RABITQ
# Index type under test and shared defaults for every case in this module.
index_type = "IVF_RABITQ"
# Sentinel used in the IVF_RABITQ param tables to mark an expected-success case.
success = "success"
pk_field_name = 'id'
vector_field_name = 'vector'
# Vector dimension shared by all collections in this module.
dim = ct.default_dim
# Rows inserted per batch; total rows = default_nb * insert_times.
default_nb = 2000
# NOTE(review): "refine" is passed as the string 'true' rather than a bool —
# presumably the server accepts both; confirm against the index param spec.
default_build_params = {"nlist": 128, "refine": 'true', "refine_type": "SQ8"}
default_search_params = {"nprobe": 8, "rbq_bits_query": 6, "refine_k": 1.0}
class TestIvfRabitqBuildParams(TestMilvusClientV2Base):
    """Build-time coverage for the IVF_RABITQ index: build params, vector types, metrics."""

    def _insert_batches(self, client, collection_name, vectors, insert_times):
        """Insert `insert_times` batches of `default_nb` rows with unique primary keys, then flush."""
        for j in range(insert_times):
            start_pk = j * default_nb
            rows = [{
                pk_field_name: i + start_pk,
                vector_field_name: vectors[i + start_pk]
            } for i in range(default_nb)]
            self.insert(client, collection_name, rows)
        self.flush(client, collection_name)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("params", IVF_RABITQ.build_params)
    def test_ivf_rabitq_build_params(self, params):
        """
        Test the build params of IVF_RABITQ index.

        For each entry in IVF_RABITQ.build_params: create a float-vector collection,
        build the index with the entry's params, and either expect the documented
        error or verify that search works and the params are persisted.
        """
        client = self._client()
        collection_name = cf.gen_collection_name_by_testcase_name()
        schema, _ = self.create_schema(client)
        schema.add_field(pk_field_name, datatype=DataType.INT64, is_primary=True, auto_id=False)
        schema.add_field(vector_field_name, datatype=DataType.FLOAT_VECTOR, dim=dim)
        self.create_collection(client, collection_name, schema=schema)
        # Insert data in `insert_times` batches with unique primary keys
        insert_times = 2
        random_vectors = cf.gen_vectors(default_nb * insert_times, dim, vector_data_type=DataType.FLOAT_VECTOR)
        self._insert_batches(client, collection_name, random_vectors, insert_times)
        # create index
        build_params = params.get("params", None)
        index_params = self.prepare_index_params(client)[0]
        index_params.add_index(field_name=vector_field_name,
                               metric_type=cf.get_default_metric_for_vector_type(vector_type=DataType.FLOAT_VECTOR),
                               index_type=index_type,
                               params=build_params)
        # build index: invalid params are expected to fail with the documented error
        if params.get("expected", None) != success:
            self.create_index(client, collection_name, index_params,
                              check_task=CheckTasks.err_res,
                              check_items=params.get("expected"))
        else:
            self.create_index(client, collection_name, index_params)
            self.wait_for_index_ready(client, collection_name, index_name=vector_field_name)
            # load collection
            self.load_collection(client, collection_name)
            # search to confirm the built index is usable
            nq = 2
            search_vectors = cf.gen_vectors(nq, dim=dim, vector_data_type=DataType.FLOAT_VECTOR)
            self.search(client, collection_name, search_vectors,
                        search_params=default_search_params,
                        limit=ct.default_limit,
                        check_task=CheckTasks.check_search_results,
                        check_items={"enable_milvus_client_api": True,
                                     "nq": nq,
                                     "limit": ct.default_limit,
                                     "pk_name": pk_field_name})
            # verify the index params are persisted
            idx_info = client.describe_index(collection_name, vector_field_name)
            # check every key in build_params exists in idx_info
            if build_params is not None:
                for key, value in build_params.items():
                    if value is not None:
                        assert key in idx_info.keys()
                        # assert value in idx_info.values()  # TODO: uncomment after #41783 fixed

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("vector_data_type", ct.all_vector_types)
    def test_ivf_rabitq_on_all_vector_types(self, vector_data_type):
        """
        Test ivf_rabitq index on all the vector types and metrics.

        Unsupported vector types are expected to fail at index creation with the
        documented error; supported types must build, load and search successfully.
        """
        client = self._client()
        collection_name = cf.gen_collection_name_by_testcase_name()
        schema, _ = self.create_schema(client)
        schema.add_field(pk_field_name, datatype=DataType.INT64, is_primary=True, auto_id=False)
        if vector_data_type == DataType.SPARSE_FLOAT_VECTOR:
            # sparse vectors carry no fixed dimension
            schema.add_field(vector_field_name, datatype=vector_data_type)
        else:
            schema.add_field(vector_field_name, datatype=vector_data_type, dim=dim)
        self.create_collection(client, collection_name, schema=schema)
        # Insert data in `insert_times` batches with unique primary keys
        insert_times = 2
        random_vectors = cf.gen_vectors(default_nb * insert_times, dim, vector_data_type=vector_data_type)
        for j in range(insert_times):
            start_pk = j * default_nb
            rows = [{
                pk_field_name: i + start_pk,
                vector_field_name: random_vectors[i + start_pk]
            } for i in range(default_nb)]
            self.insert(client, collection_name, rows)
        self.flush(client, collection_name)
        # create index with flattened build params
        index_params = self.prepare_index_params(client)[0]
        metric_type = cf.get_default_metric_for_vector_type(vector_data_type)
        index_params.add_index(field_name=vector_field_name,
                               metric_type=metric_type,
                               index_type=index_type,
                               nlist=128,  # flatten the params
                               refine=True,
                               refine_type="SQ8")
        if vector_data_type not in IVF_RABITQ.supported_vector_types:
            self.create_index(client, collection_name, index_params,
                              check_task=CheckTasks.err_res,
                              check_items={"err_code": 999,
                                           "err_msg": f"can't build with this index IVF_RABITQ: invalid parameter"})
        else:
            self.create_index(client, collection_name, index_params)
            self.wait_for_index_ready(client, collection_name, index_name=vector_field_name)
            # load collection
            self.load_collection(client, collection_name)
            # search
            nq = 2
            search_vectors = cf.gen_vectors(nq, dim=dim, vector_data_type=vector_data_type)
            self.search(client, collection_name, search_vectors,
                        search_params=default_search_params,
                        limit=ct.default_limit,
                        check_task=CheckTasks.check_search_results,
                        check_items={"enable_milvus_client_api": True,
                                     "nq": nq,
                                     "limit": ct.default_limit,
                                     "pk_name": pk_field_name})

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("metric", IVF_RABITQ.supported_metrics)
    def test_ivf_rabitq_on_all_metrics(self, metric):
        """
        Test IVF_RABITQ index build and search on every supported metric type.
        """
        client = self._client()
        collection_name = cf.gen_collection_name_by_testcase_name()
        schema, _ = self.create_schema(client)
        schema.add_field(pk_field_name, datatype=DataType.INT64, is_primary=True, auto_id=False)
        schema.add_field(vector_field_name, datatype=DataType.FLOAT_VECTOR, dim=dim)
        self.create_collection(client, collection_name, schema=schema)
        # insert data
        insert_times = 2
        random_vectors = cf.gen_vectors(default_nb * insert_times, dim, vector_data_type=DataType.FLOAT_VECTOR)
        self._insert_batches(client, collection_name, random_vectors, insert_times)
        # create index
        index_params = self.prepare_index_params(client)[0]
        index_params.add_index(field_name=vector_field_name,
                               metric_type=metric,
                               index_type=index_type,
                               nlist=128,
                               refine=True,
                               refine_type="SQ8")
        self.create_index(client, collection_name, index_params)
        self.wait_for_index_ready(client, collection_name, index_name=vector_field_name)
        # load collection
        self.load_collection(client, collection_name)
        # search
        nq = 2
        search_vectors = cf.gen_vectors(nq, dim=dim, vector_data_type=DataType.FLOAT_VECTOR)
        self.search(client, collection_name, search_vectors,
                    search_params=default_search_params,
                    limit=ct.default_limit,
                    check_task=CheckTasks.check_search_results,
                    check_items={"enable_milvus_client_api": True,
                                 "nq": nq,
                                 "limit": ct.default_limit,
                                 "pk_name": pk_field_name})
@pytest.mark.xdist_group("TestIvfRabitqSearchParams")
class TestIvfRabitqSearchParams(TestMilvusClientV2Base):
    """Search-param coverage for IVF_RABITQ against one shared, pre-built collection."""

    def setup_class(self):
        super().setup_class(self)
        self.collection_name = "TestIvfRabitqSearchParams" + cf.gen_unique_str("_")
        self.float_vector_field_name = vector_field_name
        self.float_vector_dim = dim
        self.primary_keys = []
        self.enable_dynamic_field = False
        self.datas = []

    @pytest.fixture(scope="class", autouse=True)
    def prepare_collection(self, request):
        """
        Build the shared collection once per class: insert data, create the
        IVF_RABITQ index, load — and register a finalizer that drops it.
        """
        # Get client connection
        client = self._client()
        # Create collection
        collection_schema = self.create_schema(client)[0]
        collection_schema.add_field(pk_field_name, DataType.INT64, is_primary=True, auto_id=False)
        # use the dim stored on the class so search vectors below always match
        collection_schema.add_field(self.float_vector_field_name, DataType.FLOAT_VECTOR,
                                    dim=self.float_vector_dim)
        self.create_collection(client, self.collection_name, schema=collection_schema,
                               enable_dynamic_field=self.enable_dynamic_field, force_teardown=False)
        # Define number of insert iterations
        insert_times = 2
        # Generate all float vectors up front and keep them on self via rows
        float_vectors = cf.gen_vectors(default_nb * insert_times, dim=self.float_vector_dim,
                                       vector_data_type=DataType.FLOAT_VECTOR)
        # Insert data multiple times with non-duplicated primary keys
        for j in range(insert_times):
            rows = []
            for i in range(default_nb):
                pk = i + j * default_nb
                row = {
                    pk_field_name: pk,
                    self.float_vector_field_name: list(float_vectors[pk])
                }
                self.datas.append(row)
                rows.append(row)
            self.insert(client, self.collection_name, data=rows)
            # Track all inserted primary keys
            self.primary_keys.extend([i + j * default_nb for i in range(default_nb)])
        self.flush(client, self.collection_name)
        # Create index
        index_params = self.prepare_index_params(client)[0]
        index_params.add_index(field_name=self.float_vector_field_name,
                               metric_type="COSINE",
                               index_type="IVF_RABITQ",
                               params={"nlist": 128, "refine": 'true', "refine_type": "SQ8"})
        self.create_index(client, self.collection_name, index_params=index_params)
        self.wait_for_index_ready(client, self.collection_name, index_name=self.float_vector_field_name)
        # Load collection
        self.load_collection(client, self.collection_name)

        def teardown():
            self.drop_collection(self._client(), self.collection_name)
        request.addfinalizer(teardown)

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("params", IVF_RABITQ.search_params)
    def test_ivf_rabitq_search_params(self, params):
        """
        Test the search params of IVF_RABITQ index.

        Each entry either documents an expected error or must return valid search
        results; 3-key success combinations are additionally re-run flattened.
        """
        client = self._client()
        collection_name = self.collection_name
        # search
        nq = 2
        search_vectors = cf.gen_vectors(nq, dim=self.float_vector_dim, vector_data_type=DataType.FLOAT_VECTOR)
        search_params = params.get("params", None)
        if params.get("expected", None) != success:
            self.search(client, collection_name, search_vectors,
                        search_params=search_params,
                        limit=ct.default_limit,
                        check_task=CheckTasks.err_res,
                        check_items=params.get("expected"))
        else:
            self.search(client, collection_name, search_vectors,
                        search_params=search_params,
                        limit=ct.default_limit,
                        check_task=CheckTasks.check_search_results,
                        check_items={"enable_milvus_client_api": True,
                                     "nq": nq,
                                     "limit": ct.default_limit,
                                     "pk_name": pk_field_name})
            # only re-run the flattened form for successful 3-param combinations;
            # guard against a missing params dict so .keys()/len never raises
            if search_params is not None and len(search_params) == 3:
                # try to search again with flattened params
                search_params = {
                    "nprobe": search_params["nprobe"],
                    "rbq_bits_query": search_params["rbq_bits_query"],
                    "refine_k": search_params["refine_k"]
                }
                self.search(client, collection_name, search_vectors,
                            search_params=search_params,
                            limit=ct.default_limit,
                            check_task=CheckTasks.check_search_results,
                            check_items={"enable_milvus_client_api": True,
                                         "nq": nq,
                                         "limit": ct.default_limit,
                                         "pk_name": pk_field_name})

View File

@ -426,7 +426,7 @@ class TestNewIndexBase(TestcaseBase):
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("index_type", ct.all_index_types[0:7])
@pytest.mark.parametrize("index_type", ct.all_index_types[0:8])
def test_create_index_default(self, index_type):
"""
target: test create index interface
@ -892,7 +892,7 @@ class TestNewIndexBase(TestcaseBase):
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ct.all_index_types[:6])
@pytest.mark.parametrize("index", ct.all_index_types[:7])
def test_drop_mmap_index(self, index):
"""
target: disabling and re-enabling mmap for index
@ -1386,7 +1386,7 @@ class TestIndexInvalid(TestcaseBase):
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("metric_type", ["L2", "COSINE", " ", "invalid"])
@pytest.mark.parametrize("index", ct.all_index_types[9:11])
@pytest.mark.parametrize("index", ct.all_index_types[10:12])
def test_invalid_sparse_metric_type(self, metric_type, index):
"""
target: unsupported metric_type create index
@ -1407,7 +1407,7 @@ class TestIndexInvalid(TestcaseBase):
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("ratio", [-0.5, 1, 3])
@pytest.mark.parametrize("index ", ct.all_index_types[9:11])
@pytest.mark.parametrize("index ", ct.all_index_types[10:12])
def test_invalid_sparse_ratio(self, ratio, index):
"""
target: index creation for unsupported ratio parameter
@ -1428,7 +1428,7 @@ class TestIndexInvalid(TestcaseBase):
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("inverted_index_algo", ["INVALID_ALGO"])
@pytest.mark.parametrize("index ", ct.all_index_types[9:11])
@pytest.mark.parametrize("index ", ct.all_index_types[10:12])
def test_invalid_sparse_inverted_index_algo(self, inverted_index_algo, index):
"""
target: index creation for unsupported ratio parameter

View File

@ -1428,7 +1428,7 @@ class TestInsertInvalid(TestcaseBase):
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index ", ct.all_index_types[9:11])
@pytest.mark.parametrize("index ", ct.all_index_types[10:12])
@pytest.mark.parametrize("invalid_vector_type ", ct.all_dense_vector_types)
def test_invalid_sparse_vector_data(self, index, invalid_vector_type):
"""
@ -2154,7 +2154,7 @@ class TestUpsertValid(TestcaseBase):
assert len(res) == 0
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index ", ct.all_index_types[9:11])
@pytest.mark.parametrize("index ", ct.all_index_types[10:12])
def test_upsert_sparse_data(self, index):
"""
target: multiple upserts and counts(*)

View File

@ -4205,7 +4205,7 @@ class TestQueryCount(TestcaseBase):
"pk_name": collection_w.primary_field.name})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("index", ct.all_index_types[9:11])
@pytest.mark.parametrize("index", ct.all_index_types[10:12])
def test_counts_expression_sparse_vectors(self, index):
"""
target: test count with expr

View File

@ -47,9 +47,9 @@ def api_request(_list, **kwargs):
arg = _list[1:]
arg_str = str(arg)
log_arg = arg_str[0:log_row_length] + '......' if len(arg_str) > log_row_length else arg_str
# if enable_traceback == "True":
if kwargs.get("enable_traceback", True):
log.debug("(api_request) : [%s] args: %s, kwargs: %s" % (func.__qualname__, log_arg, str(kwargs)))
log_kwargs = str(kwargs)[0:log_row_length] + '......' if len(str(kwargs)) > log_row_length else str(kwargs)
log.debug("(api_request) : [%s] args: %s, kwargs: %s" % (func.__qualname__, log_arg, log_kwargs))
return func(*arg, **kwargs)
return False, False
@ -61,7 +61,8 @@ def logger_interceptor():
arg_str = str(arg)
log_arg = arg_str[0:log_row_length] + '......' if len(arg_str) > log_row_length else arg_str
if kwargs.get("enable_traceback", True):
log.debug("(api_request) : [%s] args: %s, kwargs: %s" % (func.__name__, log_arg, str(kwargs)))
log_kwargs = str(kwargs)[0:log_row_length] + '......' if len(str(kwargs)) > log_row_length else str(kwargs)
log.debug("(api_request) : [%s] args: %s, kwargs: %s" % (func.__name__, log_arg, log_kwargs))
def log_response(res, **kwargs):
if kwargs.get("enable_traceback", True):