* Update pymilvus-orm version in test requirements
* Update some tests
* Update error msg

Signed-off-by: yanliang567 <yanliang.qiao@zilliz.com>
parent: a992dcf6a8
commit: 863243c0fc
@@ -76,10 +76,10 @@ class InsertFlushChecker(Checker):
     def __init__(self, flush=False):
         super().__init__()
         self._flush = flush
+        self.init_entities = self.c_wrap.num_entities

     def keep_running(self):
         while self._running:
-            init_entities = self.c_wrap.num_entities
             _, insert_result = \
                 self.c_wrap.insert(data=cf.gen_default_list_data(nb=constants.DELTA_PER_INS),
                                    timeout=timeout, check_task='check_nothing')
@@ -90,7 +90,7 @@ class InsertFlushChecker(Checker):
                     self._fail += 1
                 sleep(constants.WAIT_PER_OP / 10)
             else:
-                if self.c_wrap.num_entities == (init_entities + constants.DELTA_PER_INS):
+                if self.c_wrap.num_entities == (self.init_entities + constants.DELTA_PER_INS):
                     self._succ += 1
                 else:
                     self._fail += 1
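The two hunks above move the flush check's baseline from a local variable re-read on every loop pass to self.init_entities, captured once in __init__. A minimal self-contained sketch of that comparison logic, using a stand-in wrapper instead of the real c_wrap (all names below are illustrative, not the project's API):

DELTA_PER_INS = 10  # mirrors constants.DELTA_PER_INS

class FakeWrapper:
    """Stand-in for the collection wrapper; only tracks an entity count."""
    def __init__(self):
        self.num_entities = 0

    def insert(self, nb):
        self.num_entities += nb

class FlushChecker:
    def __init__(self, wrapper):
        self.c_wrap = wrapper
        self._succ, self._fail = 0, 0
        # baseline captured once at construction, as in the new __init__ above
        self.init_entities = self.c_wrap.num_entities

    def run_once(self):
        self.c_wrap.insert(DELTA_PER_INS)
        # same comparison as the updated hunk: entities grew by exactly one batch
        if self.c_wrap.num_entities == (self.init_entities + DELTA_PER_INS):
            self._succ += 1
        else:
            self._fail += 1

checker = FlushChecker(FakeWrapper())
checker.run_once()
assert (checker._succ, checker._fail) == (1, 0)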
@@ -32,6 +32,6 @@ ENTITIES_FOR_SEARCH = 1000

 CHAOS_CONFIG_ENV = 'CHAOS_CONFIG_PATH'  # env variables for chao path
 TESTS_CONFIG_LOCATION = 'chaos_objects/'
-ALL_CHAOS_YAMLS = 'chaos_*.yaml'
+ALL_CHAOS_YAMLS = 'chaos_datanode*.yaml'
 WAIT_PER_OP = 10
 DEFAULT_INDEX_PARAM = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
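Narrowing ALL_CHAOS_YAMLS from 'chaos_*.yaml' to 'chaos_datanode*.yaml' limits which chaos specs get collected. Assuming the specs are resolved with a plain filesystem glob under TESTS_CONFIG_LOCATION (the helper function and the example file name below are illustrative only), the lookup is roughly:

import glob
import os

TESTS_CONFIG_LOCATION = 'chaos_objects/'
ALL_CHAOS_YAMLS = 'chaos_datanode*.yaml'

def list_chaos_yamls(config_dir=TESTS_CONFIG_LOCATION, pattern=ALL_CHAOS_YAMLS):
    """Return the chaos spec files matched by the configured glob."""
    return sorted(glob.glob(os.path.join(config_dir, pattern)))

# e.g. ['chaos_objects/chaos_datanode_pod_kill.yaml'] when such a file exists
print(list_chaos_yamls())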
@@ -165,7 +165,7 @@ class TestChaos(TestChaosBase):
         reset_counting(self.health_checkers)

         # wait 300s (varies by feature)
-        sleep(constants.WAIT_PER_OP*2.5)
+        sleep(constants.WAIT_PER_OP*5)

         # assert statistic: all ops success again
         log.debug("******3rd assert after chaos deleted: ")
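With WAIT_PER_OP = 10 (see the constants hunk above), this raises the post-chaos wait from 10 * 2.5 = 25 seconds to 10 * 5 = 50 seconds; the "# wait 300s (varies by feature)" context comment is left unchanged.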
@@ -1,7 +1,7 @@
 [pytest]


-addopts = --host 172.20.255.155 --html=/milvus/tests20/python_client/report.html --self-contained-html
+addopts = --host 10.98.0.7 --html=/tmp/ci_logs/report.html --self-contained-html -v
 # -;addopts = --host 172.28.255.155 --html=/tmp/report.html
 # python3 -W ignore -m pytest
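--host is not a built-in pytest option; it is only accepted because the project's conftest registers it. A minimal conftest sketch of how such an option is typically wired (an assumption for illustration, not part of this commit):

# conftest.py sketch: register the custom --host option referenced in addopts.
import pytest

def pytest_addoption(parser):
    parser.addoption("--host", action="store", default="127.0.0.1",
                     help="IP of the Milvus service under test")

@pytest.fixture(scope="session")
def host(request):
    return request.config.getoption("--host")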
@@ -12,7 +12,7 @@ pytest-print==0.2.1
 pytest-level==0.1.1
 pytest-xdist==2.2.1
 pytest-parallel
-pymilvus-orm==2.0.0rc2.dev31
+pymilvus-orm==2.0.0rc3.dev6
 pytest-rerunfailures==9.1.1
 git+https://github.com/Projectplace/pytest-tags
 ndg-httpsclient
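The new pin only takes effect once the requirements are reinstalled, for example:

pip install --upgrade -r requirements.txt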
@@ -6,6 +6,7 @@ from base.client_base import TestcaseBase
 from common import common_func as cf
 from common import common_type as ct
 from common.common_type import CaseLabel, CheckTasks
+from common.code_mapping import PartitionErrorMessage

 prefix = "partition_"
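The added import pulls the expected error text from a shared mapping instead of repeating string literals in each test; later hunks switch "Partition not exist" to PartitionErrorMessage.PartitionNotExist accordingly. A plausible shape for that mapping, sketched here rather than copied from common/code_mapping.py:

# Sketch of the centralized error-message mapping; the real module may hold more members.
class PartitionErrorMessage:
    PartitionNotExist = "Partition not exist"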
@@ -285,10 +286,10 @@ class TestPartitionParams(TestcaseBase):

     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
-    @pytest.mark.parametrize("data, nums", [(cf.gen_default_dataframe_data(10), 10),
-                                            (cf.gen_default_list_data(1), 1),
-                                            (cf.gen_default_tuple_data(10), 10)])
-    def test_partition_insert(self, partition_name, data, nums):
+    @pytest.mark.parametrize("data", [cf.gen_default_dataframe_data(10),
+                                      cf.gen_default_list_data(10),
+                                      cf.gen_default_tuple_data(10)])
+    def test_partition_insert(self, partition_name, data):
         """
         target: verify insert multi entities by dataFrame
         method: 1. create a collection and a partition
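Dropping the paired nums parameter works because every payload now contains ten rows, so the count can be fixed inside the test (see the nums = 10 line added in the next hunk). For reference, stacked parametrize decorators expand into the cross-product of their values; a stripped-down, self-contained sketch:

import pytest

@pytest.mark.parametrize("partition_name", ["partition_demo"])
@pytest.mark.parametrize("data", [list(range(10)), tuple(range(10))])
def test_cross_product(partition_name, data):
    # pytest generates one test case per (data, partition_name) combination
    assert len(data) == 10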
@@ -296,6 +297,7 @@ class TestPartitionParams(TestcaseBase):
                 3. insert data again
         expected: 1. insert data successfully
         """
+        nums = 10
         # create collection
         collection_w = self.init_collection_wrap()
@@ -381,7 +383,6 @@ class TestPartitionOperations(TestcaseBase):
         assert collection_w.has_partition(partition_name)[0]

     @pytest.mark.tags(CaseLabel.L2)
-    @pytest.mark.skip(reason="issue 6321")
     def test_partition_maximum_partitions(self):
         """
         target: verify create maximum partitions
@@ -397,7 +398,7 @@ class TestPartitionOperations(TestcaseBase):
             for _ in range(ct.max_partition_num // threads_n):
                 name = cf.gen_unique_str(prefix)
                 par_wrap = ApiPartitionWrapper()
-                par_wrap.init_partition(collection, name, check_task="check_nothing")
+                par_wrap.init_partition(collection, name, check_task=CheckTasks.check_nothing)

         collection_w = self.init_collection_wrap()
         for _ in range(threads_num):
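Replacing the raw "check_nothing" string with CheckTasks.check_nothing trades a typo-prone literal for an attribute lookup that fails loudly; CheckTasks is imported from common.common_type alongside CaseLabel (see the import hunk above). A sketch of that constants-class pattern, with attribute values assumed rather than copied from the repo:

# Constants-class pattern behind CheckTasks; the values here are assumptions.
class CheckTasks:
    check_nothing = "check_nothing"
    err_res = "error_response"

# A mistyped attribute raises AttributeError at once,
# while a mistyped plain string would silently select no check.
assert CheckTasks.check_nothing == "check_nothing"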
@@ -414,8 +415,7 @@ class TestPartitionOperations(TestcaseBase):
                                          ct.err_msg: "maximum partition's number should be limit to 4096"})

     @pytest.mark.tags(CaseLabel.L0)
-    @pytest.mark.parametrize("partition_name", [ct.default_partition_name])
-    def test_partition_drop_default_partition(self, partition_name):
+    def test_partition_drop_default_partition(self):
         """
         target: verify drop the _default partition
         method: 1. drop the _default partition
@@ -456,7 +456,7 @@ class TestPartitionOperations(TestcaseBase):

         # verify that drop the partition again with exception
         partition_w.drop(check_task=CheckTasks.err_res,
-                         check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
+                         check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})

     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
@@ -513,7 +513,7 @@ class TestPartitionOperations(TestcaseBase):

     @pytest.mark.tags(CaseLabel.L2)
     # @pytest.mark.parametrize("flush", [True, False])
-    @pytest.mark.parametrize("partition_name, data", [(cf.gen_unique_str(prefix), cf.gen_default_list_data(nb=10))])
+    @pytest.mark.parametrize("partition_name, data", [(cf.gen_unique_str(prefix), cf.gen_default_list_data(nb=3000))])
     @pytest.mark.parametrize("index_param", cf.gen_simple_index())
     def test_partition_drop_indexed_partition(self, partition_name, data, index_param):
         """
@@ -533,7 +533,8 @@ class TestPartitionOperations(TestcaseBase):
         assert collection_w.has_partition(partition_name)[0]

         # insert data to partition
-        partition_w.insert(data)
+        ins_res, _ = partition_w.insert(data)
+        assert len(ins_res.primary_keys) == len(data[0])

         # create index of collection
         collection_w.create_index(ct.default_float_vec_field_name, index_param)
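The hunk above (and the similar one near the end) now keeps the insert result and checks that one primary key comes back per inserted row via result.primary_keys. A small free-standing helper expressing the same assertion, usable with any result object exposing a primary_keys sequence (the helper name is ours, not the project's):

def assert_insert_count(result, expected_rows):
    """Verify that an insert result reports exactly one primary key per inserted row."""
    actual = len(result.primary_keys)
    assert actual == expected_rows, \
        f"expected {expected_rows} primary keys, got {actual}"

# usage mirroring the hunk above:
#   ins_res, _ = partition_w.insert(data)
#   assert_insert_count(ins_res, len(data[0]))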
@@ -579,7 +580,7 @@ class TestPartitionOperations(TestcaseBase):

         # release the dropped partition and check err response
         partition_w.release(check_task=CheckTasks.err_res,
-                            check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
+                            check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})

     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
@@ -718,7 +719,7 @@ class TestPartitionOperations(TestcaseBase):
                            check_task=CheckTasks.err_res,
                            check_items={ct.err_code: 1, ct.err_msg: "None Type"})

-    @pytest.mark.tags(CaseLabel.L1)
+    @pytest.mark.tags(CaseLabel.L2)
     def test_partition_insert_maximum_size_data(self, data):
         """
         target: verify insert maximum size data(256M?) a time
@@ -734,7 +735,8 @@ class TestPartitionOperations(TestcaseBase):

         # insert data to partition
         max_size = 100000  # TODO: clarify the max size of data
-        partition_w.insert(cf.gen_default_dataframe_data(max_size))
+        ins_res, _ = partition_w.insert(cf.gen_default_dataframe_data(max_size))
+        assert len(ins_res.primary_keys) == max_size
         # self._connect().flush([collection_w.name])
         assert partition_w.num_entities == max_size