commit 5986de3f8a

Merge branch 'branch-0.4.0' into 'branch-0.4.0'

refine code

See merge request megasearch/milvus!483

Former-commit-id: 5323139fbe9f0e080d6287754651ef285cb461a9
@@ -53,11 +53,15 @@ DBImpl::~DBImpl() {
    Stop();
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//external api
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
Status DBImpl::Start() {
    if (!shutting_down_.load(std::memory_order_acquire)){
        return Status::OK();
    }

    ENGINE_LOG_TRACE << "DB service start";
    shutting_down_.store(false, std::memory_order_release);

    //for distribute version, some nodes are read only
@@ -75,30 +79,40 @@ Status DBImpl::Stop() {
    }

    shutting_down_.store(true, std::memory_order_release);
    bg_timer_thread_.join();

    //wait compaction/buildindex finish
    for(auto& result : compact_thread_results_) {
        result.wait();
    }

    for(auto& result : index_thread_results_) {
        result.wait();
    }

    //makesure all memory data serialized
    MemSerialize();

    //wait compaction/buildindex finish
    bg_timer_thread_.join();

    if (options_.mode != Options::MODE::READ_ONLY) {
        meta_ptr_->CleanUp();
    }

    ENGINE_LOG_TRACE << "DB service stop";
    return Status::OK();
}

Status DBImpl::DropAll() {
    return meta_ptr_->DropAll();
}

Status DBImpl::CreateTable(meta::TableSchema& table_schema) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    meta::TableSchema temp_schema = table_schema;
    temp_schema.index_file_size_ *= ONE_MB;
    return meta_ptr_->CreateTable(temp_schema);
}

Status DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    //dates partly delete files of the table but currently we don't support
    ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id;
@@ -121,18 +135,34 @@ Status DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& date
}

Status DBImpl::DescribeTable(meta::TableSchema& table_schema) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    return meta_ptr_->DescribeTable(table_schema);
}

Status DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    return meta_ptr_->HasTable(table_id, has_or_not);
}

Status DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    return meta_ptr_->AllTables(table_schema_array);
}

Status DBImpl::PreloadTable(const std::string &table_id) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    meta::DatePartionedTableFilesSchema files;

    meta::DatesT dates;
@@ -174,16 +204,27 @@ Status DBImpl::PreloadTable(const std::string &table_id) {
}

Status DBImpl::UpdateTableFlag(const std::string &table_id, int64_t flag) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    return meta_ptr_->UpdateTableFlag(table_id, flag);
}

Status DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    return meta_ptr_->Count(table_id, row_count);
}

Status DBImpl::InsertVectors(const std::string& table_id_,
        uint64_t n, const float* vectors, IDNumbers& vector_ids_) {
//    ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    Status status;
    zilliz::milvus::server::CollectInsertMetrics metrics(n, status);
@@ -196,8 +237,82 @@ Status DBImpl::InsertVectors(const std::string& table_id_,
    return status;
}

Status DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
    {
        std::unique_lock<std::mutex> lock(build_index_mutex_);

        //step 1: check index difference
        TableIndex old_index;
        auto status = DescribeIndex(table_id, old_index);
        if(!status.ok()) {
            ENGINE_LOG_ERROR << "Failed to get table index info for table: " << table_id;
            return status;
        }

        //step 2: update index info
        if(!utils::IsSameIndex(old_index, index)) {
            DropIndex(table_id);

            status = meta_ptr_->UpdateTableIndexParam(table_id, index);
            if (!status.ok()) {
                ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id;
                return status;
            }
        }
    }

    //step 3: wait and build index
    //for IDMAP type, only wait all NEW file converted to RAW file
    //for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
    std::vector<int> file_types;
    if(index.engine_type_ == (int)EngineType::FAISS_IDMAP) {
        file_types = {
            (int) meta::TableFileSchema::NEW,
            (int) meta::TableFileSchema::NEW_MERGE,
        };
    } else {
        file_types = {
            (int) meta::TableFileSchema::RAW,
            (int) meta::TableFileSchema::NEW,
            (int) meta::TableFileSchema::NEW_MERGE,
            (int) meta::TableFileSchema::NEW_INDEX,
            (int) meta::TableFileSchema::TO_INDEX,
        };
    }

    std::vector<std::string> file_ids;
    auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
    int times = 1;

    while (!file_ids.empty()) {
        ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
        if(index.engine_type_ != (int)EngineType::FAISS_IDMAP) {
            status = meta_ptr_->UpdateTableFilesToIndex(table_id);
        }

        std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10*1000, times*100)));
        status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
        times++;
    }

    return Status::OK();
}

Status DBImpl::DescribeIndex(const std::string& table_id, TableIndex& index) {
    return meta_ptr_->DescribeTableIndex(table_id, index);
}

Status DBImpl::DropIndex(const std::string& table_id) {
    ENGINE_LOG_DEBUG << "Drop index for table: " << table_id;
    return meta_ptr_->DropTableIndex(table_id);
}

Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq, uint64_t nprobe,
                     const float *vectors, QueryResults &results) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    meta::DatesT dates = {utils::GetDate()};
    Status result = Query(table_id, k, nq, nprobe, vectors, dates, results);
@@ -206,6 +321,10 @@ Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq, uint6

Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe,
                     const float* vectors, const meta::DatesT& dates, QueryResults& results) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    ENGINE_LOG_DEBUG << "Query by dates for table: " << table_id;

    //get all table files from table
@@ -230,6 +349,10 @@ Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint6
Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_ids,
                     uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
                     const meta::DatesT& dates, QueryResults& results) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    ENGINE_LOG_DEBUG << "Query by file ids for table: " << table_id;

    //get specified files
@@ -264,6 +387,18 @@ Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>
    return status;
}

Status DBImpl::Size(uint64_t& result) {
    if (shutting_down_.load(std::memory_order_acquire)){
        return Status::Error("Milsvus server is shutdown!");
    }

    return meta_ptr_->Size(result);
}


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//internal methods
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files,
                          uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
                          const meta::DatesT& dates, QueryResults& results) {
@@ -563,76 +698,6 @@ void DBImpl::StartBuildIndexTask(bool force) {
    }
}

Status DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
    {
        std::unique_lock<std::mutex> lock(build_index_mutex_);

        //step 1: check index difference
        TableIndex old_index;
        auto status = DescribeIndex(table_id, old_index);
        if(!status.ok()) {
            ENGINE_LOG_ERROR << "Failed to get table index info for table: " << table_id;
            return status;
        }

        //step 2: update index info
        if(!utils::IsSameIndex(old_index, index)) {
            DropIndex(table_id);

            status = meta_ptr_->UpdateTableIndexParam(table_id, index);
            if (!status.ok()) {
                ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id;
                return status;
            }
        }
    }

    //step 3: wait and build index
    //for IDMAP type, only wait all NEW file converted to RAW file
    //for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
    std::vector<int> file_types;
    if(index.engine_type_ == (int)EngineType::FAISS_IDMAP) {
        file_types = {
            (int) meta::TableFileSchema::NEW,
            (int) meta::TableFileSchema::NEW_MERGE,
        };
    } else {
        file_types = {
            (int) meta::TableFileSchema::RAW,
            (int) meta::TableFileSchema::NEW,
            (int) meta::TableFileSchema::NEW_MERGE,
            (int) meta::TableFileSchema::NEW_INDEX,
            (int) meta::TableFileSchema::TO_INDEX,
        };
    }

    std::vector<std::string> file_ids;
    auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
    int times = 1;

    while (!file_ids.empty()) {
        ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
        if(index.engine_type_ != (int)EngineType::FAISS_IDMAP) {
            status = meta_ptr_->UpdateTableFilesToIndex(table_id);
        }

        std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10*1000, times*100)));
        status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
        times++;
    }

    return Status::OK();
}

Status DBImpl::DescribeIndex(const std::string& table_id, TableIndex& index) {
    return meta_ptr_->DescribeTableIndex(table_id, index);
}

Status DBImpl::DropIndex(const std::string& table_id) {
    ENGINE_LOG_DEBUG << "Drop index for table: " << table_id;
    return meta_ptr_->DropTableIndex(table_id);
}

Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
    ExecutionEnginePtr to_index =
            EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_,
@@ -775,15 +840,6 @@ void DBImpl::BackgroundBuildIndex() {
    ENGINE_LOG_TRACE << "Background build index thread exit";
}

Status DBImpl::DropAll() {
    Stop();
    return meta_ptr_->DropAll();
}

Status DBImpl::Size(uint64_t& result) {
    return meta_ptr_->Size(result);
}

} // namespace engine
} // namespace milvus
} // namespace zilliz
@@ -39,6 +39,7 @@ class DBImpl : public DB {

    Status Start() override;
    Status Stop() override;
    Status DropAll() override;

    Status CreateTable(meta::TableSchema &table_schema) override;
@@ -58,6 +59,12 @@ class DBImpl : public DB {

    Status InsertVectors(const std::string &table_id, uint64_t n, const float *vectors, IDNumbers &vector_ids) override;

    Status CreateIndex(const std::string& table_id, const TableIndex& index) override;

    Status DescribeIndex(const std::string& table_id, TableIndex& index) override;

    Status DropIndex(const std::string& table_id) override;

    Status Query(const std::string &table_id,
                 uint64_t k,
                 uint64_t nq,
@@ -82,16 +89,8 @@ class DBImpl : public DB {
                 const meta::DatesT &dates,
                 QueryResults &results) override;

    Status DropAll() override;

    Status Size(uint64_t &result) override;

    Status CreateIndex(const std::string& table_id, const TableIndex& index) override;

    Status DescribeIndex(const std::string& table_id, TableIndex& index) override;

    Status DropIndex(const std::string& table_id) override;

 private:
    Status QueryAsync(const std::string &table_id,
                      const meta::TableFilesSchema &files,
@@ -48,9 +48,7 @@ MySQLMetaImpl::MySQLMetaImpl(const DBMetaOptions &options_, const int &mode)
}

MySQLMetaImpl::~MySQLMetaImpl() {
    if (mode_ != Options::MODE::READ_ONLY) {
        CleanUp();
    }

}

Status MySQLMetaImpl::NextTableId(std::string &table_id) {
@@ -2002,6 +2000,7 @@ Status MySQLMetaImpl::Count(const std::string &table_id, uint64_t &result) {

Status MySQLMetaImpl::DropAll() {
    try {
        ENGINE_LOG_DEBUG << "Drop all mysql meta";
        ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab);

        if (connectionPtr == nullptr) {
@@ -74,7 +74,7 @@ SqliteMetaImpl::SqliteMetaImpl(const DBMetaOptions &options_)
}

SqliteMetaImpl::~SqliteMetaImpl() {
    CleanUp();

}

Status SqliteMetaImpl::NextTableId(std::string &table_id) {
@@ -1205,6 +1205,14 @@ Status SqliteMetaImpl::Count(const std::string &table_id, uint64_t &result) {
}

Status SqliteMetaImpl::DropAll() {
    ENGINE_LOG_DEBUG << "Drop all sqlite meta";

    try {
        ConnectorPtr->drop_table("Tables");
        ConnectorPtr->drop_table("TableFiles");
    } catch (std::exception &e) {
        return HandleException("Encounter exception when drop all meta", e);
    }

    return Status::OK();
}
@@ -54,7 +54,7 @@ void BuildVectors(int64_t n, std::vector<float> &vectors) {
    }
}

TEST_F(NewMemManagerTest, VECTOR_SOURCE_TEST) {
TEST_F(MemManagerTest, VECTOR_SOURCE_TEST) {

    std::shared_ptr<engine::meta::SqliteMetaImpl> impl_ = engine::DBMetaImplFactory::Build();
@@ -102,7 +102,7 @@ TEST_F(NewMemManagerTest, VECTOR_SOURCE_TEST) {
    ASSERT_TRUE(status.ok());
}

TEST_F(NewMemManagerTest, MEM_TABLE_FILE_TEST) {
TEST_F(MemManagerTest, MEM_TABLE_FILE_TEST) {

    std::shared_ptr<engine::meta::SqliteMetaImpl> impl_ = engine::DBMetaImplFactory::Build();
    auto options = engine::OptionsFactory::Build();
@@ -148,7 +148,7 @@ TEST_F(NewMemManagerTest, MEM_TABLE_FILE_TEST) {
    ASSERT_TRUE(status.ok());
}

TEST_F(NewMemManagerTest, MEM_TABLE_TEST) {
TEST_F(MemManagerTest, MEM_TABLE_TEST) {

    std::shared_ptr<engine::meta::SqliteMetaImpl> impl_ = engine::DBMetaImplFactory::Build();
    auto options = engine::OptionsFactory::Build();
@@ -212,19 +212,11 @@ TEST_F(NewMemManagerTest, MEM_TABLE_TEST) {
    status = mem_table.Serialize();
    ASSERT_TRUE(status.ok());



    status = impl_->DropAll();
    ASSERT_TRUE(status.ok());
}

TEST_F(NewMemManagerTest, SERIAL_INSERT_SEARCH_TEST) {

    auto options = engine::OptionsFactory::Build();
    options.meta.path = "/tmp/milvus_test";
    options.meta.backend_uri = "sqlite://:@:/";
    auto db_ = engine::DBFactory::Build(options);

TEST_F(MemManagerTest, SERIAL_INSERT_SEARCH_TEST) {
    engine::meta::TableSchema table_info = BuildTableSchema();
    engine::Status stat = db_->CreateTable(table_info);
@@ -268,18 +260,9 @@ TEST_F(NewMemManagerTest, SERIAL_INSERT_SEARCH_TEST) {
        ASSERT_EQ(results[0][0].first, pair.first);
        ASSERT_LT(results[0][0].second, 0.00001);
    }

    delete db_;

}

TEST_F(NewMemManagerTest, INSERT_TEST) {

    auto options = engine::OptionsFactory::Build();
    options.meta.path = "/tmp/milvus_test";
    options.meta.backend_uri = "sqlite://:@:/";
    auto db_ = engine::DBFactory::Build(options);

TEST_F(MemManagerTest, INSERT_TEST) {
    engine::meta::TableSchema table_info = BuildTableSchema();
    engine::Status stat = db_->CreateTable(table_info);
@@ -303,18 +286,9 @@ TEST_F(NewMemManagerTest, INSERT_TEST) {
    auto end_time = METRICS_NOW_TIME;
    auto total_time = METRICS_MICROSECONDS(start_time, end_time);
    LOG(DEBUG) << "total_time spent in INSERT_TEST (ms) : " << total_time;

    delete db_;

}

TEST_F(NewMemManagerTest, CONCURRENT_INSERT_SEARCH_TEST) {

    auto options = engine::OptionsFactory::Build();
    options.meta.path = "/tmp/milvus_test";
    options.meta.backend_uri = "sqlite://:@:/";
    auto db_ = engine::DBFactory::Build(options);

TEST_F(MemManagerTest, CONCURRENT_INSERT_SEARCH_TEST) {
    engine::meta::TableSchema table_info = BuildTableSchema();
    engine::Status stat = db_->CreateTable(table_info);
@@ -383,12 +357,9 @@ TEST_F(NewMemManagerTest, CONCURRENT_INSERT_SEARCH_TEST) {
    }

    search.join();

    delete db_;
};

TEST_F(DBTest, VECTOR_IDS_TEST)
{
TEST_F(MemManagerTest, VECTOR_IDS_TEST) {
    engine::meta::TableSchema table_info = BuildTableSchema();
    engine::Status stat = db_->CreateTable(table_info);
@@ -458,39 +429,3 @@ TEST_F(DBTest, VECTOR_IDS_TEST)
        ASSERT_EQ(vector_ids[i], i + nb);
    }
}

TEST_F(NewMemManagerTest, MEMMANAGER_TEST) {
    int setenv_res = setenv("MILVUS_USE_OLD_MEM_MANAGER", "ON", 1);
    ASSERT_TRUE(setenv_res == 0);

    auto options = engine::OptionsFactory::Build();
    options.meta.path = "/tmp/milvus_test";
    options.meta.backend_uri = "sqlite://:@:/";
    auto db_ = engine::DBFactory::Build(options);

    engine::meta::TableSchema table_info = BuildTableSchema();
    engine::Status stat = db_->CreateTable(table_info);

    engine::meta::TableSchema table_info_get;
    table_info_get.table_id_ = TABLE_NAME;
    stat = db_->DescribeTable(table_info_get);
    ASSERT_STATS(stat);
    ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);

    auto start_time = METRICS_NOW_TIME;

    int insert_loop = 20;
    for (int i = 0; i < insert_loop; ++i) {
        int64_t nb = 40960;
        std::vector<float> xb;
        BuildVectors(nb, xb);
        engine::IDNumbers vector_ids;
        engine::Status status = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
        ASSERT_TRUE(status.ok());
    }
    auto end_time = METRICS_NOW_TIME;
    auto total_time = METRICS_MICROSECONDS(start_time, end_time);
    LOG(DEBUG) << "total_time spent in INSERT_TEST (ms) : " << total_time;

    delete db_;
}
@@ -46,7 +46,7 @@ namespace {
}


TEST_F(MySQLDBTest, DB_TEST) {
TEST_F(MySqlDBTest, DB_TEST) {
    engine::meta::TableSchema table_info = BuildTableSchema();
    engine::Status stat = db_->CreateTable(table_info);
@@ -131,7 +131,7 @@ TEST_F(MySQLDBTest, DB_TEST) {
    search.join();
};

TEST_F(MySQLDBTest, SEARCH_TEST) {
TEST_F(MySqlDBTest, SEARCH_TEST) {
    engine::meta::TableSchema table_info = BuildTableSchema();
    engine::Status stat = db_->CreateTable(table_info);
@@ -183,7 +183,7 @@ TEST_F(MySQLDBTest, SEARCH_TEST) {
    ASSERT_STATS(stat);
};

TEST_F(MySQLDBTest, ARHIVE_DISK_CHECK) {
TEST_F(MySqlDBTest, ARHIVE_DISK_CHECK) {
    engine::meta::TableSchema table_info = BuildTableSchema();
    engine::Status stat = db_->CreateTable(table_info);
@@ -228,7 +228,7 @@ TEST_F(MySQLDBTest, ARHIVE_DISK_CHECK) {
    ASSERT_LE(size, 1 * engine::meta::G);
};

TEST_F(MySQLDBTest, DELETE_TEST) {
TEST_F(MySqlDBTest, DELETE_TEST) {
    engine::meta::TableSchema table_info = BuildTableSchema();
    engine::Status stat = db_->CreateTable(table_info);
//    std::cout << stat.ToString() << std::endl;
@@ -21,81 +21,59 @@

using namespace zilliz::milvus::engine;

TEST_F(MySQLTest, TABLE_TEST) {
    DBMetaOptions options;
    try {
        options = getDBMetaOptions();
    } catch(std::exception& ex) {
        ASSERT_TRUE(false);
        return;
    }

    int mode = Options::MODE::SINGLE;
    meta::MySQLMetaImpl impl(options, mode);

TEST_F(MySqlMetaTest, TABLE_TEST) {
    auto table_id = "meta_test_table";

    meta::TableSchema table;
    table.table_id_ = table_id;
    auto status = impl.CreateTable(table);
    auto status = impl_->CreateTable(table);
    ASSERT_TRUE(status.ok());

    auto gid = table.id_;
    table.id_ = -1;
    status = impl.DescribeTable(table);
    status = impl_->DescribeTable(table);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(table.id_, gid);
    ASSERT_EQ(table.table_id_, table_id);

    table.table_id_ = "not_found";
    status = impl.DescribeTable(table);
    status = impl_->DescribeTable(table);
    ASSERT_TRUE(!status.ok());

    table.table_id_ = table_id;
    status = impl.CreateTable(table);
    status = impl_->CreateTable(table);
    ASSERT_TRUE(status.IsAlreadyExist());

    table.table_id_ = "";
    status = impl.CreateTable(table);
    status = impl_->CreateTable(table);
//    ASSERT_TRUE(status.ok());

    status = impl.DropAll();
    status = impl_->DropAll();
    ASSERT_TRUE(status.ok());
}

TEST_F(MySQLTest, TABLE_FILE_TEST) {
    DBMetaOptions options;
    try {
        options = getDBMetaOptions();
    } catch(std::exception& ex) {
        ASSERT_TRUE(false);
        return;
    }

    int mode = Options::MODE::SINGLE;
    meta::MySQLMetaImpl impl(options, mode);

TEST_F(MySqlMetaTest, TABLE_FILE_TEST) {
    auto table_id = "meta_test_table";

    meta::TableSchema table;
    table.table_id_ = table_id;
    table.dimension_ = 256;
    auto status = impl.CreateTable(table);
    auto status = impl_->CreateTable(table);


    meta::TableFileSchema table_file;
    table_file.table_id_ = table.table_id_;
    status = impl.CreateTableFile(table_file);
    status = impl_->CreateTableFile(table_file);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(table_file.file_type_, meta::TableFileSchema::NEW);

    meta::DatesT dates;
    dates.push_back(utils::GetDate());
    status = impl.DropPartitionsByDates(table_file.table_id_, dates);
    status = impl_->DropPartitionsByDates(table_file.table_id_, dates);
    ASSERT_TRUE(status.ok());

    uint64_t cnt = 0;
    status = impl.Count(table_id, cnt);
    status = impl_->Count(table_id, cnt);
//    ASSERT_TRUE(status.ok());
//    ASSERT_EQ(cnt, 0UL);
@@ -104,7 +82,7 @@ TEST_F(MySQLTest, TABLE_FILE_TEST) {
    auto new_file_type = meta::TableFileSchema::INDEX;
    table_file.file_type_ = new_file_type;

    status = impl.UpdateTableFile(table_file);
    status = impl_->UpdateTableFile(table_file);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(table_file.file_type_, new_file_type);
@@ -112,42 +90,31 @@ TEST_F(MySQLTest, TABLE_FILE_TEST) {
    for (auto i=2; i < 10; ++i) {
        dates.push_back(utils::GetDateWithDelta(-1*i));
    }
    status = impl.DropPartitionsByDates(table_file.table_id_, dates);
    status = impl_->DropPartitionsByDates(table_file.table_id_, dates);
    ASSERT_TRUE(status.ok());

    table_file.date_ = utils::GetDateWithDelta(-2);
    status = impl.UpdateTableFile(table_file);
    status = impl_->UpdateTableFile(table_file);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(table_file.date_, utils::GetDateWithDelta(-2));
    ASSERT_FALSE(table_file.file_type_ == meta::TableFileSchema::TO_DELETE);

    dates.clear();
    dates.push_back(table_file.date_);
    status = impl.DropPartitionsByDates(table_file.table_id_, dates);
    status = impl_->DropPartitionsByDates(table_file.table_id_, dates);
    ASSERT_TRUE(status.ok());

    std::vector<size_t> ids = {table_file.id_};
    meta::TableFilesSchema files;
    status = impl.GetTableFiles(table_file.table_id_, ids, files);
    status = impl_->GetTableFiles(table_file.table_id_, ids, files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(files.size(), 1UL);
    ASSERT_TRUE(files[0].file_type_ == meta::TableFileSchema::TO_DELETE);

//    status = impl.NextTableId(table_id);

    status = impl.DropAll();
    ASSERT_TRUE(status.ok());
}

TEST_F(MySQLTest, ARCHIVE_TEST_DAYS) {
TEST_F(MySqlMetaTest, ARCHIVE_TEST_DAYS) {
    srand(time(0));
    DBMetaOptions options;
    try {
        options = getDBMetaOptions();
    } catch(std::exception& ex) {
        ASSERT_TRUE(false);
        return;
    }
    DBMetaOptions options = GetOptions().meta;

    int days_num = rand() % 100;
    std::stringstream ss;
@@ -211,14 +178,8 @@ TEST_F(MySQLTest, ARCHIVE_TEST_DAYS) {
    ASSERT_TRUE(status.ok());
}

TEST_F(MySQLTest, ARCHIVE_TEST_DISK) {
    DBMetaOptions options;
    try {
        options = getDBMetaOptions();
    } catch(std::exception& ex) {
        ASSERT_TRUE(false);
        return;
    }
TEST_F(MySqlMetaTest, ARCHIVE_TEST_DISK) {
    DBMetaOptions options = GetOptions().meta;

    options.archive_conf = ArchiveConf("delete", "disk:11");
    int mode = Options::MODE::SINGLE;
@@ -269,23 +230,12 @@ TEST_F(MySQLTest, ARCHIVE_TEST_DISK) {
    ASSERT_TRUE(status.ok());
}

TEST_F(MySQLTest, TABLE_FILES_TEST) {
    DBMetaOptions options;
    try {
        options = getDBMetaOptions();
    } catch(std::exception& ex) {
        ASSERT_TRUE(false);
        return;
    }

    int mode = Options::MODE::SINGLE;
    auto impl = meta::MySQLMetaImpl(options, mode);

TEST_F(MySqlMetaTest, TABLE_FILES_TEST) {
    auto table_id = "meta_test_group";

    meta::TableSchema table;
    table.table_id_ = table_id;
    auto status = impl.CreateTable(table);
    auto status = impl_->CreateTable(table);

    int new_files_cnt = 4;
    int raw_files_cnt = 5;
@@ -296,66 +246,66 @@ TEST_F(MySQLTest, TABLE_FILES_TEST) {
    table_file.table_id_ = table.table_id_;

    for (auto i=0; i<new_files_cnt; ++i) {
        status = impl.CreateTableFile(table_file);
        status = impl_->CreateTableFile(table_file);
        table_file.file_type_ = meta::TableFileSchema::NEW;
        status = impl.UpdateTableFile(table_file);
        status = impl_->UpdateTableFile(table_file);
    }

    for (auto i=0; i<raw_files_cnt; ++i) {
        status = impl.CreateTableFile(table_file);
        status = impl_->CreateTableFile(table_file);
        table_file.file_type_ = meta::TableFileSchema::RAW;
        status = impl.UpdateTableFile(table_file);
        status = impl_->UpdateTableFile(table_file);
    }

    for (auto i=0; i<to_index_files_cnt; ++i) {
        status = impl.CreateTableFile(table_file);
        status = impl_->CreateTableFile(table_file);
        table_file.file_type_ = meta::TableFileSchema::TO_INDEX;
        status = impl.UpdateTableFile(table_file);
        status = impl_->UpdateTableFile(table_file);
    }

    for (auto i=0; i<index_files_cnt; ++i) {
        status = impl.CreateTableFile(table_file);
        status = impl_->CreateTableFile(table_file);
        table_file.file_type_ = meta::TableFileSchema::INDEX;
        status = impl.UpdateTableFile(table_file);
        status = impl_->UpdateTableFile(table_file);
    }

    meta::TableFilesSchema files;

    status = impl.FilesToIndex(files);
    status = impl_->FilesToIndex(files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(files.size(), to_index_files_cnt);

    meta::DatePartionedTableFilesSchema dated_files;
    status = impl.FilesToMerge(table.table_id_, dated_files);
    status = impl_->FilesToMerge(table.table_id_, dated_files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(dated_files[table_file.date_].size(), raw_files_cnt);

    status = impl.FilesToIndex(files);
    status = impl_->FilesToIndex(files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(files.size(), to_index_files_cnt);

    meta::DatesT dates = {table_file.date_};
    std::vector<size_t> ids;
    status = impl.FilesToSearch(table_id, ids, dates, dated_files);
    status = impl_->FilesToSearch(table_id, ids, dates, dated_files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(dated_files[table_file.date_].size(),
              to_index_files_cnt+raw_files_cnt+index_files_cnt);

    status = impl.FilesToSearch(table_id, ids, meta::DatesT(), dated_files);
    status = impl_->FilesToSearch(table_id, ids, meta::DatesT(), dated_files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(dated_files[table_file.date_].size(),
              to_index_files_cnt+raw_files_cnt+index_files_cnt);

    status = impl.FilesToSearch(table_id, ids, meta::DatesT(), dated_files);
    status = impl_->FilesToSearch(table_id, ids, meta::DatesT(), dated_files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(dated_files[table_file.date_].size(),
              to_index_files_cnt+raw_files_cnt+index_files_cnt);

    ids.push_back(size_t(9999999999));
    status = impl.FilesToSearch(table_id, ids, dates, dated_files);
    status = impl_->FilesToSearch(table_id, ids, dates, dated_files);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(dated_files[table_file.date_].size(),0);

    status = impl.DropAll();
    status = impl_->DropAll();
    ASSERT_TRUE(status.ok());
}
@@ -42,8 +42,8 @@ void ASSERT_STATS(engine::Status& stat) {
    }
}


void DBTest::InitLog() {
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void BaseTest::InitLog() {
    el::Configurations defaultConf;
    defaultConf.setToDefault();
    defaultConf.set(el::Level::Debug,
@@ -51,13 +51,14 @@ void DBTest::InitLog() {
    el::Loggers::reconfigureLogger("default", defaultConf);
}

engine::Options DBTest::GetOptions() {
engine::Options BaseTest::GetOptions() {
    auto options = engine::OptionsFactory::Build();
    options.meta.path = "/tmp/milvus_test";
    options.meta.backend_uri = "sqlite://:@:/";
    return options;
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void DBTest::SetUp() {
    InitLog();
@@ -82,6 +83,7 @@ void DBTest::SetUp() {
}

void DBTest::TearDown() {
    db_->Stop();
    db_->DropAll();
    delete db_;
@@ -91,6 +93,7 @@ void DBTest::TearDown() {
    boost::filesystem::remove_all("/tmp/milvus_test");
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
engine::Options DBTest2::GetOptions() {
    auto options = engine::OptionsFactory::Build();
    options.meta.path = "/tmp/milvus_test";
@@ -99,6 +102,7 @@ engine::Options DBTest2::GetOptions() {
    return options;
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void MetaTest::SetUp() {
    InitLog();
    impl_ = engine::DBMetaImplFactory::Build();
@@ -108,21 +112,8 @@ void MetaTest::TearDown() {
    impl_->DropAll();
}

zilliz::milvus::engine::DBMetaOptions MySQLTest::getDBMetaOptions() {
//    std::string path = "/tmp/milvus_test";
//    engine::DBMetaOptions options = engine::DBMetaOptionsFactory::Build(path);
    zilliz::milvus::engine::DBMetaOptions options;
    options.path = "/tmp/milvus_test";
    options.backend_uri = DBTestEnvironment::getURI();

    if(options.backend_uri.empty()) {
        options.backend_uri = "mysql://root:Fantast1c@192.168.1.194:3306/";
    }

    return options;
}

zilliz::milvus::engine::Options MySQLDBTest::GetOptions() {
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
engine::Options MySqlDBTest::GetOptions() {
    auto options = engine::OptionsFactory::Build();
    options.meta.path = "/tmp/milvus_test";
    options.meta.backend_uri = DBTestEnvironment::getURI();
@@ -134,33 +125,32 @@ zilliz::milvus::engine::Options MySQLDBTest::GetOptions() {
    return options;
}

void NewMemManagerTest::InitLog() {
    el::Configurations defaultConf;
    defaultConf.setToDefault();
    defaultConf.set(el::Level::Debug,
                    el::ConfigurationType::Format, "[%thread-%datetime-%level]: %msg (%fbase:%line)");
    el::Loggers::reconfigureLogger("default", defaultConf);
}

void NewMemManagerTest::SetUp() {
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void MySqlMetaTest::SetUp() {
    InitLog();

    auto res_mgr = engine::ResMgrInst::GetInstance();
    res_mgr->Clear();
    res_mgr->Add(engine::ResourceFactory::Create("disk", "DISK", 0, true, false));
    res_mgr->Add(engine::ResourceFactory::Create("cpu", "CPU", 0, true, true));

    auto default_conn = engine::Connection("IO", 500.0);
    res_mgr->Connect("disk", "cpu", default_conn);
    res_mgr->Start();
    engine::SchedInst::GetInstance()->Start();
    engine::DBMetaOptions options = GetOptions().meta;
    int mode = engine::Options::MODE::SINGLE;
    impl_ = std::make_shared<engine::meta::MySQLMetaImpl>(options, mode);
}

void NewMemManagerTest::TearDown() {
    engine::ResMgrInst::GetInstance()->Stop();
    engine::SchedInst::GetInstance()->Stop();
void MySqlMetaTest::TearDown() {
    impl_->DropAll();
}

zilliz::milvus::engine::Options MySqlMetaTest::GetOptions() {
    auto options = engine::OptionsFactory::Build();
    options.meta.path = "/tmp/milvus_test";
    options.meta.backend_uri = DBTestEnvironment::getURI();

    if(options.meta.backend_uri.empty()) {
        options.meta.backend_uri = "mysql://root:Fantast1c@192.168.1.194:3306/";
    }

    return options;
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
    ::testing::InitGoogleTest(&argc, argv);
    if (argc > 1) {
@@ -34,44 +34,29 @@

void ASSERT_STATS(zilliz::milvus::engine::Status &stat);

//class TestEnv : public ::testing::Environment {
//public:
//
//    static std::string getURI() {
//        if (const char* uri = std::getenv("MILVUS_DBMETA_URI")) {
//            return uri;
//        }
//        else {
//            return "";
//        }
//    }
//
//    void SetUp() override {
//        getURI();
//    }
//
//};
//
//::testing::Environment* const test_env =
//        ::testing::AddGlobalTestEnvironment(new TestEnv);

class DBTest : public ::testing::Test {
 protected:
    zilliz::milvus::engine::DB *db_;

class BaseTest : public ::testing::Test {
 protected:
    void InitLog();
    virtual void SetUp() override;
    virtual void TearDown() override;
    virtual zilliz::milvus::engine::Options GetOptions();
};

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class DBTest : public BaseTest {
 protected:
    zilliz::milvus::engine::DB *db_;

    virtual void SetUp() override;
    virtual void TearDown() override;
};

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class DBTest2 : public DBTest {
 protected:
    virtual zilliz::milvus::engine::Options GetOptions() override;
};


class MetaTest : public DBTest {
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class MetaTest : public BaseTest {
 protected:
    std::shared_ptr<zilliz::milvus::engine::meta::SqliteMetaImpl> impl_;
@@ -79,19 +64,23 @@ class MetaTest : public DBTest {
    virtual void TearDown() override;
};

class MySQLTest : public ::testing::Test {
 protected:
//    std::shared_ptr<zilliz::milvus::engine::meta::MySQLMetaImpl> impl_;
    zilliz::milvus::engine::DBMetaOptions getDBMetaOptions();
};

class MySQLDBTest : public DBTest {
 protected:
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class MySqlDBTest : public DBTest {
 protected:
    zilliz::milvus::engine::Options GetOptions();
};

class NewMemManagerTest : public ::testing::Test {
    void InitLog();
    void SetUp() override;
    void TearDown() override;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class MySqlMetaTest : public BaseTest {
 protected:
    std::shared_ptr<zilliz::milvus::engine::meta::MySQLMetaImpl> impl_;

    virtual void SetUp() override;
    virtual void TearDown() override;
    zilliz::milvus::engine::Options GetOptions();
};


/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class MemManagerTest : public DBTest {
};
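
Note on the refactored test fixtures above: the memory-manager tests now run against the db_ instance that the shared DBTest fixture builds in SetUp() and destroys in TearDown(), instead of constructing and deleting their own engine::DB. A minimal sketch of what a new test written against the MemManagerTest fixture could look like, assuming the BuildTectors-style helpers BuildTableSchema()/BuildVectors() and the TABLE_NAME constant used by the existing unit tests; the test name EXAMPLE_INSERT_TEST is hypothetical, not part of this commit:

TEST_F(MemManagerTest, EXAMPLE_INSERT_TEST) {
    // db_ is created by DBTest::SetUp() and cleaned up by DBTest::TearDown(),
    // so the test no longer calls engine::DBFactory::Build() or delete db_.
    engine::meta::TableSchema table_info = BuildTableSchema();
    engine::Status stat = db_->CreateTable(table_info);
    ASSERT_STATS(stat);

    int64_t nb = 4096;
    std::vector<float> xb;
    BuildVectors(nb, xb);    // fill xb with nb random vectors of TABLE_DIM dimensions

    engine::IDNumbers vector_ids;
    stat = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
    ASSERT_TRUE(stat.ok());
}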